summaryrefslogtreecommitdiff
path: root/deps/v8/test/unittests
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/test/unittests')
-rw-r--r--deps/v8/test/unittests/BUILD.gn278
-rw-r--r--deps/v8/test/unittests/api/api-wasm-unittest.cc50
-rw-r--r--deps/v8/test/unittests/api/deserialize-unittest.cc105
-rw-r--r--deps/v8/test/unittests/api/exception-unittest.cc2
-rw-r--r--deps/v8/test/unittests/api/v8-script-unittest.cc170
-rw-r--r--deps/v8/test/unittests/assembler/assembler-x64-unittest.cc48
-rw-r--r--deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc147
-rw-r--r--deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc8
-rw-r--r--deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc2
-rw-r--r--deps/v8/test/unittests/assembler/disasm-x64-unittest.cc19
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-arm-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc)34
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc325
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-ia32-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-loong64-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-mips64-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-ppc-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc)75
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-riscv-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-riscv-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-s390-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc59
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc254
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc65
-rw-r--r--deps/v8/test/unittests/base/ieee754-unittest.cc245
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc18
-rw-r--r--deps/v8/test/unittests/base/threaded-list-unittest.cc37
-rw-r--r--deps/v8/test/unittests/base/vector-unittest.cc27
-rw-r--r--deps/v8/test/unittests/codegen/code-layout-unittest.cc21
-rw-r--r--deps/v8/test/unittests/codegen/code-pages-unittest.cc60
-rw-r--r--deps/v8/test/unittests/codegen/factory-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc23
-rw-r--r--deps/v8/test/unittests/compiler/codegen-tester.h10
-rw-r--r--deps/v8/test/unittests/compiler/compiler-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/function-tester.cc11
-rw-r--r--deps/v8/test/unittests/compiler/function-tester.h4
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.h1
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h1
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc74
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc38
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc372
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc1
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h2
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/revec-unittest.cc239
-rw-r--r--deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/run-deopt-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc324
-rw-r--r--deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc346
-rw-r--r--deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc787
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc12
-rw-r--r--deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc17
-rw-r--r--deps/v8/test/unittests/flags/flag-definitions-unittest.cc14
-rw-r--r--deps/v8/test/unittests/heap/allocation-observer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc18
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc287
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc34
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc347
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc25
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h9
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc401
-rw-r--r--deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc62
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc42
-rw-r--r--deps/v8/test/unittests/heap/cppgc/logging-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/member-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc208
-rw-r--r--deps/v8/test/unittests/heap/cppgc/test-platform.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h1
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc1212
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc19
-rw-r--r--deps/v8/test/unittests/heap/global-handles-unittest.cc130
-rw-r--r--deps/v8/test/unittests/heap/global-safepoint-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc24
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.cc13
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.h30
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc18
-rw-r--r--deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc164
-rw-r--r--deps/v8/test/unittests/heap/marking-worklist-unittest.cc46
-rw-r--r--deps/v8/test/unittests/heap/memory-reducer-unittest.cc247
-rw-r--r--deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc179
-rw-r--r--deps/v8/test/unittests/heap/shared-heap-unittest.cc33
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc17
-rw-r--r--deps/v8/test/unittests/inspector/inspector-unittest.cc48
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden28
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden9
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden20
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden9
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden33
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden30
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-unittest.cc28
-rw-r--r--deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc3
-rw-r--r--deps/v8/test/unittests/libplatform/tracing-unittest.cc20
-rw-r--r--deps/v8/test/unittests/logging/log-unittest.cc9
-rw-r--r--deps/v8/test/unittests/objects/concurrent-string-unittest.cc4
-rw-r--r--deps/v8/test/unittests/objects/roots-unittest.cc11
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc172
-rw-r--r--deps/v8/test/unittests/objects/weakmaps-unittest.cc6
-rw-r--r--deps/v8/test/unittests/parser/decls-unittest.cc2
-rw-r--r--deps/v8/test/unittests/parser/parsing-unittest.cc12
-rw-r--r--deps/v8/test/unittests/regexp/regexp-unittest.cc7
-rw-r--r--deps/v8/test/unittests/test-utils.cc20
-rw-r--r--deps/v8/test/unittests/test-utils.h36
-rw-r--r--deps/v8/test/unittests/testcfg.py20
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc2
-rw-r--r--deps/v8/test/unittests/unittests.status35
-rw-r--r--deps/v8/test/unittests/utils/identity-map-unittest.cc1
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc137
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc199
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc5
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc29
-rw-r--r--deps/v8/test/unittests/wasm/memory-protection-unittest.cc66
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc356
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc80
-rw-r--r--deps/v8/test/unittests/wasm/struct-types-unittest.cc70
-rw-r--r--deps/v8/test/unittests/wasm/subtyping-unittest.cc157
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-arm64-unittest.cc (renamed from deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc)145
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc145
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc135
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc107
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc1
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc31
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc9
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc49
-rw-r--r--deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc1135
-rw-r--r--deps/v8/test/unittests/zone/zone-vector-unittest.cc373
136 files changed, 7002 insertions, 4924 deletions
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index ff952d03b2..a006614021 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -4,6 +4,17 @@
import("../../gni/v8.gni")
+if (v8_enable_webassembly) {
+ # Specifies if the target build is a simulator build. Comparing target cpu
+ # with v8 target cpu to not affect simulator builds for making cross-compile
+ # snapshots.
+ target_is_simulator = (target_cpu != v8_target_cpu && !v8_multi_arch_build) ||
+ (current_cpu != v8_current_cpu && v8_multi_arch_build)
+ if (!target_is_simulator && v8_current_cpu == "x64") {
+ v8_enable_wasm_simd256_revec = true
+ }
+}
+
if (is_fuchsia) {
import("//build/config/fuchsia/generate_runner_scripts.gni")
import("//third_party/fuchsia-sdk/sdk/build/component.gni")
@@ -11,7 +22,7 @@ if (is_fuchsia) {
fuchsia_component("v8_unittests_component") {
testonly = true
- data_deps = [ ":unittests" ]
+ data_deps = [ ":v8_unittests" ]
manifest = "v8_unittests.cml"
}
@@ -185,7 +196,7 @@ v8_source_set("cppgc_unittests_sources") {
}
}
-v8_executable("unittests") {
+v8_executable("v8_unittests") {
testonly = true
if (current_os == "aix") {
@@ -194,7 +205,7 @@ v8_executable("unittests") {
deps = [
":inspector_unittests_sources",
- ":unittests_sources",
+ ":v8_unittests_sources",
":v8_heap_base_unittests_sources",
"../..:v8_for_testing",
"../..:v8_libbase",
@@ -223,7 +234,7 @@ v8_executable("unittests") {
]
}
-v8_source_set("unittests_sources") {
+v8_source_set("v8_unittests_sources") {
testonly = true
sources = [
@@ -288,90 +299,11 @@ v8_source_set("unittests_sources") {
"codegen/aligned-slot-allocator-unittest.cc",
"codegen/code-layout-unittest.cc",
"codegen/code-pages-unittest.cc",
- "codegen/code-stub-assembler-unittest.cc",
- "codegen/code-stub-assembler-unittest.h",
"codegen/factory-unittest.cc",
"codegen/register-configuration-unittest.cc",
"codegen/source-position-table-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-unittest.cc",
"compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc",
- "compiler/backend/instruction-selector-unittest.cc",
- "compiler/backend/instruction-selector-unittest.h",
- "compiler/backend/instruction-sequence-unittest.cc",
- "compiler/backend/instruction-sequence-unittest.h",
- "compiler/backend/instruction-unittest.cc",
- "compiler/branch-elimination-unittest.cc",
- "compiler/bytecode-analysis-unittest.cc",
- "compiler/checkpoint-elimination-unittest.cc",
- "compiler/codegen-tester.cc",
- "compiler/codegen-tester.h",
- "compiler/codegen-unittest.cc",
- "compiler/common-operator-reducer-unittest.cc",
- "compiler/common-operator-unittest.cc",
- "compiler/compiler-test-utils.h",
- "compiler/compiler-unittest.cc",
- "compiler/constant-folding-reducer-unittest.cc",
- "compiler/control-equivalence-unittest.cc",
- "compiler/control-flow-optimizer-unittest.cc",
- "compiler/csa-load-elimination-unittest.cc",
- "compiler/dead-code-elimination-unittest.cc",
- "compiler/decompression-optimizer-unittest.cc",
- "compiler/diamond-unittest.cc",
- "compiler/effect-control-linearizer-unittest.cc",
- "compiler/frame-unittest.cc",
- "compiler/function-tester.cc",
- "compiler/function-tester.h",
- "compiler/graph-reducer-unittest.cc",
- "compiler/graph-reducer-unittest.h",
- "compiler/graph-trimmer-unittest.cc",
- "compiler/graph-unittest.cc",
- "compiler/graph-unittest.h",
- "compiler/js-call-reducer-unittest.cc",
- "compiler/js-create-lowering-unittest.cc",
- "compiler/js-intrinsic-lowering-unittest.cc",
- "compiler/js-native-context-specialization-unittest.cc",
- "compiler/js-operator-unittest.cc",
- "compiler/js-typed-lowering-unittest.cc",
- "compiler/linkage-tail-call-unittest.cc",
- "compiler/load-elimination-unittest.cc",
- "compiler/loop-peeling-unittest.cc",
- "compiler/machine-operator-reducer-unittest.cc",
- "compiler/machine-operator-unittest.cc",
- "compiler/node-cache-unittest.cc",
- "compiler/node-matchers-unittest.cc",
- "compiler/node-properties-unittest.cc",
- "compiler/node-test-utils.cc",
- "compiler/node-test-utils.h",
- "compiler/node-unittest.cc",
- "compiler/opcodes-unittest.cc",
- "compiler/persistent-unittest.cc",
- "compiler/redundancy-elimination-unittest.cc",
- "compiler/regalloc/live-range-unittest.cc",
- "compiler/regalloc/mid-tier-register-allocator-unittest.cc",
- "compiler/regalloc/move-optimizer-unittest.cc",
- "compiler/regalloc/register-allocator-unittest.cc",
- "compiler/run-bytecode-graph-builder-unittest.cc",
- "compiler/run-deopt-unittest.cc",
- "compiler/run-jsbranches-unittest.cc",
- "compiler/run-jscalls-unittest.cc",
- "compiler/run-jsexceptions-unittest.cc",
- "compiler/run-jsobjects-unittest.cc",
- "compiler/run-jsops-unittest.cc",
- "compiler/run-tail-calls-unittest.cc",
- "compiler/schedule-unittest.cc",
- "compiler/scheduler-rpo-unittest.cc",
- "compiler/scheduler-unittest.cc",
- "compiler/simplified-lowering-unittest.cc",
- "compiler/simplified-operator-reducer-unittest.cc",
- "compiler/simplified-operator-unittest.cc",
- "compiler/sloppy-equality-unittest.cc",
- "compiler/state-values-utils-unittest.cc",
- "compiler/turboshaft/snapshot-table-unittest.cc",
- "compiler/typed-optimization-unittest.cc",
- "compiler/typer-unittest.cc",
- "compiler/types-unittest.cc",
- "compiler/value-numbering-reducer-unittest.cc",
- "compiler/zone-stats-unittest.cc",
"date/date-cache-unittest.cc",
"date/date-unittest.cc",
"debug/debug-property-iterator-unittest.cc",
@@ -393,12 +325,13 @@ v8_source_set("unittests_sources") {
"heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",
"heap/code-object-registry-unittest.cc",
+ "heap/cppgc-js/embedder-roots-handler-unittest.cc",
"heap/cppgc-js/traced-reference-unittest.cc",
"heap/cppgc-js/unified-heap-snapshot-unittest.cc",
"heap/cppgc-js/unified-heap-unittest.cc",
"heap/cppgc-js/unified-heap-utils.cc",
"heap/cppgc-js/unified-heap-utils.h",
- "heap/embedder-tracing-unittest.cc",
+ "heap/cppgc-js/young-unified-heap-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
"heap/gc-tracer-unittest.cc",
"heap/global-handles-unittest.cc",
@@ -441,8 +374,6 @@ v8_source_set("unittests_sources") {
"interpreter/bytecode-utils.h",
"interpreter/bytecodes-unittest.cc",
"interpreter/constant-array-builder-unittest.cc",
- "interpreter/interpreter-assembler-unittest.cc",
- "interpreter/interpreter-assembler-unittest.h",
"interpreter/interpreter-intrinsics-unittest.cc",
"interpreter/interpreter-tester.cc",
"interpreter/interpreter-tester.h",
@@ -534,12 +465,100 @@ v8_source_set("unittests_sources") {
"utils/sparse-bit-vector-unittest.cc",
"utils/utils-unittest.cc",
"utils/version-unittest.cc",
- "web-snapshot/web-snapshot-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
+ "zone/zone-vector-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [
+ "codegen/code-stub-assembler-unittest.cc",
+ "codegen/code-stub-assembler-unittest.h",
+ "compiler/backend/instruction-selector-unittest.cc",
+ "compiler/backend/instruction-selector-unittest.h",
+ "compiler/backend/instruction-sequence-unittest.cc",
+ "compiler/backend/instruction-sequence-unittest.h",
+ "compiler/backend/instruction-unittest.cc",
+ "compiler/branch-elimination-unittest.cc",
+ "compiler/bytecode-analysis-unittest.cc",
+ "compiler/checkpoint-elimination-unittest.cc",
+ "compiler/codegen-tester.cc",
+ "compiler/codegen-tester.h",
+ "compiler/codegen-unittest.cc",
+ "compiler/common-operator-reducer-unittest.cc",
+ "compiler/common-operator-unittest.cc",
+ "compiler/compiler-test-utils.h",
+ "compiler/compiler-unittest.cc",
+ "compiler/constant-folding-reducer-unittest.cc",
+ "compiler/control-equivalence-unittest.cc",
+ "compiler/control-flow-optimizer-unittest.cc",
+ "compiler/csa-load-elimination-unittest.cc",
+ "compiler/dead-code-elimination-unittest.cc",
+ "compiler/decompression-optimizer-unittest.cc",
+ "compiler/diamond-unittest.cc",
+ "compiler/effect-control-linearizer-unittest.cc",
+ "compiler/frame-unittest.cc",
+ "compiler/function-tester.cc",
+ "compiler/function-tester.h",
+ "compiler/graph-reducer-unittest.cc",
+ "compiler/graph-reducer-unittest.h",
+ "compiler/graph-trimmer-unittest.cc",
+ "compiler/graph-unittest.cc",
+ "compiler/graph-unittest.h",
+ "compiler/js-call-reducer-unittest.cc",
+ "compiler/js-create-lowering-unittest.cc",
+ "compiler/js-intrinsic-lowering-unittest.cc",
+ "compiler/js-native-context-specialization-unittest.cc",
+ "compiler/js-operator-unittest.cc",
+ "compiler/js-typed-lowering-unittest.cc",
+ "compiler/linkage-tail-call-unittest.cc",
+ "compiler/load-elimination-unittest.cc",
+ "compiler/loop-peeling-unittest.cc",
+ "compiler/machine-operator-reducer-unittest.cc",
+ "compiler/machine-operator-unittest.cc",
+ "compiler/node-cache-unittest.cc",
+ "compiler/node-matchers-unittest.cc",
+ "compiler/node-properties-unittest.cc",
+ "compiler/node-test-utils.cc",
+ "compiler/node-test-utils.h",
+ "compiler/node-unittest.cc",
+ "compiler/opcodes-unittest.cc",
+ "compiler/persistent-unittest.cc",
+ "compiler/redundancy-elimination-unittest.cc",
+ "compiler/regalloc/live-range-unittest.cc",
+ "compiler/regalloc/mid-tier-register-allocator-unittest.cc",
+ "compiler/regalloc/move-optimizer-unittest.cc",
+ "compiler/regalloc/register-allocator-unittest.cc",
+ "compiler/run-bytecode-graph-builder-unittest.cc",
+ "compiler/run-deopt-unittest.cc",
+ "compiler/run-jsbranches-unittest.cc",
+ "compiler/run-jscalls-unittest.cc",
+ "compiler/run-jsexceptions-unittest.cc",
+ "compiler/run-jsobjects-unittest.cc",
+ "compiler/run-jsops-unittest.cc",
+ "compiler/run-tail-calls-unittest.cc",
+ "compiler/schedule-unittest.cc",
+ "compiler/scheduler-rpo-unittest.cc",
+ "compiler/scheduler-unittest.cc",
+ "compiler/simplified-lowering-unittest.cc",
+ "compiler/simplified-operator-reducer-unittest.cc",
+ "compiler/simplified-operator-unittest.cc",
+ "compiler/sloppy-equality-unittest.cc",
+ "compiler/state-values-utils-unittest.cc",
+ "compiler/turboshaft/snapshot-table-unittest.cc",
+ "compiler/turboshaft/turboshaft-typer-unittest.cc",
+ "compiler/turboshaft/turboshaft-types-unittest.cc",
+ "compiler/typed-optimization-unittest.cc",
+ "compiler/typer-unittest.cc",
+ "compiler/types-unittest.cc",
+ "compiler/value-numbering-reducer-unittest.cc",
+ "compiler/zone-stats-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.h",
+ ]
+ }
+
if (v8_enable_runtime_call_stats) {
sources += [ "logging/runtime-call-stats-unittest.cc" ]
}
@@ -564,15 +583,21 @@ v8_source_set("unittests_sources") {
"wasm/simd-shuffle-unittest.cc",
"wasm/streaming-decoder-unittest.cc",
"wasm/string-builder-unittest.cc",
+ "wasm/struct-types-unittest.cc",
"wasm/subtyping-unittest.cc",
"wasm/wasm-code-manager-unittest.cc",
"wasm/wasm-compiler-unittest.cc",
+ "wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc",
+ "wasm/wasm-disassembler-unittest-gc.wasm.inc",
+ "wasm/wasm-disassembler-unittest-gc.wat.inc",
"wasm/wasm-disassembler-unittest-mvp.wasm.inc",
"wasm/wasm-disassembler-unittest-mvp.wat.inc",
"wasm/wasm-disassembler-unittest-names.wasm.inc",
"wasm/wasm-disassembler-unittest-names.wat.inc",
"wasm/wasm-disassembler-unittest-simd.wasm.inc",
"wasm/wasm-disassembler-unittest-simd.wat.inc",
+ "wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc",
+ "wasm/wasm-disassembler-unittest-too-many-ends.wat.inc",
"wasm/wasm-disassembler-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
@@ -581,28 +606,24 @@ v8_source_set("unittests_sources") {
}
if (v8_enable_wasm_simd256_revec) {
- sources += [ "compiler/linear-scheduler-unittest.cc" ]
+ sources += [
+ "compiler/linear-scheduler-unittest.cc",
+ "compiler/revec-unittest.cc",
+ ]
}
if (v8_enable_wasm_gdb_remote_debugging) {
sources += [ "wasm/wasm-gdbserver-unittest.cc" ]
}
- if (v8_enable_inner_pointer_resolution_osb) {
- sources += [ "heap/object-start-bitmap-unittest.cc" ]
- }
-
- if (v8_enable_inner_pointer_resolution_mb) {
- sources += [ "heap/marking-inner-pointer-resolution-unittest.cc" ]
- }
-
if (v8_enable_conservative_stack_scanning) {
sources += [ "heap/conservative-stack-visitor-unittest.cc" ]
+ sources += [ "heap/marking-inner-pointer-resolution-unittest.cc" ]
}
if (v8_enable_i18n_support) {
defines = [ "V8_INTL_SUPPORT" ]
- public_deps = [ "//third_party/icu" ]
+ public_deps = [ v8_icu_path ]
} else {
sources -= [ "objects/intl-unittest.cc" ]
}
@@ -610,70 +631,91 @@ v8_source_set("unittests_sources") {
if (v8_current_cpu == "arm") {
sources += [
"assembler/disasm-arm-unittest.cc",
- "assembler/turbo-assembler-arm-unittest.cc",
- "compiler/arm/instruction-selector-arm-unittest.cc",
+ "assembler/macro-assembler-arm-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
+ }
} else if (v8_current_cpu == "arm64") {
sources += [
"assembler/disasm-arm64-unittest.cc",
"assembler/macro-assembler-arm64-unittest.cc",
- "assembler/turbo-assembler-arm64-unittest.cc",
"codegen/pointer-auth-arm64-unittest.cc",
- "compiler/arm64/instruction-selector-arm64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/arm64/instruction-selector-arm64-unittest.cc" ]
+ }
+ if (v8_enable_webassembly && current_cpu == "arm64") {
+ sources += [ "wasm/trap-handler-x64-arm64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "x86") {
sources += [
"assembler/disasm-ia32-unittest.cc",
- "assembler/turbo-assembler-ia32-unittest.cc",
- "compiler/ia32/instruction-selector-ia32-unittest.cc",
+ "assembler/macro-assembler-ia32-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ]
+ }
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
"assembler/disasm-mips64-unittest.cc",
- "assembler/turbo-assembler-mips64-unittest.cc",
- "compiler/mips64/instruction-selector-mips64-unittest.cc",
+ "assembler/macro-assembler-mips64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "riscv64") {
sources += [
"assembler/disasm-riscv-unittest.cc",
- "assembler/turbo-assembler-riscv-unittest.cc",
- "compiler/riscv64/instruction-selector-riscv64-unittest.cc",
+ "assembler/macro-assembler-riscv-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/riscv64/instruction-selector-riscv64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "riscv32") {
sources += [
"assembler/disasm-riscv-unittest.cc",
- "assembler/turbo-assembler-riscv-unittest.cc",
- "compiler/riscv32/instruction-selector-riscv32-unittest.cc",
+ "assembler/macro-assembler-riscv-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/riscv32/instruction-selector-riscv32-unittest.cc" ]
+ }
} else if (v8_current_cpu == "x64") {
sources += [
"assembler/assembler-x64-unittest.cc",
"assembler/disasm-x64-unittest.cc",
"assembler/macro-assembler-x64-unittest.cc",
- "assembler/turbo-assembler-x64-unittest.cc",
- "compiler/x64/instruction-selector-x64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ]
+ }
if (v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-x64-unittest.cc" ]
+ sources += [ "wasm/trap-handler-x64-arm64-unittest.cc" ]
}
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [
"assembler/disasm-ppc-unittest.cc",
- "assembler/turbo-assembler-ppc-unittest.cc",
- "compiler/ppc/instruction-selector-ppc-unittest.cc",
+ "assembler/macro-assembler-ppc-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ]
+ }
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [
"assembler/disasm-s390-unittest.cc",
- "assembler/turbo-assembler-s390-unittest.cc",
- "compiler/s390/instruction-selector-s390-unittest.cc",
+ "assembler/macro-assembler-s390-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ]
+ }
} else if (v8_current_cpu == "loong64") {
sources += [
"assembler/disasm-loong64-unittest.cc",
- "assembler/turbo-assembler-loong64-unittest.cc",
- "compiler/loong64/instruction-selector-loong64-unittest.cc",
+ "assembler/macro-assembler-loong64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/loong64/instruction-selector-loong64-unittest.cc" ]
+ }
}
if (v8_enable_webassembly) {
diff --git a/deps/v8/test/unittests/api/api-wasm-unittest.cc b/deps/v8/test/unittests/api/api-wasm-unittest.cc
index 85174ced7f..ab1f22b8e3 100644
--- a/deps/v8/test/unittests/api/api-wasm-unittest.cc
+++ b/deps/v8/test/unittests/api/api-wasm-unittest.cc
@@ -13,6 +13,8 @@
#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/handles/global-handles.h"
+#include "src/wasm/wasm-features.h"
+#include "test/common/flag-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -156,4 +158,52 @@ TEST_F(ApiWasmTest, WasmStreamingSetCallback) {
Promise::kPending);
}
+TEST_F(ApiWasmTest, WasmEnableDisableGC) {
+ Local<Context> context_local = Context::New(isolate());
+ Context::Scope context_scope(context_local);
+ i::Handle<i::Context> context = v8::Utils::OpenHandle(*context_local);
+ // When using the flags, stringref and GC are controlled independently.
+ {
+ i::FlagScope<bool> flag_gc(&i::v8_flags.experimental_wasm_gc, false);
+ i::FlagScope<bool> flag_stringref(&i::v8_flags.experimental_wasm_stringref,
+ true);
+ EXPECT_FALSE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_TRUE(i_isolate()->IsWasmStringRefEnabled(context));
+ }
+ {
+ i::FlagScope<bool> flag_gc(&i::v8_flags.experimental_wasm_gc, true);
+ i::FlagScope<bool> flag_stringref(&i::v8_flags.experimental_wasm_stringref,
+ false);
+ EXPECT_TRUE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_FALSE(i_isolate()->IsWasmStringRefEnabled(context));
+ }
+ // When providing a callback, the callback will control GC, stringref,
+ // and inlining.
+ isolate()->SetWasmGCEnabledCallback([](auto) { return true; });
+ EXPECT_TRUE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_TRUE(i_isolate()->IsWasmStringRefEnabled(context));
+ EXPECT_TRUE(i_isolate()->IsWasmInliningEnabled(context));
+ {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate());
+ EXPECT_TRUE(enabled_features.has_gc());
+ EXPECT_TRUE(enabled_features.has_stringref());
+ EXPECT_TRUE(enabled_features.has_typed_funcref());
+ EXPECT_TRUE(enabled_features.has_inlining());
+ }
+ isolate()->SetWasmGCEnabledCallback([](auto) { return false; });
+ EXPECT_FALSE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_FALSE(i_isolate()->IsWasmStringRefEnabled(context));
+ // TODO(crbug.com/1424350): Change (or just drop) this expectation when
+ // we enable inlining by default.
+ EXPECT_FALSE(i_isolate()->IsWasmInliningEnabled(context));
+ {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate());
+ EXPECT_FALSE(enabled_features.has_gc());
+ EXPECT_FALSE(enabled_features.has_stringref());
+ EXPECT_FALSE(enabled_features.has_typed_funcref());
+ EXPECT_FALSE(enabled_features.has_inlining());
+ }
+ isolate()->SetWasmGCEnabledCallback(nullptr);
+}
+
} // namespace v8
diff --git a/deps/v8/test/unittests/api/deserialize-unittest.cc b/deps/v8/test/unittests/api/deserialize-unittest.cc
index f1d5299cbf..ab4d41f147 100644
--- a/deps/v8/test/unittests/api/deserialize-unittest.cc
+++ b/deps/v8/test/unittests/api/deserialize-unittest.cc
@@ -359,8 +359,6 @@ class MergeDeserializedCodeTest : public DeserializeTest {
}
}
- i::ScanStackModeScopeForTesting no_stack_scanning(
- i_isolate->heap(), i::Heap::ScanStackMode::kNone);
i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
@@ -411,6 +409,8 @@ class MergeDeserializedCodeTest : public DeserializeTest {
std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
IsolateAndContextScope scope(this);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
ScriptOrigin default_origin(isolate(), NewString(""));
i::Handle<i::WeakFixedArray> original_objects =
@@ -509,8 +509,6 @@ class MergeDeserializedCodeTest : public DeserializeTest {
// At this point, the original_objects array might still have pointers to
// some old discarded content, such as UncompiledData from flushed
// functions. GC again to clear it all out.
- i::ScanStackModeScopeForTesting no_stack_scanning(
- i_isolate->heap(), i::Heap::ScanStackMode::kNone);
i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
@@ -645,6 +643,9 @@ TEST_F(MergeDeserializedCodeTest, MergeWithNoFollowUpWork) {
std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
IsolateAndContextScope scope(this);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
+
ScriptOrigin default_origin(isolate(), NewString(""));
constexpr char kSourceCode[] = "function f() {}";
@@ -727,6 +728,8 @@ TEST_F(MergeDeserializedCodeTest, MergeThatCompilesLazyFunction) {
std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
IsolateAndContextScope scope(this);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
ScriptOrigin default_origin(isolate(), NewString(""));
constexpr char kSourceCode[] =
@@ -819,4 +822,98 @@ TEST_F(MergeDeserializedCodeTest, MergeThatCompilesLazyFunction) {
CHECK(expected->StrictEquals(actual));
}
+TEST_F(MergeDeserializedCodeTest, MergeThatStartsButDoesNotFinish) {
+ i::v8_flags.merge_background_deserialized_script_with_compilation_cache =
+ true;
+ constexpr int kSimultaneousScripts = 10;
+ std::vector<std::unique_ptr<v8::ScriptCompiler::CachedData>> cached_data;
+ IsolateAndContextScope scope(this);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ ScriptOrigin default_origin(isolate(), NewString(""));
+
+ // Compile the script for the first time to produce code cache data.
+ {
+ v8::HandleScope handle_scope(isolate());
+ Local<Script> script =
+ Script::Compile(context(), NewString(kSourceCode), &default_origin)
+ .ToLocalChecked();
+ CHECK(!script->Run(context()).IsEmpty());
+
+ // Create a bunch of copies of the code cache data.
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ cached_data.emplace_back(
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript()));
+ }
+
+ // Age the top-level bytecode so that the Isolate compilation cache will
+ // contain only the Script.
+ i::BytecodeArray bytecode =
+ GetSharedFunctionInfo(script).GetBytecodeArray(i_isolate);
+ for (int j = 0; j < i::v8_flags.bytecode_old_age; ++j) {
+ bytecode.MakeOlder();
+ }
+ }
+
+ i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
+
+ // A second round of GC is necessary in case incremental marking had already
+ // started before the bytecode was aged.
+ i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
+
+ // Start several background deserializations.
+ std::vector<std::unique_ptr<DeserializeThread>> deserialize_threads;
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ deserialize_threads.push_back(std::make_unique<DeserializeThread>(
+ ScriptCompiler::StartConsumingCodeCache(
+ isolate(), std::make_unique<ScriptCompiler::CachedData>(
+ cached_data[i]->data, cached_data[i]->length,
+ ScriptCompiler::CachedData::BufferNotOwned))));
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ CHECK(deserialize_threads[i]->Start());
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ deserialize_threads[i]->Join();
+ }
+
+ // Start background merges for all of those simultaneous scripts.
+ std::vector<std::unique_ptr<ScriptCompiler::ConsumeCodeCacheTask>> tasks;
+ std::vector<std::unique_ptr<MergeThread>> merge_threads;
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ tasks.push_back(deserialize_threads[i]->TakeTask());
+ tasks[i]->SourceTextAvailable(isolate(), NewString(kSourceCode),
+ default_origin);
+ CHECK(tasks[i]->ShouldMergeWithExistingScript());
+ merge_threads.push_back(std::make_unique<MergeThread>(tasks[i].get()));
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ CHECK(merge_threads[i]->Start());
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ merge_threads[i]->Join();
+ }
+
+ // Complete compilation of each script on the main thread. The first one will
+ // actually finish its merge; the others will abandon their in-progress merges
+ // and instead use the result from the first script since it will be in the
+ // Isolate compilation cache.
+ i::Handle<i::SharedFunctionInfo> first_script_sfi;
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ ScriptCompiler::Source source(NewString(kSourceCode), default_origin,
+ cached_data[i].release(), tasks[i].release());
+ Local<Script> script =
+ ScriptCompiler::Compile(context(), &source,
+ ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+ if (i == 0) {
+ first_script_sfi = i::handle(GetSharedFunctionInfo(script), i_isolate);
+ } else {
+ CHECK_EQ(*first_script_sfi, GetSharedFunctionInfo(script));
+ }
+ CHECK(!script->Run(context()).IsEmpty());
+ }
+}
+
} // namespace v8
diff --git a/deps/v8/test/unittests/api/exception-unittest.cc b/deps/v8/test/unittests/api/exception-unittest.cc
index 2455e4c78f..957aeb24fa 100644
--- a/deps/v8/test/unittests/api/exception-unittest.cc
+++ b/deps/v8/test/unittests/api/exception-unittest.cc
@@ -54,6 +54,8 @@ class V8_NODISCARD ScopedExposeGc {
};
TEST_F(APIExceptionTest, ExceptionMessageDoesNotKeepContextAlive) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
ScopedExposeGc expose_gc;
Persistent<Context> weak_context;
{
diff --git a/deps/v8/test/unittests/api/v8-script-unittest.cc b/deps/v8/test/unittests/api/v8-script-unittest.cc
index 98040cf662..79de0c2c67 100644
--- a/deps/v8/test/unittests/api/v8-script-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-script-unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "include/v8-context.h"
#include "include/v8-isolate.h"
#include "include/v8-local-handle.h"
@@ -157,5 +159,173 @@ TEST_F(ScriptTest, GetEmptyStalledTopLevelAwaitMessage) {
{});
}
+TEST_F(ScriptTest, ProduceCompileHints) {
+ const char* url = "http://www.foo.com/foo.js";
+ v8::ScriptOrigin origin(isolate(), NewString(url), 13, 0);
+
+ const char* code = "function lazy1() {} function lazy2() {} lazy1();";
+ v8::ScriptCompiler::Source script_source(NewString(code), origin);
+
+ // Test producing compile hints.
+ {
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(
+ v8_context(), &script_source,
+ v8::ScriptCompiler::CompileOptions::kProduceCompileHints)
+ .ToLocalChecked();
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate());
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ EXPECT_FALSE(result.IsEmpty());
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(1u, compile_hints.size());
+ EXPECT_EQ(14, compile_hints[0]);
+ }
+
+ // The previous data is cleared if we retrieve compile hints again.
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+
+ // Call the other lazy function and retrieve compile hints again.
+ const char* code2 = "lazy2();";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+ EXPECT_FALSE(result2.IsEmpty());
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(1u, compile_hints.size());
+ EXPECT_EQ(34, compile_hints[0]);
+ }
+ }
+
+ // Test that compile hints are not produced unless the relevant compile option
+ // is set.
+ {
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source)
+ .ToLocalChecked();
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate());
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ EXPECT_FALSE(result.IsEmpty());
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+ }
+}
+
+namespace {
+bool CompileHintsCallback(int position, void* data) {
+ std::vector<int>* hints = reinterpret_cast<std::vector<int>*>(data);
+ return std::find(hints->begin(), hints->end(), position) != hints->end();
+}
+} // namespace
+
+TEST_F(ScriptTest, LocalCompileHints) {
+ const char* url = "http://www.foo.com/foo.js";
+ v8::ScriptOrigin origin(isolate(), NewString(url), 13, 0);
+ v8::Local<v8::Context> context = v8::Context::New(isolate());
+
+ // Produce compile hints.
+ std::vector<int> compile_hints;
+ {
+ // Run the top level code.
+ const char* code = "function lazy1() {} function lazy2() {}";
+ v8::ScriptCompiler::Source script_source(NewString(code), origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(
+ v8_context(), &script_source,
+ v8::ScriptCompiler::CompileOptions::kProduceCompileHints)
+ .ToLocalChecked();
+
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ EXPECT_FALSE(result.IsEmpty());
+
+ // Run lazy1.
+ const char* code2 = "lazy1();";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+ EXPECT_FALSE(result2.IsEmpty());
+
+ // Retrieve compile hints.
+ compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(1u, compile_hints.size());
+ }
+
+ // Consume compile hints. We use the produced compile hints to test that the
+ // positions of the requested compile hints match the positions of the
+ // produced compile hints.
+ {
+ // Artificially change the code so that the isolate cache won't hit.
+ const char* code = "function lazy1() {} function lazy2() {} //";
+ v8::ScriptCompiler::Source script_source(
+ NewString(code), origin, CompileHintsCallback,
+ reinterpret_cast<void*>(&compile_hints));
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(
+ v8_context(), &script_source,
+ v8::ScriptCompiler::CompileOptions::kConsumeCompileHints)
+ .ToLocalChecked();
+ USE(script);
+
+ // Retrieve the function object for lazy1.
+ {
+ const char* code2 = "lazy1";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+
+ auto function = i::Handle<i::JSFunction>::cast(
+ Utils::OpenHandle(*result2.ToLocalChecked()));
+ i::Builtin builtin = function->code().builtin_id();
+
+ // lazy1 was not compiled lazily (there was a compile hint for it).
+ EXPECT_NE(i::Builtin::kCompileLazy, builtin);
+ }
+
+ // Retrieve the function object for lazy2.
+ {
+ const char* code2 = "lazy2";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+
+ auto function = i::Handle<i::JSFunction>::cast(
+ Utils::OpenHandle(*result2.ToLocalChecked()));
+
+ i::Builtin builtin = function->code().builtin_id();
+
+ // lazy2 was compiled lazily (there was no compile hint for it).
+ EXPECT_EQ(i::Builtin::kCompileLazy, builtin);
+ }
+ }
+}
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc
index a000cac6ec..5a3fd41549 100644
--- a/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc
@@ -717,7 +717,7 @@ TEST_F(AssemblerX64Test, AssemblerMultiByteNop) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
int res = f.Call();
CHECK_EQ(42, res);
}
@@ -774,7 +774,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Code> code =
Factory::CodeBuilder(i_isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(i_isolate, *code);
int res = f.Call();
args.GetReturnValue().Set(v8::Integer::New(isolate, res));
}
@@ -840,7 +840,7 @@ TEST_F(AssemblerX64Test, AssemblerX64Extractps) {
code->Print(os);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
uint64_t value1 = 0x1234'5678'8765'4321;
CHECK_EQ(0x12345678u, f.Call(base::uint64_to_double(value1)));
uint64_t value2 = 0x8765'4321'1234'5678;
@@ -875,7 +875,7 @@ TEST_F(AssemblerX64Test, AssemblerX64SSE) {
code->Print(os);
#endif
- auto f = GeneratedCode<F6>::FromCode(*code);
+ auto f = GeneratedCode<F6>::FromCode(isolate, *code);
CHECK_EQ(2, f.Call(1.0, 2.0));
}
@@ -905,7 +905,7 @@ TEST_F(AssemblerX64Test, AssemblerX64SSE3) {
code->Print(os);
#endif
- auto f = GeneratedCode<F6>::FromCode(*code);
+ auto f = GeneratedCode<F6>::FromCode(isolate, *code);
CHECK_EQ(4, f.Call(1.0, 2.0));
}
@@ -1126,7 +1126,7 @@ TEST_F(AssemblerX64Test, AssemblerX64FMA_sd) {
code->Print(os);
#endif
- auto f = GeneratedCode<F7>::FromCode(*code);
+ auto f = GeneratedCode<F7>::FromCode(isolate, *code);
CHECK_EQ(
0, f.Call(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
}
@@ -1348,7 +1348,7 @@ TEST_F(AssemblerX64Test, AssemblerX64FMA_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call(9.26621069e-05f, -2.4607749f, -1.09587872f));
}
@@ -1421,7 +1421,7 @@ TEST_F(AssemblerX64Test, AssemblerX64SSE_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
int res = f.Call(1.0f, 2.0f, 3.0f);
PrintF("f(1,2,3) = %d\n", res);
CHECK_EQ(6, res);
@@ -1505,7 +1505,7 @@ TEST_F(AssemblerX64Test, AssemblerX64AVX_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
int res = f.Call(1.0f, 2.0f, 3.0f);
PrintF("f(1,2,3) = %d\n", res);
CHECK_EQ(6, res);
@@ -1743,7 +1743,7 @@ TEST_F(AssemblerX64Test, AssemblerX64AVX_sd) {
code->Print(os);
#endif
- auto f = GeneratedCode<F7>::FromCode(*code);
+ auto f = GeneratedCode<F7>::FromCode(isolate, *code);
int res = f.Call(1.0, 2.0, 3.0);
PrintF("f(1,2,3) = %d\n", res);
CHECK_EQ(6, res);
@@ -1933,7 +1933,7 @@ TEST_F(AssemblerX64Test, AssemblerX64BMI1) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -1991,7 +1991,7 @@ TEST_F(AssemblerX64Test, AssemblerX64LZCNT) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -2049,7 +2049,7 @@ TEST_F(AssemblerX64Test, AssemblerX64POPCNT) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -2310,7 +2310,7 @@ TEST_F(AssemblerX64Test, AssemblerX64BMI2) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -2352,7 +2352,7 @@ TEST_F(AssemblerX64Test, AssemblerX64JumpTables1) {
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int res = f.Call(i);
PrintF("f(%d) = %d\n", i, res);
@@ -2399,7 +2399,7 @@ TEST_F(AssemblerX64Test, AssemblerX64JumpTables2) {
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int res = f.Call(i);
PrintF("f(%d) = %d\n", i, res);
@@ -2455,7 +2455,7 @@ TEST_F(AssemblerX64Test, AssemblerX64vmovups) {
code->Print(os);
#endif
- auto f = GeneratedCode<F9>::FromCode(*code);
+ auto f = GeneratedCode<F9>::FromCode(isolate, *code);
CHECK_EQ(-1.5, f.Call(1.5, -1.5));
}
@@ -2624,6 +2624,10 @@ TEST_F(AssemblerX64Test, AssemblerX64FloatingPoint256bit) {
__ vcvtps2dq(ymm5, Operand(rbx, rcx, times_4, 10000));
__ vcvttpd2dq(xmm6, ymm8);
__ vcvttpd2dq(xmm10, Operand256(rbx, rcx, times_4, 10000));
+ __ vcvtdq2pd(ymm1, xmm2);
+ __ vcvtdq2pd(ymm1, Operand(rbx, rcx, times_4, 10000));
+ __ vcvttps2dq(ymm3, ymm2);
+ __ vcvttps2dq(ymm3, Operand256(rbx, rcx, times_4, 10000));
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -2673,7 +2677,15 @@ TEST_F(AssemblerX64Test, AssemblerX64FloatingPoint256bit) {
// vcvttpd2dq xmm6, ymm8
0xC4, 0xC1, 0x7D, 0xE6, 0xF0,
// vcvttpd2dq xmm10, YMMWORD PTR [rbx+rcx*4+0x2710]
- 0xC5, 0x7D, 0xE6, 0x94, 0x8B, 0x10, 0x27, 0x00, 0x00};
+ 0xC5, 0x7D, 0xE6, 0x94, 0x8B, 0x10, 0x27, 0x00, 0x00,
+ // vcvtdq2pd ymm1, xmm2
+ 0xC5, 0xFE, 0xE6, 0xCA,
+ // vcvtdq2pd ymm1, XMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xC5, 0xFE, 0xE6, 0x8C, 0x8B, 0x10, 0x27, 0x00, 0x00,
+ // vcvttps2dq ymm3, ymm2
+ 0xC5, 0xFE, 0x5B, 0xDA,
+ // vcvttps2dq ymm3, YMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xC5, 0xFE, 0x5B, 0x9C, 0x8B, 0x10, 0x27, 0x00, 0x00};
CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
}
diff --git a/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc b/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc
index 3cf5bf0517..d845144e14 100644
--- a/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc
@@ -1449,6 +1449,153 @@ TEST_F(DisasmArm64Test, load_store_acquire_release) {
COMPARE(stlxrh(wzr, w1, sp), "stlxrh wzr, w1, [sp]");
COMPARE(stlxr(w2, wzr, sp), "stlxr w2, wzr, [sp]");
+ CpuFeatureScope feature_scope(assm, LSE,
+ CpuFeatureScope::kDontCheckSupported);
+
+ COMPARE(cas(w30, w0, MemOperand(x1)), "cas w30, w0, [x1]");
+ COMPARE(cas(w2, w3, MemOperand(sp)), "cas w2, w3, [sp]");
+ COMPARE(cas(x4, x5, MemOperand(x6)), "cas x4, x5, [x6]");
+ COMPARE(cas(x7, x8, MemOperand(sp)), "cas x7, x8, [sp]");
+ COMPARE(casa(w9, w10, MemOperand(x11)), "casa w9, w10, [x11]");
+ COMPARE(casa(w12, w13, MemOperand(sp)), "casa w12, w13, [sp]");
+ COMPARE(casa(x14, x15, MemOperand(x16)), "casa x14, x15, [x16]");
+ COMPARE(casa(x17, x18, MemOperand(sp)), "casa x17, x18, [sp]");
+ COMPARE(casl(w19, w20, MemOperand(x21)), "casl w19, w20, [x21]");
+ COMPARE(casl(w22, w23, MemOperand(sp)), "casl w22, w23, [sp]");
+ COMPARE(casl(x24, x25, MemOperand(x26)), "casl x24, x25, [x26]");
+ COMPARE(casl(x27, x28, MemOperand(sp)), "casl cp, x28, [sp]");
+ COMPARE(casal(w29, w30, MemOperand(x0)), "casal w29, w30, [x0]");
+ COMPARE(casal(w1, w2, MemOperand(sp)), "casal w1, w2, [sp]");
+ COMPARE(casal(x3, x4, MemOperand(x5)), "casal x3, x4, [x5]");
+ COMPARE(casal(x6, x7, MemOperand(sp)), "casal x6, x7, [sp]");
+ COMPARE(casb(w8, w9, MemOperand(x10)), "casb w8, w9, [x10]");
+ COMPARE(casb(w11, w12, MemOperand(sp)), "casb w11, w12, [sp]");
+ COMPARE(casab(w13, w14, MemOperand(x15)), "casab w13, w14, [x15]");
+ COMPARE(casab(w16, w17, MemOperand(sp)), "casab w16, w17, [sp]");
+ COMPARE(caslb(w18, w19, MemOperand(x20)), "caslb w18, w19, [x20]");
+ COMPARE(caslb(w21, w22, MemOperand(sp)), "caslb w21, w22, [sp]");
+ COMPARE(casalb(w23, w24, MemOperand(x25)), "casalb w23, w24, [x25]");
+ COMPARE(casalb(w26, w27, MemOperand(sp)), "casalb w26, w27, [sp]");
+ COMPARE(cash(w28, w29, MemOperand(x30)), "cash w28, w29, [lr]");
+ COMPARE(cash(w0, w1, MemOperand(sp)), "cash w0, w1, [sp]");
+ COMPARE(casah(w2, w3, MemOperand(x4)), "casah w2, w3, [x4]");
+ COMPARE(casah(w5, w6, MemOperand(sp)), "casah w5, w6, [sp]");
+ COMPARE(caslh(w7, w8, MemOperand(x9)), "caslh w7, w8, [x9]");
+ COMPARE(caslh(w10, w11, MemOperand(sp)), "caslh w10, w11, [sp]");
+ COMPARE(casalh(w12, w13, MemOperand(x14)), "casalh w12, w13, [x14]");
+ COMPARE(casalh(w15, w16, MemOperand(sp)), "casalh w15, w16, [sp]");
+ COMPARE(casp(w18, w19, w20, w21, MemOperand(x22)),
+ "casp w18, w19, w20, w21, [x22]");
+ COMPARE(casp(w24, w25, w26, w27, MemOperand(sp)),
+ "casp w24, w25, w26, w27, [sp]");
+ COMPARE(casp(x28, x29, x0, x1, MemOperand(x2)), "casp x28, fp, x0, x1, [x2]");
+ COMPARE(casp(x4, x5, x6, x7, MemOperand(sp)), "casp x4, x5, x6, x7, [sp]");
+ COMPARE(caspa(w8, w9, w10, w11, MemOperand(x12)),
+ "caspa w8, w9, w10, w11, [x12]");
+ COMPARE(caspa(w14, w15, w16, w17, MemOperand(sp)),
+ "caspa w14, w15, w16, w17, [sp]");
+ COMPARE(caspa(x18, x19, x20, x21, MemOperand(x22)),
+ "caspa x18, x19, x20, x21, [x22]");
+ COMPARE(caspa(x24, x25, x26, x27, MemOperand(sp)),
+ "caspa x24, x25, x26, cp, [sp]");
+ COMPARE(caspl(w28, w29, w0, w1, MemOperand(x2)),
+ "caspl w28, w29, w0, w1, [x2]");
+ COMPARE(caspl(w4, w5, w6, w7, MemOperand(sp)), "caspl w4, w5, w6, w7, [sp]");
+ COMPARE(caspl(x8, x9, x10, x11, MemOperand(x12)),
+ "caspl x8, x9, x10, x11, [x12]");
+ COMPARE(caspl(x14, x15, x16, x17, MemOperand(sp)),
+ "caspl x14, x15, x16, x17, [sp]");
+ COMPARE(caspal(w18, w19, w20, w21, MemOperand(x22)),
+ "caspal w18, w19, w20, w21, [x22]");
+ COMPARE(caspal(w24, w25, w26, w27, MemOperand(sp)),
+ "caspal w24, w25, w26, w27, [sp]");
+ COMPARE(caspal(x28, x29, x0, x1, MemOperand(x2)),
+ "caspal x28, fp, x0, x1, [x2]");
+ COMPARE(caspal(x4, x5, x6, x7, MemOperand(sp)),
+ "caspal x4, x5, x6, x7, [sp]");
+
+ CLEANUP();
+}
+
+#define ATOMIC_MEMORY_DISASM_LIST(V, DEF) \
+ V(DEF, add, "add") \
+ V(DEF, clr, "clr") \
+ V(DEF, eor, "eor") \
+ V(DEF, set, "set") \
+ V(DEF, smax, "smax") \
+ V(DEF, smin, "smin") \
+ V(DEF, umax, "umax") \
+ V(DEF, umin, "umin")
+
+#define ATOMIC_MEMORY_DISASM_STORE_X_MODES(V, NAME, STR) \
+ V(NAME, STR) \
+ V(NAME##l, STR "l")
+
+#define ATOMIC_MEMORY_DISASM_STORE_W_MODES(V, NAME, STR) \
+ ATOMIC_MEMORY_DISASM_STORE_X_MODES(V, NAME, STR) \
+ V(NAME##b, STR "b") \
+ V(NAME##lb, STR "lb") \
+ V(NAME##h, STR "h") \
+ V(NAME##lh, STR "lh")
+
+#define ATOMIC_MEMORY_DISASM_LOAD_X_MODES(V, NAME, STR) \
+ ATOMIC_MEMORY_DISASM_STORE_X_MODES(V, NAME, STR) \
+ V(NAME##a, STR "a") \
+ V(NAME##al, STR "al")
+
+#define ATOMIC_MEMORY_DISASM_LOAD_W_MODES(V, NAME, STR) \
+ ATOMIC_MEMORY_DISASM_LOAD_X_MODES(V, NAME, STR) \
+ V(NAME##ab, STR "ab") \
+ V(NAME##alb, STR "alb") \
+ V(NAME##ah, STR "ah") \
+ V(NAME##alh, STR "alh")
+
+TEST_F(DisasmArm64Test, atomic_memory) {
+ SET_UP_MASM();
+
+ CpuFeatureScope feature_scope(assm, LSE,
+ CpuFeatureScope::kDontCheckSupported);
+
+ // These macros generate tests for all the variations of the atomic memory
+ // operations, e.g. ldadd, ldadda, ldaddb, staddl, etc.
+
+#define AM_LOAD_X_TESTS(N, MN) \
+ COMPARE(ld##N(x0, x1, MemOperand(x2)), "ld" MN " x0, x1, [x2]"); \
+ COMPARE(ld##N(x3, x4, MemOperand(sp)), "ld" MN " x3, x4, [sp]");
+#define AM_LOAD_W_TESTS(N, MN) \
+ COMPARE(ld##N(w0, w1, MemOperand(x2)), "ld" MN " w0, w1, [x2]"); \
+ COMPARE(ld##N(w3, w4, MemOperand(sp)), "ld" MN " w3, w4, [sp]");
+#define AM_STORE_X_TESTS(N, MN) \
+ COMPARE(st##N(x0, MemOperand(x1)), "st" MN " x0, [x1]"); \
+ COMPARE(st##N(x2, MemOperand(sp)), "st" MN " x2, [sp]");
+#define AM_STORE_W_TESTS(N, MN) \
+ COMPARE(st##N(w0, MemOperand(x1)), "st" MN " w0, [x1]"); \
+ COMPARE(st##N(w2, MemOperand(sp)), "st" MN " w2, [sp]");
+
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_LOAD_X_MODES, AM_LOAD_X_TESTS)
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_LOAD_W_MODES, AM_LOAD_W_TESTS)
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_STORE_X_MODES,
+ AM_STORE_X_TESTS)
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_STORE_W_MODES,
+ AM_STORE_W_TESTS)
+
+#define AM_SWP_X_TESTS(N, MN) \
+ COMPARE(N(x0, x1, MemOperand(x2)), MN " x0, x1, [x2]"); \
+ COMPARE(N(x3, x4, MemOperand(sp)), MN " x3, x4, [sp]");
+#define AM_SWP_W_TESTS(N, MN) \
+ COMPARE(N(w0, w1, MemOperand(x2)), MN " w0, w1, [x2]"); \
+ COMPARE(N(w3, w4, MemOperand(sp)), MN " w3, w4, [sp]");
+
+ ATOMIC_MEMORY_DISASM_LOAD_X_MODES(AM_SWP_X_TESTS, swp, "swp")
+ ATOMIC_MEMORY_DISASM_LOAD_W_MODES(AM_SWP_W_TESTS, swp, "swp")
+
+#undef AM_LOAD_X_TESTS
+#undef AM_LOAD_W_TESTS
+#undef AM_STORE_X_TESTS
+#undef AM_STORE_W_TESTS
+#undef AM_SWP_X_TESTS
+#undef AM_SWP_W_TESTS
+
CLEANUP();
}
diff --git a/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc b/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc
index d35a7a23df..ecb4717013 100644
--- a/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc
@@ -288,13 +288,9 @@ TEST_F(DisasmIa320Test, DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic = BUILTIN_CODE(isolate(), ArrayFrom);
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
__ jmp(&L1);
__ jmp(Operand(ebx, ecx, times_4, 10000));
- __ jmp(ic, RelocInfo::CODE_TARGET);
__ nop();
Label Ljcc;
@@ -988,8 +984,8 @@ TEST_F(DisasmIa320Test, DisasmIa320) {
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
- Address begin = code->raw_instruction_start();
- Address end = code->raw_instruction_end();
+ Address begin = code->InstructionStart();
+ Address end = code->InstructionEnd();
disasm::Disassembler::Disassemble(stdout, reinterpret_cast<byte*>(begin),
reinterpret_cast<byte*>(end));
#endif
diff --git a/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc b/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc
index 7bc9ca42cb..7bff5b9925 100644
--- a/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc
@@ -378,7 +378,7 @@ TEST_F(DisasmRiscv64Test, RV32D) {
COMPARE(fsgnjx_d(ft0, ft8, fa5), "22fe2053 fsgnjx.d ft0, ft8, fa5");
COMPARE(fmin_d(ft0, ft8, fa5), "2afe0053 fmin.d ft0, ft8, fa5");
COMPARE(fmax_d(ft0, ft8, fa5), "2afe1053 fmax.d ft0, ft8, fa5");
- COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053 fcvt.s.d [RDN] ft0, t3");
+ COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053 fcvt.s.d [RDN] ft0, ft8");
COMPARE(fcvt_d_s(ft0, fa0), "42050053 fcvt.d.s ft0, fa0");
COMPARE(feq_d(a0, ft8, fa5), "a2fe2553 feq.d a0, ft8, fa5");
COMPARE(flt_d(a0, ft8, fa5), "a2fe1553 flt.d a0, ft8, fa5");
diff --git a/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc b/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc
index 2ac718c366..e309fe7ebf 100644
--- a/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc
@@ -49,7 +49,7 @@ using DisasmX64Test = TestWithIsolate;
namespace {
-Handle<CodeT> CreateDummyCode(Isolate* isolate) {
+Handle<Code> CreateDummyCode(Isolate* isolate) {
i::byte buffer[128];
Assembler assm(AssemblerOptions{},
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
@@ -59,7 +59,7 @@ Handle<CodeT> CreateDummyCode(Isolate* isolate) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- return ToCodeT(code, isolate);
+ return code;
}
} // namespace
@@ -82,7 +82,7 @@ TEST_F(DisasmX64Test, DisasmX64) {
__ bind(&L2);
__ call(rcx);
__ nop();
- Handle<CodeT> ic = CreateDummyCode(isolate());
+ Handle<Code> ic = CreateDummyCode(isolate());
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -305,8 +305,8 @@ TEST_F(DisasmX64Test, DisasmX64) {
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
- Address begin = code->raw_instruction_start();
- Address end = code->raw_instruction_end();
+ Address begin = code->InstructionStart();
+ Address end = code->InstructionEnd();
disasm::Disassembler::Disassemble(stdout, reinterpret_cast<byte*>(begin),
reinterpret_cast<byte*>(end));
#endif
@@ -568,6 +568,8 @@ TEST_F(DisasmX64Test, DisasmX64CheckOutput) {
COMPARE("4885948b10270000 REX.W testq rdx,[rbx+rcx*4+0x2710]",
testq(Operand(rbx, rcx, times_4, 10000), rdx));
+ COMPARE("48f7ac8b10270000 REX.W imulq [rbx+rcx*4+0x2710]",
+ imulq(Operand(rbx, rcx, times_4, 10000)));
COMPARE("486bd10c REX.W imulq rdx,rcx,0xc",
imulq(rdx, rcx, Immediate(12)));
COMPARE("4869d1e8030000 REX.W imulq rdx,rcx,0x3e8",
@@ -1445,6 +1447,13 @@ TEST_F(DisasmX64Test, DisasmX64YMMRegister) {
COMPARE("c5fe16ca vmovshdup ymm1,ymm2", vmovshdup(ymm1, ymm2));
COMPARE("c5f4c6da73 vshufps ymm3,ymm1,ymm2,0x73",
vshufps(ymm3, ymm1, ymm2, 115));
+ COMPARE("c5fee6ca vcvtdq2pd ymm1,xmm2", vcvtdq2pd(ymm1, xmm2));
+ COMPARE("c5fee68c8b10270000 vcvtdq2pd ymm1,[rbx+rcx*4+0x2710]",
+ vcvtdq2pd(ymm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("c5fe5bda vcvttps2dq ymm3,ymm2",
+ vcvttps2dq(ymm3, ymm2));
+ COMPARE("c5fe5b9c8b10270000 vcvttps2dq ymm3,[rbx+rcx*4+0x2710]",
+ vcvttps2dq(ymm3, Operand256(rbx, rcx, times_4, 10000)));
// vcmp
COMPARE("c5dcc2e900 vcmpps ymm5,ymm4,ymm1, (eq)",
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-arm-unittest.cc
index 6fa1bd5927..f7ec44e77f 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-arm-unittest.cc
@@ -13,7 +13,7 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// If we are running on android and the output is not redirected (i.e. ends up
// in the android log) then we cannot find the error message in the output. This
@@ -28,11 +28,11 @@ namespace internal {
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -40,7 +40,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -48,9 +48,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -62,7 +62,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -102,17 +102,17 @@ const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
template <typename T>
-class TurboAssemblerTestWithParam : public TurboAssemblerTest,
+class MacroAssemblerTestWithParam : public MacroAssemblerTest,
public ::testing::WithParamInterface<T> {};
-using TurboAssemblerTestMoveObjectAndSlot =
- TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+using MacroAssemblerTestMoveObjectAndSlot =
+ MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
-TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
const MoveObjectAndSlotTestCase test_case = GetParam();
TRACED_FOREACH(int32_t, offset, kOffsets) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ Push(r0);
__ Move(test_case.object, r1);
@@ -143,7 +143,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
__ RecordComment("--");
// The `result` pointer was saved on the stack.
- UseScratchRegisterScope temps(&tasm);
+ UseScratchRegisterScope temps(&masm);
Register scratch = temps.Acquire();
__ Pop(scratch);
__ str(dst_object, MemOperand(scratch));
@@ -152,7 +152,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ masm.GetCode(nullptr, &desc);
if (v8_flags.print_code) {
Handle<Code> code =
Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
@@ -179,8 +179,8 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
}
}
-INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
- TurboAssemblerTestMoveObjectAndSlot,
+INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest,
+ MacroAssemblerTestMoveObjectAndSlot,
::testing::ValuesIn(kMoveObjectAndSlotTestCases));
#undef __
diff --git a/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc
index 021b0423f3..3bbbc49096 100644
--- a/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc
@@ -1,129 +1,254 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/deoptimizer/deoptimizer.h"
-#include "src/heap/factory.h"
-#include "src/objects/objects-inl.h"
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
#include "src/utils/ostreams.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
namespace v8 {
namespace internal {
-namespace test_macro_assembler_arm64 {
-using MacroAssemblerArm64Test = TestWithIsolate;
+#define __ masm.
-using F0 = int();
+// If we are running on android and the output is not redirected (i.e. ends up
+// in the android log) then we cannot find the error message in the output. This
+// macro just returns the empty string in that case.
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define ERROR_MESSAGE(msg) ""
+#else
+#define ERROR_MESSAGE(msg) msg
+#endif
-#define __ masm.
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
-TEST_F(MacroAssemblerArm64Test, EmbeddedObj) {
-#ifdef V8_COMPRESS_POINTERS
- Isolate* isolate = i_isolate();
- HandleScope handles(isolate);
+class MacroAssemblerTest : public TestWithIsolate {};
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- Handle<HeapObject> old_array = isolate->factory()->NewFixedArray(2000);
- Handle<HeapObject> my_array = isolate->factory()->NewFixedArray(1000);
- __ Mov(w4, Immediate(my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
- __ Mov(x5, old_array);
- __ ret(x5);
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef DEBUG
- StdoutStream os;
- code->Print(os);
-#endif
+ {
+ AssemblerBufferWriteScope rw_scope(*buffer);
- // Collect garbage to ensure reloc info can be walked by the heap.
- CollectAllGarbage();
- CollectAllGarbage();
- CollectAllGarbage();
-
- PtrComprCageBase cage_base(isolate);
-
- // Test the user-facing reloc interface.
- const int mode_mask = RelocInfo::EmbeddedObjectModeMask();
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsCompressedEmbeddedObject(mode)) {
- CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base));
- } else {
- CHECK(RelocInfo::IsFullEmbeddedObject(mode));
- CHECK_EQ(*old_array, it.rinfo()->target_object(cage_base));
- }
+ __ CodeEntry();
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
}
-#endif // V8_COMPRESS_POINTERS
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
}
-TEST_F(MacroAssemblerArm64Test, DeoptExitSizeIsFixed) {
- Isolate* isolate = i_isolate();
- HandleScope handles(isolate);
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ {
+ AssemblerBufferWriteScope rw_scope(*buffer);
+
+ __ CodeEntry();
+
+ // Fail if the first parameter is 17.
+ __ Mov(w1, Immediate(17));
+ __ Cmp(w0, w1); // 1st parameter is in {w0}.
+ __ Check(Condition::ne, AbortReason::kNoReason);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ }
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
+}
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
- for (int i = 0; i < kDeoptimizeKindCount; i++) {
- DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
- Label before_exit;
- Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
- // Mirroring logic in code-generator.cc.
- if (kind == DeoptimizeKind::kLazy) {
- // CFI emits an extra instruction here.
- masm.BindExceptionHandler(&before_exit);
- } else {
- masm.bind(&before_exit);
+TEST_F(MacroAssemblerTest, CompareAndBranch) {
+ const int kTestCases[] = {-42, 0, 42};
+ static_assert(Condition::eq == 0);
+ static_assert(Condition::le == 13);
+ TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv
+ Condition cond = static_cast<Condition>(cc);
+ TRACED_FOREACH(int, imm, kTestCases) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate(), AssemblerOptions{},
+ CodeObjectRequired::kNo, buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ {
+ AssemblerBufferWriteScope rw_scope(*buffer);
+
+ __ CodeEntry();
+
+ Label start, lab;
+ __ Bind(&start);
+ __ CompareAndBranch(x0, Immediate(imm), cond, &lab);
+ if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) ||
+ (cond == ls))) { // One instruction generated
+ ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start));
+ } else { // Two instructions generated
+ ASSERT_EQ(static_cast<uint8_t>(2 * kInstrSize),
+ __ SizeOfCodeGeneratedSince(&start));
+ }
+ __ Cmp(x0, Immediate(imm));
+ __ Check(NegateCondition(cond),
+ AbortReason::kNoReason); // cond must not hold
+ __ Ret();
+ __ Bind(&lab); // Branch leads here
+ __ Cmp(x0, Immediate(imm));
+ __ Check(cond, AbortReason::kNoReason); // cond must hold
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ }
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ TRACED_FOREACH(int, n, kTestCases) { f.Call(n); }
}
- masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
- &before_exit);
- CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kEagerDeoptExitSize);
}
}
+struct MoveObjectAndSlotTestCase {
+ const char* comment;
+ Register dst_object;
+ Register dst_slot;
+ Register object;
+ Register offset_register = no_reg;
+};
+
+const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
+ {"no overlap", x0, x1, x2},
+ {"no overlap", x0, x1, x2, x3},
+
+ {"object == dst_object", x2, x1, x2},
+ {"object == dst_object", x2, x1, x2, x3},
+
+ {"object == dst_slot", x1, x2, x2},
+ {"object == dst_slot", x1, x2, x2, x3},
+
+ {"offset == dst_object", x0, x1, x2, x0},
+
+ {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
+
+ {"offset == dst_slot", x0, x1, x2, x1},
+
+ {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
+
+// Make sure we include offsets that cannot be encoded in an add instruction.
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
+
+template <typename T>
+class MacroAssemblerTestWithParam : public MacroAssemblerTest,
+ public ::testing::WithParamInterface<T> {};
+
+using MacroAssemblerTestMoveObjectAndSlot =
+ MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+
+TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+ const MoveObjectAndSlotTestCase test_case = GetParam();
+ TRACED_FOREACH(int32_t, offset, kOffsets) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+
+ {
+ AssemblerBufferWriteScope rw_buffer_scope(*buffer);
+
+ __ CodeEntry();
+ __ Push(x0, padreg);
+ __ Mov(test_case.object, x1);
+
+ Register src_object = test_case.object;
+ Register dst_object = test_case.dst_object;
+ Register dst_slot = test_case.dst_slot;
+
+ Operand offset_operand(0);
+ if (test_case.offset_register == no_reg) {
+ offset_operand = Operand(offset);
+ } else {
+ __ Mov(test_case.offset_register, Operand(offset));
+ offset_operand = Operand(test_case.offset_register);
+ }
+
+ std::stringstream comment;
+ comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
+ << dst_object << ", " << dst_slot << ", " << src_object << ", ";
+ if (test_case.offset_register == no_reg) {
+ comment << "#" << offset;
+ } else {
+ comment << test_case.offset_register;
+ }
+ comment << ") --";
+ __ RecordComment(comment.str().c_str());
+ __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
+ __ RecordComment("--");
+
+ // The `result` pointer was saved on the stack.
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Pop(padreg, scratch);
+ __ Str(dst_object, MemOperand(scratch));
+ __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
+
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+ if (v8_flags.print_code) {
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
+ StdoutStream os;
+ code->Print(os);
+ }
+ }
+
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
+ buffer->start());
+
+ byte* object = new byte[offset];
+ byte* result[] = {nullptr, nullptr};
+
+ f.Call(result, object);
+
+ // The first element must be the address of the object, and the second the
+ // slot addressed by `offset`.
+ EXPECT_EQ(result[0], &object[0]);
+ EXPECT_EQ(result[1], &object[offset]);
+
+ delete[] object;
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest,
+ MacroAssemblerTestMoveObjectAndSlot,
+ ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
+
#undef __
+#undef ERROR_MESSAGE
-} // namespace test_macro_assembler_arm64
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-ia32-unittest.cc
index f0cb96d47d..cbf628ba88 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-ia32-unittest.cc
@@ -11,17 +11,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -29,16 +29,16 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ ret(0);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-loong64-unittest.cc
index 5334fb4be3..a2cc213cae 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-loong64-unittest.cc
@@ -12,33 +12,33 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the loong64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-mips64-unittest.cc
index c954ffcc65..92e3b1d6f8 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-mips64-unittest.cc
@@ -12,17 +12,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-ppc-unittest.cc
index 93ae7abafc..aabb988b29 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-ppc-unittest.cc
@@ -12,17 +12,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the ppc assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -62,23 +62,24 @@ TEST_F(TurboAssemblerTest, TestCheck) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, ReverseBitsU64) {
+TEST_F(MacroAssemblerTest, ReverseBitsU64) {
struct {
- uint64_t expected; uint64_t input;
+ uint64_t expected;
+ uint64_t input;
} values[] = {
- {0x0000000000000000, 0x0000000000000000},
- {0xffffffffffffffff, 0xffffffffffffffff},
- {0x8000000000000000, 0x0000000000000001},
- {0x0000000000000001, 0x8000000000000000},
- {0x800066aa22cc4488, 0x1122334455660001},
- {0x1122334455660001, 0x800066aa22cc4488},
- {0xffffffff00000000, 0x00000000ffffffff},
- {0x00000000ffffffff, 0xffffffff00000000},
- {0xff01020304050607, 0xe060a020c04080ff},
- {0xe060a020c04080ff, 0xff01020304050607},
+ {0x0000000000000000, 0x0000000000000000},
+ {0xffffffffffffffff, 0xffffffffffffffff},
+ {0x8000000000000000, 0x0000000000000001},
+ {0x0000000000000001, 0x8000000000000000},
+ {0x800066aa22cc4488, 0x1122334455660001},
+ {0x1122334455660001, 0x800066aa22cc4488},
+ {0xffffffff00000000, 0x00000000ffffffff},
+ {0x00000000ffffffff, 0xffffffff00000000},
+ {0xff01020304050607, 0xe060a020c04080ff},
+ {0xe060a020c04080ff, 0xff01020304050607},
};
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -87,28 +88,26 @@ TEST_F(TurboAssemblerTest, ReverseBitsU64) {
__ Pop(r4, r5);
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(),
- buffer->start());
- for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) {
+ auto f =
+ GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(), buffer->start());
+ for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) {
CHECK_EQ(values[i].expected, f.Call(values[i].input));
}
}
-TEST_F(TurboAssemblerTest, ReverseBitsU32) {
+TEST_F(MacroAssemblerTest, ReverseBitsU32) {
struct {
- uint64_t expected; uint64_t input;
+ uint64_t expected;
+ uint64_t input;
} values[] = {
- {0x00000000, 0x00000000},
- {0xffffffff, 0xffffffff},
- {0x00000001, 0x80000000},
- {0x80000000, 0x00000001},
- {0x22334455, 0xaa22cc44},
- {0xaa22cc44, 0x22334455},
+ {0x00000000, 0x00000000}, {0xffffffff, 0xffffffff},
+ {0x00000001, 0x80000000}, {0x80000000, 0x00000001},
+ {0x22334455, 0xaa22cc44}, {0xaa22cc44, 0x22334455},
};
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -117,11 +116,11 @@ TEST_F(TurboAssemblerTest, ReverseBitsU32) {
__ Pop(r4, r5);
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(),
- buffer->start());
- for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) {
+ auto f =
+ GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(), buffer->start());
+ for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) {
CHECK_EQ(values[i].expected, f.Call(values[i].input));
}
}
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-riscv-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-riscv-unittest.cc
index afda8d3603..8e74ae692c 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-riscv-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-riscv-unittest.cc
@@ -12,33 +12,33 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ masm.GetCode(nullptr, &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ masm.GetCode(nullptr, &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-s390-unittest.cc
index d86a09f67c..b371c841c5 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-s390-unittest.cc
@@ -12,17 +12,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the s390 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc
index b7e5b0ffbe..9924b620ee 100644
--- a/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc
@@ -40,6 +40,57 @@
namespace v8 {
namespace internal {
+
+#define __ masm.
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+using MacroAssemblerX64Test = TestWithIsolate;
+
+TEST_F(MacroAssemblerX64Test, TestHardAbort) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(MacroAssemblerX64Test, TestCheck) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ movl(rax, Immediate(17));
+ __ cmpl(rax, arg_reg_1);
+ __ Check(Condition::not_equal, AbortReason::kNoReason);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
namespace test_macro_assembler_x64 {
// Test the x64 assembler by compiling some simple functions into
@@ -51,8 +102,6 @@ namespace test_macro_assembler_x64 {
// This calling convention is used on Linux, with GCC, and on Mac OS,
// with GCC. A different convention is used on 64-bit windows.
-using MacroAssemblerX64Test = TestWithIsolate;
-
using F0 = int();
#define __ masm->
@@ -60,14 +109,14 @@ using F0 = int();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
__ pushq(kPtrComprCageBaseRegister);
#endif
__ InitializeRootRegister();
}
static void ExitCode(MacroAssembler* masm) {
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
__ popq(kPtrComprCageBaseRegister);
#endif
__ popq(kRootRegister);
@@ -468,7 +517,7 @@ TEST_F(MacroAssemblerX64Test, EmbeddedObj) {
code->Print(os);
#endif
using myF0 = Address();
- auto f = GeneratedCode<myF0>::FromAddress(isolate, code->entry());
+ auto f = GeneratedCode<myF0>::FromAddress(isolate, code->code_entry_point());
Object result = Object(f.Call());
CHECK_EQ(old_array->ptr(), result.ptr());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
deleted file mode 100644
index 77123ef565..0000000000
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/simulator.h"
-#include "src/utils/ostreams.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ tasm.
-
-// If we are running on android and the output is not redirected (i.e. ends up
-// in the android log) then we cannot find the error message in the output. This
-// macro just returns the empty string in that case.
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-#define ERROR_MESSAGE(msg) ""
-#else
-#define ERROR_MESSAGE(msg) msg
-#endif
-
-// Test the x64 assembler by compiling some simple functions into
-// a buffer and executing them. These tests do not initialize the
-// V8 library, create a context, or use any V8 objects.
-
-class TurboAssemblerTest : public TestWithIsolate {};
-
-TEST_F(TurboAssemblerTest, TestHardAbort) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- {
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- __ CodeEntry();
-
- __ Abort(AbortReason::kNoReason);
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- }
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
-
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
-}
-
-TEST_F(TurboAssemblerTest, TestCheck) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- {
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- __ CodeEntry();
-
- // Fail if the first parameter is 17.
- __ Mov(w1, Immediate(17));
- __ Cmp(w0, w1); // 1st parameter is in {w0}.
- __ Check(Condition::ne, AbortReason::kNoReason);
- __ Ret();
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- }
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
- f.Call(0);
- f.Call(18);
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
-}
-
-TEST_F(TurboAssemblerTest, CompareAndBranch) {
- const int kTestCases[] = {-42, 0, 42};
- static_assert(Condition::eq == 0);
- static_assert(Condition::le == 13);
- TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv
- Condition cond = static_cast<Condition>(cc);
- TRACED_FOREACH(int, imm, kTestCases) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{},
- CodeObjectRequired::kNo, buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- {
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- __ CodeEntry();
-
- Label start, lab;
- __ Bind(&start);
- __ CompareAndBranch(x0, Immediate(imm), cond, &lab);
- if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) ||
- (cond == ls))) { // One instruction generated
- ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start));
- } else { // Two instructions generated
- ASSERT_EQ(static_cast<uint8_t>(2 * kInstrSize),
- __ SizeOfCodeGeneratedSince(&start));
- }
- __ Cmp(x0, Immediate(imm));
- __ Check(NegateCondition(cond),
- AbortReason::kNoReason); // cond must not hold
- __ Ret();
- __ Bind(&lab); // Branch leads here
- __ Cmp(x0, Immediate(imm));
- __ Check(cond, AbortReason::kNoReason); // cond must hold
- __ Ret();
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- }
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
- TRACED_FOREACH(int, n, kTestCases) { f.Call(n); }
- }
- }
-}
-
-struct MoveObjectAndSlotTestCase {
- const char* comment;
- Register dst_object;
- Register dst_slot;
- Register object;
- Register offset_register = no_reg;
-};
-
-const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
- {"no overlap", x0, x1, x2},
- {"no overlap", x0, x1, x2, x3},
-
- {"object == dst_object", x2, x1, x2},
- {"object == dst_object", x2, x1, x2, x3},
-
- {"object == dst_slot", x1, x2, x2},
- {"object == dst_slot", x1, x2, x2, x3},
-
- {"offset == dst_object", x0, x1, x2, x0},
-
- {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
-
- {"offset == dst_slot", x0, x1, x2, x1},
-
- {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
-
-// Make sure we include offsets that cannot be encoded in an add instruction.
-const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
-
-template <typename T>
-class TurboAssemblerTestWithParam : public TurboAssemblerTest,
- public ::testing::WithParamInterface<T> {};
-
-using TurboAssemblerTestMoveObjectAndSlot =
- TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
-
-TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
- const MoveObjectAndSlotTestCase test_case = GetParam();
- TRACED_FOREACH(int32_t, offset, kOffsets) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
-
- {
- AssemblerBufferWriteScope rw_buffer_scope(*buffer);
-
- __ CodeEntry();
- __ Push(x0, padreg);
- __ Mov(test_case.object, x1);
-
- Register src_object = test_case.object;
- Register dst_object = test_case.dst_object;
- Register dst_slot = test_case.dst_slot;
-
- Operand offset_operand(0);
- if (test_case.offset_register == no_reg) {
- offset_operand = Operand(offset);
- } else {
- __ Mov(test_case.offset_register, Operand(offset));
- offset_operand = Operand(test_case.offset_register);
- }
-
- std::stringstream comment;
- comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
- << dst_object << ", " << dst_slot << ", " << src_object << ", ";
- if (test_case.offset_register == no_reg) {
- comment << "#" << offset;
- } else {
- comment << test_case.offset_register;
- }
- comment << ") --";
- __ RecordComment(comment.str().c_str());
- __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
- __ RecordComment("--");
-
- // The `result` pointer was saved on the stack.
- UseScratchRegisterScope temps(&tasm);
- Register scratch = temps.AcquireX();
- __ Pop(padreg, scratch);
- __ Str(dst_object, MemOperand(scratch));
- __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
-
- __ Ret();
-
- CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
- if (v8_flags.print_code) {
- Handle<Code> code =
- Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING)
- .Build();
- StdoutStream os;
- code->Print(os);
- }
- }
-
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
- buffer->start());
-
- byte* object = new byte[offset];
- byte* result[] = {nullptr, nullptr};
-
- f.Call(result, object);
-
- // The first element must be the address of the object, and the second the
- // slot addressed by `offset`.
- EXPECT_EQ(result[0], &object[0]);
- EXPECT_EQ(result[1], &object[offset]);
-
- delete[] object;
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
- TurboAssemblerTestMoveObjectAndSlot,
- ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
-
-#undef __
-#undef ERROR_MESSAGE
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
deleted file mode 100644
index 43dd6b79d6..0000000000
--- a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/simulator.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ tasm.
-
-// Test the x64 assembler by compiling some simple functions into
-// a buffer and executing them. These tests do not initialize the
-// V8 library, create a context, or use any V8 objects.
-
-class TurboAssemblerTest : public TestWithIsolate {};
-
-TEST_F(TurboAssemblerTest, TestHardAbort) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- __ Abort(AbortReason::kNoReason);
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- buffer->MakeExecutable();
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
-
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
-}
-
-TEST_F(TurboAssemblerTest, TestCheck) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- // Fail if the first parameter is 17.
- __ movl(rax, Immediate(17));
- __ cmpl(rax, arg_reg_1);
- __ Check(Condition::not_equal, AbortReason::kNoReason);
- __ ret(0);
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- buffer->MakeExecutable();
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
- f.Call(0);
- f.Call(18);
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/base/ieee754-unittest.cc b/deps/v8/test/unittests/base/ieee754-unittest.cc
index dbba16a313..e22f3b86b2 100644
--- a/deps/v8/test/unittests/base/ieee754-unittest.cc
+++ b/deps/v8/test/unittests/base/ieee754-unittest.cc
@@ -131,6 +131,175 @@ TEST(Ieee754, Atanh) {
EXPECT_DOUBLE_EQ(0.54930614433405478, atanh(0.5));
}
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+TEST(Ieee754, LibmCos) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(libm_cos(kQNaN), IsNaN());
+ EXPECT_THAT(libm_cos(kSNaN), IsNaN());
+ EXPECT_THAT(libm_cos(kInfinity), IsNaN());
+ EXPECT_THAT(libm_cos(-kInfinity), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / libm_cos(-0.0));
+ EXPECT_EQ(1.0, 1 / libm_cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, libm_cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, libm_cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, libm_cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, libm_cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, libm_cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, libm_cos(0.39269908169872414));
+ // Test KERNELTAN for |x| < 0.67434.
+ EXPECT_EQ(0.9238795325112867, libm_cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, libm_cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, libm_cos(0.25));
+ EXPECT_EQ(0.8775825618903728, libm_cos(0.5));
+ EXPECT_EQ(0.7073882691671998, libm_cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, libm_cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, libm_cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, libm_cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, libm_cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, libm_cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, libm_cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, libm_cos(1048575.0 / 2 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, libm_cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, libm_cos(-kTwo120));
+}
+
+TEST(Ieee754, LibmSin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(libm_sin(kQNaN), IsNaN());
+ EXPECT_THAT(libm_sin(kSNaN), IsNaN());
+ EXPECT_THAT(libm_sin(kInfinity), IsNaN());
+ EXPECT_THAT(libm_sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, Divide(1.0, libm_sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, libm_sin(0.0)));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, libm_sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, libm_sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, libm_sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, libm_sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, libm_sin(0.5));
+ EXPECT_EQ(-0.479425538604203, libm_sin(-0.5));
+ EXPECT_EQ(1, libm_sin(kPI / 2.0));
+ EXPECT_EQ(-1, libm_sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, libm_sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, libm_sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, libm_sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, libm_sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, libm_sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, libm_sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, libm_sin(1048576.0 / 4 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(0.377820109360752e0, libm_sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, libm_sin(-kTwo120));
+}
+
+TEST(Ieee754, FdlibmCos) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(fdlibm_cos(kQNaN), IsNaN());
+ EXPECT_THAT(fdlibm_cos(kSNaN), IsNaN());
+ EXPECT_THAT(fdlibm_cos(kInfinity), IsNaN());
+ EXPECT_THAT(fdlibm_cos(-kInfinity), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / fdlibm_cos(-0.0));
+ EXPECT_EQ(1.0, 1 / fdlibm_cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, fdlibm_cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, fdlibm_cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, fdlibm_cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, fdlibm_cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, fdlibm_cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, fdlibm_cos(0.39269908169872414));
+ // Test KERNELTAN for |x| < 0.67434.
+ EXPECT_EQ(0.9238795325112867, fdlibm_cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, fdlibm_cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, fdlibm_cos(0.25));
+ EXPECT_EQ(0.8775825618903728, fdlibm_cos(0.5));
+ EXPECT_EQ(0.7073882691671998, fdlibm_cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, fdlibm_cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, fdlibm_cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, fdlibm_cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, fdlibm_cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, fdlibm_cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, fdlibm_cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, fdlibm_cos(1048575.0 / 2 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, fdlibm_cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, fdlibm_cos(-kTwo120));
+}
+
+TEST(Ieee754, FdlibmSin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(fdlibm_sin(kQNaN), IsNaN());
+ EXPECT_THAT(fdlibm_sin(kSNaN), IsNaN());
+ EXPECT_THAT(fdlibm_sin(kInfinity), IsNaN());
+ EXPECT_THAT(fdlibm_sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, Divide(1.0, fdlibm_sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, fdlibm_sin(0.0)));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, fdlibm_sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, fdlibm_sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, fdlibm_sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, fdlibm_sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, fdlibm_sin(0.5));
+ EXPECT_EQ(-0.479425538604203, fdlibm_sin(-0.5));
+ EXPECT_EQ(1, fdlibm_sin(kPI / 2.0));
+ EXPECT_EQ(-1, fdlibm_sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, fdlibm_sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, fdlibm_sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, fdlibm_sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, fdlibm_sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, fdlibm_sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, fdlibm_sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, fdlibm_sin(1048576.0 / 4 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(0.377820109360752e0, fdlibm_sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, fdlibm_sin(-kTwo120));
+}
+
+#else
+
TEST(Ieee754, Cos) {
// Test values mentioned in the EcmaScript spec.
EXPECT_THAT(cos(kQNaN), IsNaN());
@@ -177,6 +346,45 @@ TEST(Ieee754, Cos) {
EXPECT_EQ(-0.9258790228548379e0, cos(-kTwo120));
}
+TEST(Ieee754, Sin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(sin(kQNaN), IsNaN());
+ EXPECT_THAT(sin(kSNaN), IsNaN());
+ EXPECT_THAT(sin(kInfinity), IsNaN());
+ EXPECT_THAT(sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, Divide(1.0, sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, sin(0.0)));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, sin(0.5));
+ EXPECT_EQ(-0.479425538604203, sin(-0.5));
+ EXPECT_EQ(1, sin(kPI / 2.0));
+ EXPECT_EQ(-1, sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
+}
+
+#endif
+
TEST(Ieee754, Cosh) {
// Test values mentioned in the EcmaScript spec.
EXPECT_THAT(cosh(kQNaN), IsNaN());
@@ -306,43 +514,6 @@ TEST(Ieee754, Cbrt) {
EXPECT_EQ(46.415888336127786, cbrt(100000));
}
-TEST(Ieee754, Sin) {
- // Test values mentioned in the EcmaScript spec.
- EXPECT_THAT(sin(kQNaN), IsNaN());
- EXPECT_THAT(sin(kSNaN), IsNaN());
- EXPECT_THAT(sin(kInfinity), IsNaN());
- EXPECT_THAT(sin(-kInfinity), IsNaN());
-
- // Tests for sin for |x| < pi/4
- EXPECT_EQ(-kInfinity, Divide(1.0, sin(-0.0)));
- EXPECT_EQ(kInfinity, Divide(1.0, sin(0.0)));
- // sin(x) = x for x < 2^-27
- EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
- EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
- // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
- EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
- EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
-
- // Tests for sin.
- EXPECT_EQ(0.479425538604203, sin(0.5));
- EXPECT_EQ(-0.479425538604203, sin(-0.5));
- EXPECT_EQ(1, sin(kPI / 2.0));
- EXPECT_EQ(-1, sin(-kPI / 2.0));
- // Test that sin(Math.PI) != 0 since Math.PI is not exact.
- EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
- EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
- // Test sin for various phases.
- EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
- EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
- EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
- EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
- EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
-
- // Test Hayne-Panek reduction.
- EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
- EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
-}
-
TEST(Ieee754, Sinh) {
// Test values mentioned in the EcmaScript spec.
EXPECT_THAT(sinh(kQNaN), IsNaN());
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index 4858e08544..26d740ca52 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -498,29 +498,39 @@ TEST(ThreadTicks, MAYBE_ThreadNow) {
EXPECT_GT(begin_thread, ThreadTicks());
int iterations_count = 0;
+#if V8_OS_WIN && V8_HOST_ARCH_ARM64
+ // The implementation of ThreadTicks::Now() is quite imprecise on arm64
+ // Windows, so the following test often fails with the default 10ms. By
+ // increasing to 100ms, we can make the test reliable.
+ const int limit_ms = 100;
+#else
+ const int limit_ms = 10;
+#endif
+ const int limit_us = limit_ms * 1000;
+
// Some systems have low resolution thread timers, this code makes sure
// that thread time has progressed by at least one tick.
// Limit waiting to 10ms to prevent infinite loops.
while (ThreadTicks::Now() == begin_thread &&
- ((TimeTicks::Now() - begin).InMicroseconds() < 10000)) {
+ ((TimeTicks::Now() - begin).InMicroseconds() < limit_us)) {
}
EXPECT_GT(ThreadTicks::Now(), begin_thread);
do {
// Sleep for 10 milliseconds to get the thread de-scheduled.
- OS::Sleep(base::TimeDelta::FromMilliseconds(10));
+ OS::Sleep(base::TimeDelta::FromMilliseconds(limit_ms));
end_thread = ThreadTicks::Now();
end = TimeTicks::Now();
delta = end - begin;
EXPECT_LE(++iterations_count, 2); // fail after 2 attempts.
} while (delta.InMicroseconds() <
- 10000); // Make sure that the OS did sleep for at least 10 ms.
+ limit_us); // Make sure that the OS did sleep for at least limit_ms ms.
TimeDelta delta_thread = end_thread - begin_thread;
// Make sure that some thread time have elapsed.
EXPECT_GT(delta_thread.InMicroseconds(), 0);
// But the thread time is at least 9ms less than clock time.
TimeDelta difference = delta - delta_thread;
- EXPECT_GE(difference.InMicroseconds(), 9000);
+ EXPECT_GE(difference.InMicroseconds(), limit_us * 9 / 10);
}
}
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
index 2af95c93f6..cd519d852c 100644
--- a/deps/v8/test/unittests/base/threaded-list-unittest.cc
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -316,5 +316,42 @@ TEST_F(ThreadedListTest, ConstIterComp) {
CHECK(found_first);
}
+TEST_F(ThreadedListTest, RemoveAt) {
+ auto it = list.begin();
+
+ // Removing first
+ ThreadedListTestNode* to_remove = list.first();
+ it = list.RemoveAt(it);
+ EXPECT_EQ(to_remove, &nodes[0]);
+ EXPECT_EQ(list.first(), &nodes[1]);
+ EXPECT_EQ(it, list.begin());
+ EXPECT_EQ(*it, &nodes[1]);
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(to_remove), nullptr);
+ EXPECT_FALSE(list.Contains(to_remove));
+ EXPECT_EQ(list.LengthForTest(), 4);
+ list.Verify();
+
+ // Removing in the middle
+ ++it;
+ to_remove = *it;
+ it = list.RemoveAt(it);
+ EXPECT_EQ(*it, &nodes[3]);
+ EXPECT_FALSE(list.Contains(to_remove));
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(to_remove), nullptr);
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(&nodes[1]), &nodes[3]);
+ EXPECT_EQ(list.LengthForTest(), 3);
+ list.Verify();
+
+ // Removing last
+ ++it;
+ to_remove = *it;
+ it = list.RemoveAt(it);
+ EXPECT_EQ(it, list.end());
+ EXPECT_FALSE(list.Contains(to_remove));
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(&nodes[4]), nullptr);
+ EXPECT_EQ(list.LengthForTest(), 2);
+ list.Verify();
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/vector-unittest.cc b/deps/v8/test/unittests/base/vector-unittest.cc
index 32058e42af..d7c6fc37f7 100644
--- a/deps/v8/test/unittests/base/vector-unittest.cc
+++ b/deps/v8/test/unittests/base/vector-unittest.cc
@@ -60,7 +60,7 @@ TEST(VectorTest, Equals) {
EXPECT_TRUE(vec3_char != vec1_const_char);
}
-TEST(OwnedVectorConstruction, Equals) {
+TEST(OwnedVectorTest, Equals) {
auto int_vec = base::OwnedVector<int>::New(4);
EXPECT_EQ(4u, int_vec.size());
auto find_non_zero = [](int i) { return i != 0; };
@@ -76,6 +76,31 @@ TEST(OwnedVectorConstruction, Equals) {
EXPECT_EQ(init_vec1.as_vector(), init_vec2.as_vector());
}
+TEST(OwnedVectorTest, MoveConstructionAndAssignment) {
+ constexpr int kValues[] = {4, 11, 3};
+ auto int_vec = base::OwnedVector<int>::Of(kValues);
+ EXPECT_EQ(3u, int_vec.size());
+
+ auto move_constructed_vec = std::move(int_vec);
+ EXPECT_EQ(move_constructed_vec.as_vector(), base::ArrayVector(kValues));
+
+ auto move_assigned_to_empty = base::OwnedVector<int>{};
+ move_assigned_to_empty = std::move(move_constructed_vec);
+ EXPECT_EQ(move_assigned_to_empty.as_vector(), base::ArrayVector(kValues));
+
+ auto move_assigned_to_non_empty = base::OwnedVector<int>::New(2);
+ move_assigned_to_non_empty = std::move(move_assigned_to_empty);
+ EXPECT_EQ(move_assigned_to_non_empty.as_vector(), base::ArrayVector(kValues));
+
+ // All but the last vector must be empty (length 0, nullptr data).
+ EXPECT_TRUE(int_vec.empty());
+ EXPECT_TRUE(int_vec.begin() == nullptr);
+ EXPECT_TRUE(move_constructed_vec.empty());
+ EXPECT_TRUE(move_constructed_vec.begin() == nullptr);
+ EXPECT_TRUE(move_assigned_to_empty.empty());
+ EXPECT_TRUE(move_assigned_to_empty.begin() == nullptr);
+}
+
// Test that the constexpr factory methods work.
TEST(VectorTest, ConstexprFactories) {
static constexpr int kInit1[] = {4, 11, 3};
diff --git a/deps/v8/test/unittests/codegen/code-layout-unittest.cc b/deps/v8/test/unittests/codegen/code-layout-unittest.cc
index 1586c3a27d..40dbdea5dc 100644
--- a/deps/v8/test/unittests/codegen/code-layout-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-layout-unittest.cc
@@ -47,11 +47,10 @@ TEST_F(CodeLayoutTest, CodeLayoutWithoutUnwindingInfo) {
.Build();
CHECK(!code->has_unwinding_info());
- CHECK_EQ(code->raw_instruction_size(), buffer_size);
- CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->raw_instruction_start()),
- buffer, buffer_size));
- CHECK_EQ(static_cast<int>(code->raw_instruction_end() -
- code->raw_instruction_start()),
+ CHECK_EQ(code->InstructionSize(), buffer_size);
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->InstructionStart()), buffer,
+ buffer_size));
+ CHECK_EQ(static_cast<int>(code->InstructionEnd() - code->InstructionStart()),
buffer_size);
}
@@ -94,16 +93,16 @@ TEST_F(CodeLayoutTest, CodeLayoutWithUnwindingInfo) {
.Build();
CHECK(code->has_unwinding_info());
- CHECK_EQ(code->raw_body_size(), buffer_size + unwinding_info_size);
- CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->raw_instruction_start()),
- buffer, buffer_size));
+ CHECK_EQ(code->body_size(), buffer_size + unwinding_info_size);
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->InstructionStart()), buffer,
+ buffer_size));
CHECK_EQ(code->unwinding_info_size(), unwinding_info_size);
CHECK_EQ(memcmp(reinterpret_cast<void*>(code->unwinding_info_start()),
unwinding_info, unwinding_info_size),
0);
- CHECK_EQ(static_cast<int>(code->unwinding_info_end() -
- code->raw_instruction_start()),
- buffer_size + unwinding_info_size);
+ CHECK_EQ(
+ static_cast<int>(code->unwinding_info_end() - code->InstructionStart()),
+ buffer_size + unwinding_info_size);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/codegen/code-pages-unittest.cc b/deps/v8/test/unittests/codegen/code-pages-unittest.cc
index 6fc67e4ed0..e8a156581e 100644
--- a/deps/v8/test/unittests/codegen/code-pages-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-pages-unittest.cc
@@ -149,11 +149,11 @@ TEST_F(CodePagesTest, OptimizedCodeWithCodeRange) {
Handle<JSFunction> foo =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*local_foo));
- CodeT codet = foo->code();
+ Code code = foo->code();
// We don't produce optimized code when run with --no-turbofan and
// --no-maglev.
- if (!codet.is_optimized_code()) return;
- Code foo_code = FromCodeT(codet);
+ if (!code.is_optimized_code()) return;
+ InstructionStream foo_code = FromCode(code);
EXPECT_TRUE(i_isolate()->heap()->InSpace(foo_code, CODE_SPACE));
@@ -199,11 +199,11 @@ TEST_F(CodePagesTest, OptimizedCodeWithCodePages) {
EXPECT_TRUE(v8_flags.always_sparkplug);
return;
}
- CodeT codet = foo->code();
+ Code code = foo->code();
// We don't produce optimized code when run with --no-turbofan and
// --no-maglev.
- if (!codet.is_optimized_code()) return;
- Code foo_code = FromCodeT(codet);
+ if (!code.is_optimized_code()) return;
+ InstructionStream foo_code = FromCode(code);
EXPECT_TRUE(i_isolate()->heap()->InSpace(foo_code, CODE_SPACE));
@@ -268,6 +268,8 @@ TEST_F(CodePagesTest, LargeCodeObject) {
// We don't want incremental marking to start which could cause the code to
// not be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope(i_isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
if (!i_isolate()->RequiresCodeRange() && !kHaveCodePages) return;
@@ -293,18 +295,20 @@ TEST_F(CodePagesTest, LargeCodeObject) {
Handle<Code> foo_code =
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
.Build();
+ Handle<InstructionStream> foo_istream(foo_code->instruction_stream(),
+ i_isolate());
- EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_code, CODE_LO_SPACE));
+ EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_istream, CODE_LO_SPACE));
std::vector<MemoryRange>* pages = i_isolate()->GetCodePages();
if (i_isolate()->RequiresCodeRange()) {
- EXPECT_TRUE(PagesContainsAddress(pages, foo_code->address()));
+ EXPECT_TRUE(PagesContainsAddress(pages, foo_istream->address()));
} else {
- EXPECT_TRUE(PagesHasExactPage(pages, foo_code->address()));
+ EXPECT_TRUE(PagesHasExactPage(pages, foo_istream->address()));
}
- stale_code_address = foo_code->address();
+ stale_code_address = foo_istream->address();
}
// Delete the large code object.
@@ -383,6 +387,8 @@ TEST_F(CodePagesTest, LargeCodeObjectWithSignalHandler) {
// We don't want incremental marking to start which could cause the code to
// not be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope(i_isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
if (!i_isolate()->RequiresCodeRange() && !kHaveCodePages) return;
@@ -417,8 +423,10 @@ TEST_F(CodePagesTest, LargeCodeObjectWithSignalHandler) {
Handle<Code> foo_code =
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
.Build();
+ Handle<InstructionStream> foo_istream(foo_code->instruction_stream(),
+ i_isolate());
- EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_code, CODE_LO_SPACE));
+ EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_istream, CODE_LO_SPACE));
// Do a synchronous sample to ensure that we capture the state with the
// extra code page.
@@ -429,12 +437,12 @@ TEST_F(CodePagesTest, LargeCodeObjectWithSignalHandler) {
std::vector<MemoryRange> pages =
SamplingThread::DoSynchronousSample(isolate());
if (i_isolate()->RequiresCodeRange()) {
- EXPECT_TRUE(PagesContainsAddress(&pages, foo_code->address()));
+ EXPECT_TRUE(PagesContainsAddress(&pages, foo_istream->address()));
} else {
- EXPECT_TRUE(PagesHasExactPage(&pages, foo_code->address()));
+ EXPECT_TRUE(PagesHasExactPage(&pages, foo_istream->address()));
}
- stale_code_address = foo_code->address();
+ stale_code_address = foo_istream->address();
}
// Start async sampling again to detect threading issues.
@@ -459,6 +467,8 @@ TEST_F(CodePagesTest, Sorted) {
// We don't want incremental marking to start which could cause the code to
// not be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope(i_isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
if (!i_isolate()->RequiresCodeRange() && !kHaveCodePages) return;
@@ -487,11 +497,14 @@ TEST_F(CodePagesTest, Sorted) {
};
{
HandleScope outer_scope(i_isolate());
- Handle<Code> code1, code3;
+ Handle<InstructionStream> code1, code3;
Address code2_address;
- code1 = Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
- .Build();
+ code1 =
+ handle(Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
+ .Build()
+ ->instruction_stream(),
+ i_isolate());
EXPECT_TRUE(i_isolate()->heap()->InSpace(*code1, CODE_LO_SPACE));
{
@@ -499,12 +512,17 @@ TEST_F(CodePagesTest, Sorted) {
// Create three large code objects, we'll delete the middle one and check
// everything is still sorted.
- Handle<Code> code2 =
+ Handle<InstructionStream> code2 = handle(
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
- .Build();
+ .Build()
+ ->instruction_stream(),
+ i_isolate());
EXPECT_TRUE(i_isolate()->heap()->InSpace(*code2, CODE_LO_SPACE));
- code3 = Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
- .Build();
+ code3 = handle(
+ Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
+ .Build()
+ ->instruction_stream(),
+ i_isolate());
EXPECT_TRUE(i_isolate()->heap()->InSpace(*code3, CODE_LO_SPACE));
code2_address = code2->address();
diff --git a/deps/v8/test/unittests/codegen/factory-unittest.cc b/deps/v8/test/unittests/codegen/factory-unittest.cc
index 3fb9140d1b..7d6f6ea45c 100644
--- a/deps/v8/test/unittests/codegen/factory-unittest.cc
+++ b/deps/v8/test/unittests/codegen/factory-unittest.cc
@@ -35,7 +35,8 @@ TEST_F(FactoryCodeBuilderTest, Factory_CodeBuilder) {
Handle<Code> code =
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION).Build();
- CHECK(i_isolate()->heap()->InSpace(*code, CODE_LO_SPACE));
+ CHECK(
+ i_isolate()->heap()->InSpace(code->instruction_stream(), CODE_LO_SPACE));
#if VERIFY_HEAP
code->ObjectVerify(i_isolate());
#endif
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 1eb4320041..57b22c11ac 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -448,19 +448,28 @@ TEST_F(BytecodeAnalysisTest, SuspendPoint) {
interpreter::BytecodeJumpTable* gen_jump_table =
builder.AllocateJumpTable(1, 0);
+ builder.SwitchOnGeneratorState(reg_gen, gen_jump_table);
+ expected_liveness.emplace_back("..L.", "..L.");
+
+ builder.LoadUndefined();
+ expected_liveness.emplace_back("....", "...L");
+
+ // Store some arbitrary value into the generator register so that this
+ // register is dead by the time we reach SwitchOnGeneratorState (this matches
+ // real generator bytecode and is DCHECKed in the bytecode analysis).
builder.StoreAccumulatorInRegister(reg_gen);
- expected_liveness.emplace_back("L..L", "L.LL");
+ expected_liveness.emplace_back("...L", "..L.");
- // Note: technically, r0 should be dead here since the resume will write it,
- // but in practice the bytecode analysis doesn't bother to special case it,
- // since the generator switch is close to the top of the function anyway.
- builder.SwitchOnGeneratorState(reg_gen, gen_jump_table);
- expected_liveness.emplace_back("L.LL", "L.LL");
+ builder.LoadUndefined();
+ expected_liveness.emplace_back("..L.", "..LL");
+ // Reg 0 is read after the resume, so should be live up to here (and is killed
+ // here).
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back("..LL", "L.LL");
- // Reg 1 is never read, so should be dead.
+ // Reg 1 is never read, so should be dead already and this store shouldn't
+ // change it.
builder.StoreAccumulatorInRegister(reg_1);
expected_liveness.emplace_back("L.LL", "L.LL");
diff --git a/deps/v8/test/unittests/compiler/codegen-tester.h b/deps/v8/test/unittests/compiler/codegen-tester.h
index 22ceedd382..146d9907f7 100644
--- a/deps/v8/test/unittests/compiler/codegen-tester.h
+++ b/deps/v8/test/unittests/compiler/codegen-tester.h
@@ -77,20 +77,16 @@ class RawMachineAssemblerTester : public CallHelper<ReturnType>,
return code_.ToHandleChecked();
}
- Handle<CodeT> GetCodeT() { return ToCodeT(GetCode(), isolate_); }
-
protected:
Address Generate() override {
if (code_.is_null()) {
- Schedule* schedule = this->ExportForTest();
- auto call_descriptor = this->call_descriptor();
- Graph* graph = this->graph();
+ Schedule* schedule = ExportForTest();
OptimizedCompilationInfo info(base::ArrayVector("testing"), zone_, kind_);
code_ = Pipeline::GenerateCodeForTesting(
- &info, isolate_, call_descriptor, graph,
+ &info, isolate_, call_descriptor(), graph(),
AssemblerOptions::Default(isolate_), schedule);
}
- return this->code_.ToHandleChecked()->entry();
+ return code_.ToHandleChecked()->code_entry_point();
}
Zone* zone() { return zone_; }
diff --git a/deps/v8/test/unittests/compiler/compiler-unittest.cc b/deps/v8/test/unittests/compiler/compiler-unittest.cc
index abef44976c..420ed5edef 100644
--- a/deps/v8/test/unittests/compiler/compiler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/compiler-unittest.cc
@@ -613,10 +613,12 @@ TEST_F(CompilerTest, CompileFunctionScriptOrigin) {
v8::ScriptCompiler::CompileFunction(context(), &script_source)
.ToLocalChecked();
EXPECT_TRUE(!fun.IsEmpty());
- v8::Local<v8::UnboundScript> script =
- fun->GetUnboundScript().ToLocalChecked();
- EXPECT_TRUE(!script.IsEmpty());
- EXPECT_TRUE(script->GetScriptName()->StrictEquals(NewString("test")));
+ auto fun_i = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*fun));
+ EXPECT_TRUE(fun_i->shared().IsSharedFunctionInfo());
+ EXPECT_TRUE(
+ Utils::ToLocal(i::handle(i::Script::cast(fun_i->shared().script()).name(),
+ i_isolate()))
+ ->StrictEquals(NewString("test")));
v8::TryCatch try_catch(isolate());
isolate()->SetCaptureStackTraceForUncaughtExceptions(true);
EXPECT_TRUE(fun->Call(context(), context()->Global(), 0, nullptr).IsEmpty());
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 863ddd8f50..fe37c015fd 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -93,7 +93,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
{
Node* node = Parameter(
- Type::Constant(broker(), factory()->minus_zero_value(), zone()));
+ Type::Constant(broker(), broker()->minus_zero_value(), zone()));
Node* use_value = UseValue(node);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -148,7 +148,7 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithNaN) {
}
{
Node* node =
- Parameter(Type::Constant(broker(), factory()->nan_value(), zone()));
+ Parameter(Type::Constant(broker(), broker()->nan_value(), zone()));
Node* use_value = UseValue(node);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -215,7 +215,7 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
Type::Union(
Type::Undetectable(),
Type::Union(
- Type::Constant(broker(), factory()->false_value(),
+ Type::Constant(broker(), broker()->false_value(),
zone()),
Type::Range(0.0, 0.0, zone()), zone()),
zone()),
@@ -234,7 +234,7 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
TEST_F(ConstantFoldingReducerTest, ToBooleanWithTruish) {
Node* input = Parameter(
Type::Union(
- Type::Constant(broker(), factory()->true_value(), zone()),
+ Type::Constant(broker(), broker()->true_value(), zone()),
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
zone()),
0);
diff --git a/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
index 567c276ba0..bd28d1dbca 100644
--- a/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
@@ -29,7 +29,8 @@ class CsaLoadEliminationTest : public GraphTest {
machine()),
reducer_(zone(), graph(), tick_counter(), broker()),
csa_(reducer(), jsgraph(), zone()),
- mcr_(reducer(), jsgraph()) {
+ mcr_(reducer(), jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan) {
reducer()->AddReducer(&csa_);
reducer()->AddReducer(&mcr_);
}
diff --git a/deps/v8/test/unittests/compiler/function-tester.cc b/deps/v8/test/unittests/compiler/function-tester.cc
index d6951da6f7..d7e7356c76 100644
--- a/deps/v8/test/unittests/compiler/function-tester.cc
+++ b/deps/v8/test/unittests/compiler/function-tester.cc
@@ -61,12 +61,9 @@ FunctionTester::FunctionTester(Isolate* isolate, Handle<Code> code,
flags_(0) {
CHECK(!code.is_null());
Compile(function);
- function->set_code(ToCodeT(*code), kReleaseStore);
+ function->set_code(*code, kReleaseStore);
}
-FunctionTester::FunctionTester(Isolate* isolate, Handle<Code> code)
- : FunctionTester(isolate, code, 0) {}
-
void FunctionTester::CheckThrows(Handle<Object> a) {
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
MaybeHandle<Object> no_result = Call(a);
@@ -192,11 +189,9 @@ Handle<JSFunction> FunctionTester::Optimize(
CHECK(info.shared_info()->HasBytecodeArray());
JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
- Handle<CodeT> code = ToCodeT(
+ Handle<Code> code =
compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
- .ToHandleChecked(),
- isolate);
- info.native_context().AddOptimizedCode(*code);
+ .ToHandleChecked();
function->set_code(*code, v8::kReleaseStore);
return function;
}
diff --git a/deps/v8/test/unittests/compiler/function-tester.h b/deps/v8/test/unittests/compiler/function-tester.h
index aededaa4ee..a5af93e9af 100644
--- a/deps/v8/test/unittests/compiler/function-tester.h
+++ b/deps/v8/test/unittests/compiler/function-tester.h
@@ -23,10 +23,12 @@ class FunctionTester {
FunctionTester(Isolate* i_isolate, Graph* graph, int param_count);
+ FunctionTester(Isolate* i_isolate, Handle<InstructionStream> code,
+ int param_count);
FunctionTester(Isolate* i_isolate, Handle<Code> code, int param_count);
// Assumes VoidDescriptor call interface.
- explicit FunctionTester(Isolate* i_isolate, Handle<Code> code);
+ explicit FunctionTester(Isolate* i_isolate, Handle<InstructionStream> code);
Isolate* isolate;
CanonicalHandleScope canonical;
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.h b/deps/v8/test/unittests/compiler/graph-reducer-unittest.h
index eb9d8f9199..ce9739b432 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.h
@@ -15,6 +15,7 @@ namespace compiler {
struct MockAdvancedReducerEditor : public AdvancedReducer::Editor {
MOCK_METHOD(void, Revisit, (Node*), (override));
MOCK_METHOD(void, Replace, (Node*, Node*), (override));
+ MOCK_METHOD(void, Replace, (Node*, Node*, NodeId), (override));
MOCK_METHOD(void, ReplaceWithValue, (Node*, Node*, Node*, Node*), (override));
};
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 10fa5cbd59..cf0d3ba2a9 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -19,6 +19,7 @@ GraphTest::GraphTest(int num_parameters)
common_(zone()),
graph_(zone()),
broker_(isolate(), zone()),
+ current_broker_(&broker_),
source_positions_(&graph_),
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 18878f456b..4c3b6bdfc6 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -74,6 +74,7 @@ class GraphTest : public TestWithNativeContextAndZone {
CommonOperatorBuilder common_;
Graph graph_;
JSHeapBroker broker_;
+ CurrentHeapBrokerScope current_broker_;
SourcePositionTable source_positions_;
NodeOriginTable node_origins_;
TickCounter tick_counter_;
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 685c14aa14..e2a05badd6 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -15,11 +15,14 @@
#include "src/compiler/node.h"
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
+#if V8_TARGET_ARCH_32_BIT
+
using testing::AllOf;
using testing::Capture;
using testing::CaptureEq;
@@ -50,13 +53,11 @@ class Int64LoweringTest : public GraphTest {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
- nullptr, signature);
+ signature);
lowering.LowerGraph();
}
- void LowerGraphWithSpecialCase(
- Node* node, std::unique_ptr<Int64LoweringSpecialCase> special_case,
- MachineRepresentation rep) {
+ void LowerGraphWithSpecialCase(Node* node, MachineRepresentation rep) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
Node* ret = graph()->NewNode(common()->Return(), zero, node,
graph()->start(), graph()->start());
@@ -69,8 +70,7 @@ class Int64LoweringTest : public GraphTest {
sig_builder.AddReturn(rep);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
- nullptr, sig_builder.Build(),
- std::move(special_case));
+ sig_builder.Build());
lowering.LowerGraph();
}
@@ -287,7 +287,7 @@ TEST_F(Int64LoweringTest, Int64LoadImmutable) {
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
- nullptr, sig_builder.Build()); \
+ sig_builder.Build()); \
lowering.LowerGraph(); \
\
STORE_VERIFY(kStore, kRep32)
@@ -321,7 +321,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
- nullptr, sig_builder.Build());
+ sig_builder.Build());
lowering.LowerGraph();
EXPECT_THAT(
@@ -430,8 +430,6 @@ TEST_F(Int64LoweringTest, ParameterWithJSClosureParam) {
// two assumptions:
// - Pointers are 32 bit and therefore pointers do not get lowered.
// - 64-bit rol/ror/clz/ctz instructions have a control input.
-// TODO(wasm): We can find an alternative to re-activate these tests.
-#if V8_TARGET_ARCH_32_BIT
TEST_F(Int64LoweringTest, CallI64Return) {
int32_t function = 0x9999;
Node* context_address = Int32Constant(0);
@@ -660,7 +658,6 @@ TEST_F(Int64LoweringTest, I64Ror_43) {
IsInt32Constant(21))),
start(), start()));
}
-#endif
TEST_F(Int64LoweringTest, Int64Sub) {
LowerGraph(graph()->NewNode(machine()->Int64Sub(), Int64Constant(value(0)),
@@ -1035,37 +1032,20 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
Node* target = Int32Constant(1);
Node* context = Int32Constant(2);
Node* bigint = Int32Constant(4);
+ WasmCallDescriptors* descriptors = wasm::GetWasmEngine()->call_descriptors();
CallDescriptor* bigint_to_i64_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- BigIntToI64Descriptor(), // descriptor
- BigIntToI64Descriptor::GetStackParameterCount(), // stack parameter
- // count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
+ descriptors->GetBigIntToI64Descriptor(StubCallMode::kCallBuiltinPointer,
+ false);
CallDescriptor* bigint_to_i32_pair_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- BigIntToI32PairDescriptor(), // descriptor
- BigIntToI32PairDescriptor::
- GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
-
- auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
- lowering_special_case->replacements.insert(
- {bigint_to_i64_call_descriptor, bigint_to_i32_pair_call_descriptor});
+ descriptors->GetLoweredCallDescriptor(bigint_to_i64_call_descriptor);
Node* call_node =
graph()->NewNode(common()->Call(bigint_to_i64_call_descriptor), target,
bigint, context, start(), start());
- LowerGraphWithSpecialCase(call_node, std::move(lowering_special_case),
- MachineRepresentation::kWord64);
+ LowerGraphWithSpecialCase(call_node, MachineRepresentation::kWord64);
Capture<Node*> call;
Matcher<Node*> call_matcher =
@@ -1081,36 +1061,18 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
Node* target = Int32Constant(1);
Node* i64 = Int64Constant(value(0));
+ WasmCallDescriptors* descriptors = wasm::GetWasmEngine()->call_descriptors();
CallDescriptor* i64_to_bigint_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- I64ToBigIntDescriptor(), // descriptor
- I64ToBigIntDescriptor::GetStackParameterCount(), // stack parameter
- // count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
+ descriptors->GetI64ToBigIntDescriptor(StubCallMode::kCallBuiltinPointer);
CallDescriptor* i32_pair_to_bigint_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- I32PairToBigIntDescriptor(), // descriptor
- I32PairToBigIntDescriptor::
- GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
-
- auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
- lowering_special_case->replacements.insert(
- {i64_to_bigint_call_descriptor, i32_pair_to_bigint_call_descriptor});
+ descriptors->GetLoweredCallDescriptor(i64_to_bigint_call_descriptor);
Node* call = graph()->NewNode(common()->Call(i64_to_bigint_call_descriptor),
target, i64, start(), start());
- LowerGraphWithSpecialCase(call, std::move(lowering_special_case),
- MachineRepresentation::kTaggedPointer);
+ LowerGraphWithSpecialCase(call, MachineRepresentation::kTaggedPointer);
EXPECT_THAT(
graph()->end()->InputAt(1),
@@ -1123,3 +1085,5 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
} // namespace compiler
} // namespace internal
} // namespace v8
+
+#endif // V8_TARGET_ARCH_32_BIT
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 1ecf511149..7dc31d92ac 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -101,18 +101,8 @@ class JSCallReducerTest : public TypedGraphTest {
const Operator* Call(int arity) {
FeedbackVectorSpec spec(zone());
spec.AddCallICSlot();
- Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfoForBuiltin(
- isolate()->factory()->empty_string(), Builtin::kIllegal);
- // Set the raw feedback metadata to circumvent checks that we are not
- // overwriting existing metadata.
- shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
- ClosureFeedbackCellArray::New(isolate(), shared);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate()));
- Handle<FeedbackVector> vector = FeedbackVector::New(
- isolate(), shared, closure_feedback_cell_array, &is_compiled_scope);
+ Handle<FeedbackVector> vector =
+ FeedbackVector::NewForTesting(isolate(), &spec);
FeedbackSource feedback(vector, FeedbackSlot(0));
return javascript()->Call(JSCallNode::ArityForArgc(arity), CallFrequency(),
feedback, ConvertReceiverMode::kAny,
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 00242cdd7f..62c5bba17e 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -39,8 +39,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
GraphReducer graph_reducer(zone(), graph(), tick_counter(), broker());
- JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, broker(),
- zone());
+ JSCreateLowering reducer(&graph_reducer, &jsgraph, broker(), zone());
return reducer.Reduce(node);
}
@@ -150,10 +149,10 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CreateFunctionContext(
- MakeRef(broker(), ScopeInfo::Empty(isolate())), 8, FUNCTION_SCOPE),
- context, effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateFunctionContext(
+ broker()->empty_scope_info(), 8, FUNCTION_SCOPE),
+ context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
@@ -166,8 +165,7 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
- ScopeInfoRef scope_info =
- MakeRef(broker(), ReadOnlyRoots(isolate()).empty_function_scope_info());
+ ScopeInfoRef scope_info = broker()->empty_function_scope_info();
Node* const object = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
@@ -188,8 +186,7 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
// JSCreateCatchContext
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
- ScopeInfoRef scope_info =
- MakeRef(broker(), ReadOnlyRoots(isolate()).empty_function_scope_info());
+ ScopeInfoRef scope_info = broker()->empty_function_scope_info();
Node* const exception = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 49dd7d9cc2..bb740dd956 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -393,7 +393,7 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
- NameRef name = MakeRef(broker(), factory()->length_string());
+ NameRef name = broker()->length_string();
Node* const receiver = Parameter(Type::String(), 0);
Node* const feedback = UndefinedConstant();
Node* const context = UndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
index c69d4324f2..941e9d95ba 100644
--- a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
@@ -175,35 +175,35 @@ const MachInst1 kAddSubOneInstructions[] = {
// ----------------------------------------------------------------------------
const IntCmp kCmpInstructions[] = {
- {{&RawMachineAssembler::WordEqual, "WordEqual", kLoong64Cmp,
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kLoong64Cmp64,
MachineType::Int64()},
1U},
- {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kLoong64Cmp,
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kLoong64Cmp64,
MachineType::Int64()},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp32,
MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kLoong64Cmp,
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kLoong64Cmp32,
MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp32,
MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kLoong64Cmp, MachineType::Int32()},
+ kLoong64Cmp32, MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kLoong64Cmp,
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kLoong64Cmp32,
MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kLoong64Cmp, MachineType::Int32()},
+ kLoong64Cmp32, MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp32,
MachineType::Uint32()},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kLoong64Cmp, MachineType::Uint32()},
+ kLoong64Cmp32, MachineType::Uint32()},
1U}};
// ----------------------------------------------------------------------------
@@ -235,16 +235,16 @@ const Conversion kConversionInstructions[] = {
// LOONG64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
- {&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp32,
MachineType::Uint32()},
- {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp32,
MachineType::Uint32()},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kLoong64Cmp, MachineType::Uint32()},
- {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
+ kLoong64Cmp32, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp32,
MachineType::Uint32()},
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kLoong64Cmp, MachineType::Uint32()},
+ kLoong64Cmp32, MachineType::Uint32()},
};
} // namespace
@@ -1378,7 +1378,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -1390,7 +1390,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -1405,7 +1405,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp64, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -1417,7 +1417,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
m.Return(m.Word64Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp64, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 1c2a73d486..669f941148 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -4,12 +4,15 @@
#include "src/compiler/machine-operator-reducer.h"
+#include <cstdint>
#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
+#include "src/builtins/builtins.h"
+#include "src/common/globals.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/numbers/conversions-inl.h"
@@ -45,7 +48,9 @@ class MachineOperatorReducerTest : public GraphTest {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
&machine_);
- MachineOperatorReducer reducer(&graph_reducer_, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer_, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
return reducer.Reduce(node);
}
@@ -69,6 +74,26 @@ class MachineOperatorReducerTest : public GraphTest {
IsWord32Shr(dividend_matcher, IsInt32Constant(31)));
}
+ Matcher<Node*> IsTruncatingDiv64(const Matcher<Node*>& dividend_matcher,
+ const int64_t divisor) {
+ base::MagicNumbersForDivision<uint64_t> const mag =
+ base::SignedDivisionByConstant(base::bit_cast<uint64_t>(divisor));
+ int64_t const multiplier = base::bit_cast<int64_t>(mag.multiplier);
+ int64_t const shift = base::bit_cast<int32_t>(mag.shift);
+ Matcher<Node*> quotient_matcher =
+ IsInt64MulHigh(dividend_matcher, IsInt64Constant(multiplier));
+ if (divisor > 0 && multiplier < 0) {
+ quotient_matcher = IsInt64Add(quotient_matcher, dividend_matcher);
+ } else if (divisor < 0 && multiplier > 0) {
+ quotient_matcher = IsInt64Sub(quotient_matcher, dividend_matcher);
+ }
+ if (shift) {
+ quotient_matcher = IsWord64Sar(quotient_matcher, IsInt64Constant(shift));
+ }
+ return IsInt64Add(quotient_matcher,
+ IsWord64Shr(dividend_matcher, IsInt64Constant(63)));
+ }
+
MachineOperatorBuilder* machine() { return &machine_; }
private:
@@ -1375,6 +1400,21 @@ TEST_F(MachineOperatorReducerTest,
}
}
+TEST_F(MachineOperatorReducerTest, Word32EqualWithAddAndConstant) {
+ // (x+k1)==k2 => x==(k2-k1)
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, k1, kInt32Values) {
+ TRACED_FOREACH(int32_t, k2, kInt32Values) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(k1)),
+ Int32Constant(k2));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Word64Equal
@@ -1413,6 +1453,21 @@ TEST_F(MachineOperatorReducerTest,
}
}
+TEST_F(MachineOperatorReducerTest, Word64EqualWithAddAndConstant) {
+ // (x+k1)==k2 => x==(k2-k1)
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int64_t, k1, kInt64Values) {
+ TRACED_FOREACH(int64_t, k2, kInt64Values) {
+ Node* node = graph()->NewNode(
+ machine()->Word64Equal(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)),
+ Int64Constant(k2));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Branch
@@ -1466,10 +1521,8 @@ TEST_F(MachineOperatorReducerTest, Int32SubWithConstant) {
}
}
-
// -----------------------------------------------------------------------------
-// Int32Div
-
+// Int32Div, Int64Div
TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
Node* const p0 = Parameter(0);
@@ -1556,6 +1609,93 @@ TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Int64DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(-1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Sub(IsInt64Constant(0), p0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(2), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord64Sar(IsInt64Add(IsWord64Shr(p0, IsInt64Constant(63)), p0),
+ IsInt64Constant(1)));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(-2), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt64Sub(
+ IsInt64Constant(0),
+ IsWord64Sar(IsInt64Add(IsWord64Shr(p0, IsInt64Constant(63)), p0),
+ IsInt64Constant(1))));
+ }
+ TRACED_FORRANGE(int64_t, shift, 2, 62) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int64Div(), p0,
+ Int64Constant(int64_t{1} << shift), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord64Sar(IsInt64Add(IsWord64Shr(IsWord64Sar(p0, IsInt64Constant(63)),
+ IsInt64Constant(64 - shift)),
+ p0),
+ IsInt64Constant(shift)));
+ }
+ TRACED_FORRANGE(int64_t, shift, 2, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(Shl(int64_t{-1}, shift)),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt64Sub(
+ IsInt64Constant(0),
+ IsWord64Sar(
+ IsInt64Add(IsWord64Shr(IsWord64Sar(p0, IsInt64Constant(63)),
+ IsInt64Constant(64 - shift)),
+ p0),
+ IsInt64Constant(shift))));
+ }
+ TRACED_FOREACH(int64_t, divisor, kInt64Values) {
+ if (divisor < 0) {
+ if (divisor == std::numeric_limits<int64_t>::min() ||
+ base::bits::IsPowerOfTwo(-divisor)) {
+ continue;
+ }
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Sub(IsInt64Constant(0),
+ IsTruncatingDiv64(p0, -divisor)));
+ } else if (divisor > 0) {
+ if (base::bits::IsPowerOfTwo(divisor)) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncatingDiv64(p0, divisor));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
Node* const p0 = Parameter(0);
@@ -1567,10 +1707,8 @@ TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
}
-
// -----------------------------------------------------------------------------
-// Uint32Div
-
+// Uint32Div, Uint64Div
TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
Node* const p0 = Parameter(0);
@@ -1613,6 +1751,46 @@ TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint64DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), Int64Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ TRACED_FOREACH(uint64_t, dividend, kUint64Values) {
+ TRACED_FOREACH(uint64_t, divisor, kUint64Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint64Div(), Uint64Constant(dividend),
+ Uint64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Constant(base::bit_cast<int64_t>(
+ base::bits::UnsignedDiv64(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint64_t, shift, 1, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), p0, Uint64Constant(uint64_t{1} << shift),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord64Shr(p0, IsInt64Constant(static_cast<int64_t>(shift))));
+ }
+}
TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
Node* const p0 = Parameter(0);
@@ -1624,10 +1802,8 @@ TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
}
-
// -----------------------------------------------------------------------------
-// Int32Mod
-
+// Int32Mod, Int64Mod
TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
Node* const p0 = Parameter(0);
@@ -1714,6 +1890,90 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Int64ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), Int64Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(-1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ TRACED_FOREACH(int64_t, dividend, kInt64Values) {
+ TRACED_FOREACH(int64_t, divisor, kInt64Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int64Mod(), Int64Constant(dividend),
+ Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Constant(base::bits::SignedMod64(dividend, divisor)));
+ }
+ }
+ TRACED_FORRANGE(int64_t, shift, 1, 62) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int64Mod(), p0,
+ Int64Constant(int64_t{1} << shift), graph()->start()));
+ int64_t const mask = (int64_t{1} << shift) - 1;
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord64,
+ IsInt64Sub(IsInt64Constant(0),
+ IsWord64And(IsInt64Sub(IsInt64Constant(0), p0),
+ IsInt64Constant(mask))),
+ IsWord64And(p0, IsInt64Constant(mask)),
+ IsMerge(IsIfTrue(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())),
+ IsIfFalse(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())))));
+ }
+ TRACED_FORRANGE(int64_t, shift, 1, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(Shl(int64_t{-1}, shift)),
+ graph()->start()));
+ int64_t const mask = static_cast<int64_t>((uint64_t{1} << shift) - 1U);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord64,
+ IsInt64Sub(IsInt64Constant(0),
+ IsWord64And(IsInt64Sub(IsInt64Constant(0), p0),
+ IsInt64Constant(mask))),
+ IsWord64And(p0, IsInt64Constant(mask)),
+ IsMerge(IsIfTrue(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())),
+ IsIfFalse(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())))));
+ }
+ TRACED_FOREACH(int64_t, divisor, kInt64Values) {
+ if (divisor == 0 || base::bits::IsPowerOfTwo(Abs(divisor))) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Sub(p0, IsInt64Mul(IsTruncatingDiv64(p0, Abs(divisor)),
+ IsInt64Constant(Abs(divisor)))));
+ }
+}
TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
Node* const p0 = Parameter(0);
@@ -1723,10 +1983,8 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
-
// -----------------------------------------------------------------------------
-// Uint32Mod
-
+// Uint32Mod, Uint64Mod
TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
Node* const p0 = Parameter(0);
@@ -1770,6 +2028,47 @@ TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint64ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), Int64Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ TRACED_FOREACH(uint64_t, dividend, kUint64Values) {
+ TRACED_FOREACH(uint64_t, divisor, kUint64Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint64Mod(), Uint64Constant(dividend),
+ Uint64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Constant(base::bit_cast<int64_t>(
+ base::bits::UnsignedMod64(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint64_t, shift, 1, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), p0, Uint64Constant(uint64_t{1} << shift),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord64And(p0, IsInt64Constant(static_cast<int64_t>(
+ (uint64_t{1} << shift) - 1u))));
+ }
+}
TEST_F(MachineOperatorReducerTest, Uint32ModWithParameters) {
Node* const p0 = Parameter(0);
@@ -2317,6 +2616,49 @@ TEST_F(MachineOperatorReducerTest, Uint64LessThanWithUint32Reduction) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint64LessThanWithInt64AddDontReduce) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FOREACH(uint64_t, k1, kUint64Values) {
+ TRACED_FOREACH(uint64_t, k2, kUint64Values) {
+ Node* node = graph()->NewNode(
+ machine()->Uint64LessThan(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)),
+ Int64Constant(k2));
+ Reduction r = Reduce(node);
+ // Don't reduce because of potential overflow
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+}
+
+TEST_F(MachineOperatorReducerTest,
+ Uint64LessThanOrEqualWithInt64AddDontReduce) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FOREACH(uint64_t, k1, kUint64Values) {
+    TRACED_FOREACH(uint64_t, k2, kUint64Values) {
+      Node* node = graph()->NewNode(
+ machine()->Uint64LessThanOrEqual(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)),
+ Int64Constant(k2));
+ Reduction r = Reduce(node);
+ if (k2 == 0) {
+ // x <= 0 => x == 0
+ ASSERT_TRUE(r.Changed());
+ } else if (k2 == std::numeric_limits<uint64_t>::max()) {
+ // x <= Max => true
+ ASSERT_TRUE(r.Changed());
+ } else {
+ // Don't reduce because of potential overflow
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Int64LessThan
@@ -2574,7 +2916,7 @@ TEST_F(MachineOperatorReducerTest, Float64CosWithConstant) {
Reduce(graph()->NewNode(machine()->Float64Cos(), Float64Constant(x)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cos(x))));
+ IsFloat64Constant(NanSensitiveDoubleEq(COS_IMPL(x))));
}
}
@@ -2673,7 +3015,7 @@ TEST_F(MachineOperatorReducerTest, Float64SinWithConstant) {
Reduce(graph()->NewNode(machine()->Float64Sin(), Float64Constant(x)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sin(x))));
+ IsFloat64Constant(NanSensitiveDoubleEq(SIN_IMPL(x))));
}
}
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index c98b13e40d..3611ed8c54 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -2263,6 +2263,7 @@ IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Div)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Int64Mul)
+IS_BINOP_MATCHER(Int64MulHigh)
IS_BINOP_MATCHER(Int64LessThan)
IS_BINOP_MATCHER(Uint64LessThan)
IS_BINOP_MATCHER(JSAdd)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index f727a14c34..db5059dfb8 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -419,6 +419,8 @@ Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64MulHigh(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64LessThan(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index ac33110995..b7113563d3 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -6,6 +6,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/feedback-source.h"
+#include "src/compiler/js-graph.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -22,8 +23,12 @@ class RedundancyEliminationTest : public GraphTest {
public:
explicit RedundancyEliminationTest(int num_parameters = 4)
: GraphTest(num_parameters),
- reducer_(&editor_, zone()),
- simplified_(zone()) {
+ javascript_(zone()),
+ simplified_(zone()),
+ machine_(zone()),
+ jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
+ &machine_),
+ reducer_(&editor_, &jsgraph_, zone()) {
// Initialize the {reducer_} state for the Start node.
reducer_.Reduce(graph()->start());
@@ -31,16 +36,8 @@ class RedundancyEliminationTest : public GraphTest {
FeedbackVectorSpec spec(zone());
FeedbackSlot slot1 = spec.AddCallICSlot();
FeedbackSlot slot2 = spec.AddCallICSlot();
- Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfoForBuiltin(
- isolate()->factory()->empty_string(), Builtin::kIllegal);
- shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
- ClosureFeedbackCellArray::New(isolate(), shared);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate()));
- Handle<FeedbackVector> feedback_vector = FeedbackVector::New(
- isolate(), shared, closure_feedback_cell_array, &is_compiled_scope);
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::NewForTesting(isolate(), &spec);
vector_slot_pairs_.push_back(FeedbackSource());
vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot1));
vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot2));
@@ -59,8 +56,11 @@ class RedundancyEliminationTest : public GraphTest {
NiceMock<MockAdvancedReducerEditor> editor_;
std::vector<FeedbackSource> vector_slot_pairs_;
FeedbackSource feedback2_;
- RedundancyElimination reducer_;
+ JSOperatorBuilder javascript_;
SimplifiedOperatorBuilder simplified_;
+ MachineOperatorBuilder machine_;
+ JSGraph jsgraph_;
+ RedundancyElimination reducer_;
};
namespace {
diff --git a/deps/v8/test/unittests/compiler/revec-unittest.cc b/deps/v8/test/unittests/compiler/revec-unittest.cc
new file mode 100644
index 0000000000..01e14c26bf
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/revec-unittest.cc
@@ -0,0 +1,239 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/machine-type.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/machine-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/revectorizer.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/wasm-module.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class RevecTest : public TestWithIsolateAndZone {
+ public:
+ RevecTest()
+ : TestWithIsolateAndZone(kCompressGraphZone),
+ graph_(zone()),
+ common_(zone()),
+ machine_(zone(), MachineRepresentation::kWord64,
+ MachineOperatorBuilder::Flag::kAllOptionalOps),
+ mcgraph_(&graph_, &common_, &machine_) {}
+
+ Graph* graph() { return &graph_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ MachineGraph* mcgraph() { return &mcgraph_; }
+
+ private:
+ Graph graph_;
+ CommonOperatorBuilder common_;
+ MachineOperatorBuilder machine_;
+ MachineGraph mcgraph_;
+};
+
+// Create a graph which add two 256 bit vectors(a, b), store the result in c:
+// simd128 *a,*b,*c;
+// *c = *a + *b;
+// *(c+1) = *(a+1) + *(b+1);
+// In Revectorization, two simd 128 nodes can be combined into one 256 node:
+// simd256 *d, *e, *f;
+// *f = *d + *e;
+TEST_F(RevecTest, F32x8Add) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+
+ Node* start = graph()->NewNode(common()->Start(5));
+ graph()->SetStart(start);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* sixteen = graph()->NewNode(common()->Int64Constant(16));
+ // offset of memory start field in WASM instance object.
+ Node* offset = graph()->NewNode(common()->Int64Constant(23));
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* p2 = graph()->NewNode(common()->Parameter(2), start);
+ Node* p3 = graph()->NewNode(common()->Parameter(3), start);
+
+ StoreRepresentation store_rep(MachineRepresentation::kSimd128,
+ WriteBarrierKind::kNoWriteBarrier);
+ LoadRepresentation load_rep(MachineType::Simd128());
+ Node* load0 = graph()->NewNode(machine()->Load(MachineType::Int64()), p0,
+ offset, start, start);
+ Node* mem_buffer1 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_buffer2 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_store = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* load1 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p1,
+ load0, start);
+ Node* load2 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer1, p1, load1, start);
+ Node* load3 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p2,
+ load2, start);
+ Node* load4 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer2, p2, load3, start);
+ Node* add1 = graph()->NewNode(machine()->F32x4Add(), load1, load3);
+ Node* add2 = graph()->NewNode(machine()->F32x4Add(), load2, load4);
+ Node* store1 = graph()->NewNode(machine()->Store(store_rep), load0, p3, add1,
+ load4, start);
+ Node* store2 = graph()->NewNode(machine()->Store(store_rep), mem_store, p3,
+ add2, store1, start);
+ Node* ret = graph()->NewNode(common()->Return(0), zero, store2, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+ graph()->SetEnd(end);
+
+ graph()->RecordSimdStore(store1);
+ graph()->RecordSimdStore(store2);
+ graph()->SetSimd(true);
+
+ // Test whether the graph can be revectorized
+ Revectorizer revec(zone(), graph(), mcgraph());
+ EXPECT_TRUE(revec.TryRevectorize(nullptr));
+
+ // Test whether the graph has been revectorized
+ Node* store_256 = ret->InputAt(1);
+ EXPECT_EQ(StoreRepresentationOf(store_256->op()).representation(),
+ MachineRepresentation::kSimd256);
+}
+
+// Create a graph which multiplies a F32x8 vector with the first element of
+// vector b and store the result to a F32x8 vector c:
+// float *a, *b, *c;
+// c[0123] = a[0123] * b[0000];
+// c[4567] = a[4567] * b[0000];
+//
+// After the revectorization phase, two consecutive 128-bit loads and multiplies
+// can be coalesced using 256-bit vectors:
+// c[01234567] = a[01234567] * b[00000000];
+TEST_F(RevecTest, F32x8Mul) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+
+ Node* start = graph()->NewNode(common()->Start(4));
+ graph()->SetStart(start);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* sixteen = graph()->NewNode(common()->Int64Constant(16));
+ Node* offset = graph()->NewNode(common()->Int64Constant(23));
+
+ // Wasm array base address
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ // Load base address a*
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+  // LoadTransform base address b*
+ Node* p2 = graph()->NewNode(common()->Parameter(2), start);
+ // Store base address c*
+ Node* p3 = graph()->NewNode(common()->Parameter(3), start);
+
+ LoadRepresentation load_rep(MachineType::Simd128());
+ StoreRepresentation store_rep(MachineRepresentation::kSimd128,
+ WriteBarrierKind::kNoWriteBarrier);
+ Node* base = graph()->NewNode(machine()->Load(MachineType::Int64()), p0,
+ offset, start, start);
+ Node* base16 = graph()->NewNode(machine()->Int64Add(), base, sixteen);
+ Node* base16_store = graph()->NewNode(machine()->Int64Add(), base, sixteen);
+ Node* load0 = graph()->NewNode(machine()->ProtectedLoad(load_rep), base, p1,
+ base, start);
+ Node* load1 = graph()->NewNode(machine()->ProtectedLoad(load_rep), base16, p1,
+ load0, start);
+ Node* load2 = graph()->NewNode(
+ machine()->LoadTransform(MemoryAccessKind::kProtected,
+ LoadTransformation::kS128Load32Splat),
+ base, p2, load1, start);
+ Node* mul0 = graph()->NewNode(machine()->F32x4Mul(), load0, load2);
+ Node* mul1 = graph()->NewNode(machine()->F32x4Mul(), load1, load2);
+ Node* store0 = graph()->NewNode(machine()->Store(store_rep), base, p3, mul0,
+ load2, start);
+ Node* store1 = graph()->NewNode(machine()->Store(store_rep), base16_store, p3,
+ mul1, store0, start);
+ Node* ret = graph()->NewNode(common()->Return(0), zero, store1, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+ graph()->SetEnd(end);
+
+ graph()->RecordSimdStore(store0);
+ graph()->RecordSimdStore(store1);
+ graph()->SetSimd(true);
+
+ Revectorizer revec(zone(), graph(), mcgraph());
+ EXPECT_TRUE(revec.TryRevectorize(nullptr));
+
+ // Test whether the graph has been revectorized
+ Node* store_256 = ret->InputAt(1);
+ EXPECT_EQ(StoreRepresentationOf(store_256->op()).representation(),
+ MachineRepresentation::kSimd256);
+}
+
+// Create a graph with load chain that can not be packed due to effect
+// dependency:
+// [Load4] -> [Load3] -> [Load2] -> [Irrelevant Load] -> [Load1]
+//
+// After reordering, no effect dependency will be broken so the graph can be
+// revectorized:
+// [Load4] -> [Load3] -> [Load2] -> [Load1] -> [Irrelevant Load]
+TEST_F(RevecTest, ReorderLoadChain) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+
+ Node* start = graph()->NewNode(common()->Start(5));
+ graph()->SetStart(start);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* sixteen = graph()->NewNode(common()->Int64Constant(16));
+ // offset of memory start field in WASM instance object.
+ Node* offset = graph()->NewNode(common()->Int64Constant(23));
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* p2 = graph()->NewNode(common()->Parameter(2), start);
+ Node* p3 = graph()->NewNode(common()->Parameter(3), start);
+
+ StoreRepresentation store_rep(MachineRepresentation::kSimd128,
+ WriteBarrierKind::kNoWriteBarrier);
+ LoadRepresentation load_rep(MachineType::Simd128());
+ Node* load0 = graph()->NewNode(machine()->Load(MachineType::Int64()), p0,
+ offset, start, start);
+ Node* mem_buffer1 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_buffer2 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_store = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* load1 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p1,
+ load0, start);
+ Node* irrelevant_load = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer1, p1, load1, start);
+ Node* load2 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer1, p1, irrelevant_load, start);
+ Node* load3 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p2,
+ load2, start);
+ Node* load4 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer2, p2, load3, start);
+ Node* add1 = graph()->NewNode(machine()->F32x4Add(), load1, load3);
+ Node* add2 = graph()->NewNode(machine()->F32x4Add(), load2, load4);
+ Node* store1 = graph()->NewNode(machine()->Store(store_rep), load0, p3, add1,
+ load4, start);
+ Node* store2 = graph()->NewNode(machine()->Store(store_rep), mem_store, p3,
+ add2, store1, start);
+ Node* ret = graph()->NewNode(common()->Return(0), zero, store2, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+ graph()->SetEnd(end);
+
+ graph()->RecordSimdStore(store1);
+ graph()->RecordSimdStore(store2);
+ graph()->SetSimd(true);
+
+ // Test whether the graph can be revectorized
+ Revectorizer revec(zone(), graph(), mcgraph());
+ EXPECT_TRUE(revec.TryRevectorize(nullptr));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc b/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc
index f9380ce8cc..5769a980ee 100644
--- a/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc
@@ -910,8 +910,8 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -928,10 +928,10 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
- EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
diff --git a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
index 8458e4e7d5..13de26ab4b 100644
--- a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
@@ -1298,8 +1298,8 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -1316,10 +1316,10 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
- EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
diff --git a/deps/v8/test/unittests/compiler/run-deopt-unittest.cc b/deps/v8/test/unittests/compiler/run-deopt-unittest.cc
index 2c75b0455f..d2dd46efc4 100644
--- a/deps/v8/test/unittests/compiler/run-deopt-unittest.cc
+++ b/deps/v8/test/unittests/compiler/run-deopt-unittest.cc
@@ -12,7 +12,8 @@ namespace internal {
namespace compiler {
static void IsOptimized(const v8::FunctionCallbackInfo<v8::Value>& args) {
- JavaScriptFrameIterator it(reinterpret_cast<Isolate*>(args.GetIsolate()));
+ JavaScriptStackFrameIterator it(
+ reinterpret_cast<Isolate*>(args.GetIsolate()));
JavaScriptFrame* frame = it.frame();
return args.GetReturnValue().Set(frame->is_turbofan());
}
diff --git a/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc b/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc
index 049faff5c5..930bd4b387 100644
--- a/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc
+++ b/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc
@@ -43,8 +43,7 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- Handle<CodeT> code =
- ToCodeT(BuildCallee(isolate, callee_descriptor), isolate);
+ Handle<Code> code = BuildCallee(isolate, callee_descriptor);
params.push_back(__ HeapConstant(code));
int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
for (int i = 0; i < param_slots; ++i) {
@@ -60,12 +59,12 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
Handle<Code> BuildSetupFunction(Isolate* isolate,
CallDescriptor* caller_descriptor,
CallDescriptor* callee_descriptor) {
- CodeAssemblerTester tester(isolate, 0);
+ CodeAssemblerTester tester(isolate, JSParameterCount(0));
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- Handle<CodeT> code = ToCodeT(
- BuildCaller(isolate, caller_descriptor, callee_descriptor), isolate);
+ Handle<Code> code =
+ BuildCaller(isolate, caller_descriptor, callee_descriptor);
params.push_back(__ HeapConstant(code));
// Set up arguments for "Caller".
int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
diff --git a/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc b/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc
index 34cdb0041f..465bebc54d 100644
--- a/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc
+++ b/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc
@@ -26,170 +26,166 @@ TEST_F(SnapshotTableTest, BasicTest) {
Key k3 = table.NewKey(3);
Key k4 = table.NewKey(4);
- base::Optional<Snapshot> s1;
- {
- SnapshotTable<int>::Scope scope(table);
- EXPECT_EQ(scope.Get(k1), 1);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 3);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k1, 10);
- scope.Set(k2, 20);
- scope.Set(k4, 4);
- EXPECT_EQ(scope.Get(k1), 10);
- EXPECT_EQ(scope.Get(k2), 20);
- EXPECT_EQ(scope.Get(k3), 3);
- EXPECT_EQ(scope.Get(k4), 4);
- s1 = scope.Seal();
- }
-
- base::Optional<Snapshot> s2;
- {
- SnapshotTable<int>::Scope scope(table);
- EXPECT_EQ(scope.Get(k1), 1);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 3);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k1, 11);
- scope.Set(k3, 33);
- EXPECT_EQ(scope.Get(k1), 11);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 33);
- EXPECT_EQ(scope.Get(k4), 4);
- s2 = scope.Seal();
- }
-
- {
- SnapshotTable<int>::Scope scope(table, *s2);
- // Assignments of the same value are ignored.
- EXPECT_EQ(scope.Get(k1), 11);
- scope.Set(k1, 11);
- // An empty scope does not produce a new snapshot.
- EXPECT_EQ(scope.Seal(), *s2);
- }
-
- base::Optional<Snapshot> s3;
- {
- SnapshotTable<int>::Scope scope(
- table, {*s1, *s2}, [&](Key key, base::Vector<const int> values) {
- if (key == k1) {
- EXPECT_EQ(values[0], 10);
- EXPECT_EQ(values[1], 11);
- } else if (key == k2) {
- EXPECT_EQ(values[0], 20);
- EXPECT_EQ(values[1], 2);
- } else if (key == k3) {
- EXPECT_EQ(values[0], 3);
- EXPECT_EQ(values[1], 33);
- } else {
- EXPECT_TRUE(false);
- }
- return values[0] + values[1];
- });
- EXPECT_EQ(scope.Get(k1), 21);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k1, 40);
- EXPECT_EQ(scope.Get(k1), 40);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- s3 = scope.Seal();
- }
-
- base::Optional<Snapshot> s4;
- {
- SnapshotTable<int>::Scope scope(table, *s2);
- EXPECT_EQ(scope.Get(k1), 11);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 33);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k3, 30);
- EXPECT_EQ(scope.Get(k3), 30);
- s4 = scope.Seal();
- }
-
- base::Optional<Snapshot> s5;
- {
- SnapshotTable<int>::Scope scope(
- table, {*s4, *s2}, [&](Key key, base::Vector<const int> values) {
- if (key == k3) {
- EXPECT_EQ(values[0], 30);
- EXPECT_EQ(values[1], 33);
- } else {
- EXPECT_TRUE(false);
- }
- return values[0] + values[1];
- });
- EXPECT_EQ(scope.Get(k1), 11);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 63);
- EXPECT_EQ(scope.Get(k4), 4);
- s5 = scope.Seal();
- }
-
- base::Optional<Key> k5;
- base::Optional<Snapshot> s6;
- {
- SnapshotTable<int>::Scope scope(table, *s2);
- scope.Set(k1, 5);
- // Creating a new key while the SnapshotTable is already in use, in the
- // middle of a scope. This is the same as creating the key in the beginning.
- k5 = table.NewKey(-1);
- EXPECT_EQ(scope.Get(*k5), -1);
- scope.Set(*k5, 42);
- EXPECT_EQ(scope.Get(*k5), 42);
- EXPECT_EQ(scope.Get(k1), 5);
- s6 = scope.Seal();
- }
-
- base::Optional<Snapshot> s7;
- {
- // We're merging {s6} and {s1}, to make sure that {s1}'s behavior is correct
- // with regard to {k5}, which wasn't created yet when {s1} was sealed.
- SnapshotTable<int>::Scope scope(
- table, {*s6, *s1}, [&](Key key, base::Vector<const int> values) {
- if (key == k1) {
- EXPECT_EQ(values[1], 10);
- EXPECT_EQ(values[0], 5);
- } else if (key == k2) {
- EXPECT_EQ(values[1], 20);
- EXPECT_EQ(values[0], 2);
- } else if (key == k3) {
- EXPECT_EQ(values[1], 3);
- EXPECT_EQ(values[0], 33);
- } else if (key == *k5) {
- EXPECT_EQ(values[0], 42);
- EXPECT_EQ(values[1], -1);
- return 127;
- } else {
- EXPECT_TRUE(false);
- }
- return values[0] + values[1];
- });
- EXPECT_EQ(scope.Get(k1), 15);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- EXPECT_EQ(scope.Get(*k5), 127);
- // We're not setting anything else, but the merges should produce entries in
- // the log.
- s7 = scope.Seal();
- }
-
- base::Optional<Snapshot> s8;
- {
- SnapshotTable<int>::Scope scope(table, *s7);
- // We're checking that {s7} did indeed capture the merge entries, despite
- // that we didn't do any explicit Set.
- EXPECT_EQ(scope.Get(k1), 15);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- EXPECT_EQ(scope.Get(*k5), 127);
- s8 = scope.Seal();
- }
+ table.StartNewSnapshot();
+ EXPECT_EQ(table.Get(k1), 1);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k1, 10);
+ table.Set(k2, 20);
+ table.Set(k4, 4);
+ EXPECT_EQ(table.Get(k1), 10);
+ EXPECT_EQ(table.Get(k2), 20);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ Snapshot s1 = table.Seal();
+
+ table.StartNewSnapshot();
+ EXPECT_EQ(table.Get(k1), 1);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k1, 11);
+ table.Set(k3, 33);
+ EXPECT_EQ(table.Get(k1), 11);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 33);
+ EXPECT_EQ(table.Get(k4), 4);
+ Snapshot s2 = table.Seal();
+
+ table.StartNewSnapshot(s2);
+ // Assignments of the same value are ignored.
+ EXPECT_EQ(table.Get(k1), 11);
+ table.Set(k1, 11);
+ // Sealing an empty snapshot does not produce a new snapshot.
+ EXPECT_EQ(table.Seal(), s2);
+
+ table.StartNewSnapshot({s1, s2},
+ [&](Key key, base::Vector<const int> values) {
+ if (key == k1) {
+ EXPECT_EQ(values[0], 10);
+ EXPECT_EQ(values[1], 11);
+ } else if (key == k2) {
+ EXPECT_EQ(values[0], 20);
+ EXPECT_EQ(values[1], 2);
+ } else if (key == k3) {
+ EXPECT_EQ(values[0], 3);
+ EXPECT_EQ(values[1], 33);
+ } else {
+ EXPECT_TRUE(false);
+ }
+ return values[0] + values[1];
+ });
+ EXPECT_EQ(table.Get(k1), 21);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k1, 40);
+ EXPECT_EQ(table.Get(k1), 40);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 0), 10);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 1), 11);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 0), 20);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 1), 2);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 0), 3);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 1), 33);
+ table.Seal();
+
+ table.StartNewSnapshot({s1, s2});
+ EXPECT_EQ(table.Get(k1), 1);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Seal();
+
+ table.StartNewSnapshot(s2);
+ EXPECT_EQ(table.Get(k1), 11);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 33);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k3, 30);
+ EXPECT_EQ(table.Get(k3), 30);
+ Snapshot s4 = table.Seal();
+
+ table.StartNewSnapshot({s4, s2},
+ [&](Key key, base::Vector<const int> values) {
+ if (key == k3) {
+ EXPECT_EQ(values[0], 30);
+ EXPECT_EQ(values[1], 33);
+ } else {
+ EXPECT_TRUE(false);
+ }
+ return values[0] + values[1];
+ });
+ EXPECT_EQ(table.Get(k1), 11);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 63);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 0), 30);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 1), 33);
+ table.Seal();
+
+ table.StartNewSnapshot(s2);
+ table.Set(k1, 5);
+ // Creating a new key while the SnapshotTable is already in use. This is the
+ // same as creating the key at the beginning.
+ Key k5 = table.NewKey(-1);
+ EXPECT_EQ(table.Get(k5), -1);
+ table.Set(k5, 42);
+ EXPECT_EQ(table.Get(k5), 42);
+ EXPECT_EQ(table.Get(k1), 5);
+ Snapshot s6 = table.Seal();
+
+ // We're merging {s6} and {s1}, to make sure that {s1}'s behavior is correct
+ // with regard to {k5}, which wasn't created yet when {s1} was sealed.
+ table.StartNewSnapshot({s6, s1},
+ [&](Key key, base::Vector<const int> values) {
+ if (key == k1) {
+ EXPECT_EQ(values[1], 10);
+ EXPECT_EQ(values[0], 5);
+ } else if (key == k2) {
+ EXPECT_EQ(values[1], 20);
+ EXPECT_EQ(values[0], 2);
+ } else if (key == k3) {
+ EXPECT_EQ(values[1], 3);
+ EXPECT_EQ(values[0], 33);
+ } else if (key == k5) {
+ EXPECT_EQ(values[0], 42);
+ EXPECT_EQ(values[1], -1);
+ return 127;
+ } else {
+ EXPECT_TRUE(false);
+ }
+ return values[0] + values[1];
+ });
+ EXPECT_EQ(table.Get(k1), 15);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.Get(k5), 127);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 0), 5);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 1), 10);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 0), 2);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 1), 20);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 0), 33);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 1), 3);
+ EXPECT_EQ(table.GetPredecessorValue(k5, 0), 42);
+ EXPECT_EQ(table.GetPredecessorValue(k5, 1), -1);
+ // We're not setting anything else, but the merges should produce entries in
+ // the log.
+ Snapshot s7 = table.Seal();
+
+ table.StartNewSnapshot(s7);
+ // We're checking that {s7} did indeed capture the merge entries, despite
+ // that we didn't do any explicit Set.
+ EXPECT_EQ(table.Get(k1), 15);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.Get(k5), 127);
+ table.Seal();
}
TEST_F(SnapshotTableTest, KeyData) {
diff --git a/deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc
new file mode 100644
index 0000000000..84ee558abb
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc
@@ -0,0 +1,346 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/common/globals.h"
+#include "src/compiler/turboshaft/typer.h"
+#include "src/handles/handles.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+template <typename T>
+class WordTyperTest : public TestWithNativeContextAndZone {
+ public:
+ CanonicalHandleScope canonical;
+ using word_t = typename T::word_t;
+ static constexpr size_t Bits = sizeof(word_t) * kBitsPerByte;
+
+ WordTyperTest() : TestWithNativeContextAndZone(), canonical(isolate()) {}
+};
+
+template <typename T>
+class FloatTyperTest : public TestWithNativeContextAndZone {
+ public:
+ CanonicalHandleScope canonical;
+ using float_t = typename T::float_t;
+ static constexpr size_t Bits = sizeof(float_t) * kBitsPerByte;
+
+ FloatTyperTest() : TestWithNativeContextAndZone(), canonical(isolate()) {}
+};
+
+template <typename T>
+struct Slices {
+ Slices(std::initializer_list<T> slices) : slices(slices) {}
+
+ std::vector<T> slices;
+};
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, const Slices<T>& slices) {
+ os << "Slices{";
+ for (const auto& s : slices.slices) os << s << ", ";
+ return os << "}";
+}
+
+// We define operator<= here for Type so that we can use gtest's EXPECT_LE to
+// check for subtyping and have the default printing.
+inline bool operator<=(const Type& lhs, const Type& rhs) {
+ return lhs.IsSubtypeOf(rhs);
+}
+template <typename T>
+inline bool operator<=(const Slices<T>& lhs, const T& rhs) {
+ for (const auto& s : lhs.slices) {
+ if (!s.IsSubtypeOf(rhs)) return false;
+ }
+ return true;
+}
+
+using WordTypes = ::testing::Types<Word32Type, Word64Type>;
+TYPED_TEST_SUITE(WordTyperTest, WordTypes);
+
+#define DEFINE_TEST_HELPERS() \
+ using T = TypeParam; \
+ using word_t = typename TestFixture::word_t; \
+ using Slices = Slices<T>; \
+ constexpr word_t max = std::numeric_limits<word_t>::max(); \
+ auto Constant = [&](word_t value) { return T::Constant(value); }; \
+ auto Set = [&](std::initializer_list<word_t> elements) { \
+ return WordOperationTyper<TestFixture::Bits>::FromElements(elements, \
+ this->zone()); \
+ }; \
+ auto Range = [&](word_t from, word_t to) { \
+ return T::Range(from, to, this->zone()); \
+ }; \
+ USE(Slices{}, Constant, Set, Range);
+
+TYPED_TEST(WordTyperTest, Add) {
+ DEFINE_TEST_HELPERS()
+#define EXPECT_ADD(lhs, rhs, result) \
+ EXPECT_LE(result, WordOperationTyper<TestFixture::Bits>::Add(lhs, rhs, \
+ this->zone())); \
+ EXPECT_LE(result, WordOperationTyper<TestFixture::Bits>::Add(rhs, lhs, \
+ this->zone()))
+
+ // Adding any.
+ {
+ // Any + Any
+ EXPECT_ADD(T::Any(), T::Any(), T::Any());
+ // c + Any
+ EXPECT_ADD(Constant(42), T::Any(), T::Any());
+ // {x1, ..., xn} + Any
+ EXPECT_ADD(Set({8, 11, 922}), T::Any(), T::Any());
+ // [a, b] + Any
+ EXPECT_ADD(Range(800, 1020), T::Any(), T::Any());
+ }
+
+ // Adding constants.
+ {
+ // c' + c
+ EXPECT_ADD(Constant(8), Constant(10003), Constant(8 + 10003));
+ EXPECT_ADD(Constant(max), Constant(0), Constant(max));
+ EXPECT_ADD(Constant(max - 8), Constant(12), Constant(3));
+ EXPECT_ADD(Constant(max), Constant(max), Constant(max - 1));
+ // {x1, ..., xn} + c
+ auto set1 = Set({0, 87});
+ EXPECT_ADD(set1, Constant(0), set1);
+ EXPECT_ADD(set1, Constant(2005), Set({2005, 2092}));
+ EXPECT_ADD(set1, Constant(max - 4), Set({82, max - 4}));
+ EXPECT_ADD(set1, Constant(max), Set({86, max}));
+ auto set2 = Set({15, 25025, max - 99});
+ EXPECT_ADD(set2, Constant(0), set2);
+ EXPECT_ADD(set2, Constant(4), Set({19, 25029, max - 95}));
+ EXPECT_ADD(set2, Constant(max - 50), Set({24974, max - 150, max - 35}));
+ EXPECT_ADD(set2, Constant(max), Set({14, 25024, max - 100}));
+ // [a, b](non-wrapping) + c
+ auto range1 = Range(13, 288);
+ EXPECT_ADD(range1, Constant(0), range1);
+ EXPECT_ADD(range1, Constant(812), Range(825, 1100));
+ EXPECT_ADD(range1, Constant(max - 103), Range(max - 90, 184));
+ EXPECT_ADD(range1, Constant(max - 5), Range(7, 282));
+ EXPECT_ADD(range1, Constant(max), Range(12, 287));
+ // [a, b](wrapping) + c
+ auto range2 = Range(max - 100, 70);
+ EXPECT_ADD(range2, Constant(0), range2);
+ EXPECT_ADD(range2, Constant(14), Range(max - 86, 84));
+ EXPECT_ADD(range2, Constant(101), Range(0, 171));
+ EXPECT_ADD(range2, Constant(200), Range(99, 270));
+ EXPECT_ADD(range2, Constant(max), Range(max - 101, 69));
+ }
+
+ // Adding sets.
+ {
+ // {y1, ..., ym} + {x1, ..., xn}
+ auto set1 = Set({0, 87});
+ EXPECT_ADD(set1, set1, Set({0, 87, (87 + 87)}));
+ EXPECT_ADD(set1, Set({3, 4, 5}), Set({3, 4, 5, 90, 91}));
+ EXPECT_ADD(set1, Set({3, 7, 11, 114}),
+ Set({3, 7, 11, 90, 94, 98, 114, 201}));
+ EXPECT_ADD(set1, Set({0, 1, 87, 200, max}),
+ Set({0, 1, 86, 87, 88, 174, 200, 287, max}));
+ EXPECT_ADD(set1, Set({max - 86, max - 9, max}),
+ Set({0, 77, 86, max - 86, max - 9, max}));
+ // [a, b](non-wrapping) + {x1, ..., xn}
+ auto range1 = Range(400, 991);
+ EXPECT_ADD(range1, Set({0, 55}), Range(400, 1046));
+ EXPECT_ADD(range1, Set({49, 110, 100009}), Range(449, 101000));
+ EXPECT_ADD(
+ range1, Set({112, max - 10094, max - 950}),
+ Slices({Range(0, 40), Range(512, 1103), Range(max - 9694, max)}));
+ EXPECT_ADD(range1, Set({112, max - 850}),
+ Slices({Range(512, 1103), Range(max - 450, 140)}));
+ EXPECT_ADD(range1, Set({max - 3, max - 1, max}), Range(396, 990));
+ // [a,b](wrapping) + {x1, ..., xn}
+ auto range2 = Range(max - 30, 82);
+ EXPECT_ADD(range2, Set({0, 20}),
+ Slices({Range(max - 30, 82), Range(max - 10, 102)}));
+ EXPECT_ADD(range2, Set({20, 30, 32, max}),
+ Slices({Range(max - 10, 101), Range(0, 112), Range(1, 114),
+ Range(max - 31, 81)}));
+ EXPECT_ADD(range2, Set({1000, 2000}),
+ Slices({Range(969, 1082), Range(1969, 2082)}));
+ EXPECT_ADD(range2, Set({max - 8, max - 2}),
+ Slices({Range(max - 39, 73), Range(max - 33, 79)}));
+ }
+
+ // Adding ranges.
+ {
+ // [a, b](non-wrapping) + [c, d](non-wrapping)
+ auto range1 = Range(30, 990);
+ EXPECT_ADD(range1, Range(0, 2), Range(30, 992));
+ EXPECT_ADD(range1, Range(1000, 22000), Range(1030, 22990));
+ EXPECT_ADD(range1, Range(0, max - 1000), Range(30, max - 10));
+ EXPECT_ADD(range1, Range(max - 800, max - 700), Range(max - 770, 289));
+ EXPECT_ADD(range1, Range(max - 5, max), Range(24, 989));
+ // [a, b](wrapping) + [c, d](non-wrapping)
+ auto range2 = Range(max - 40, 40);
+ EXPECT_ADD(range2, Range(0, 8), Range(max - 40, 48));
+ EXPECT_ADD(range2, Range(2000, 90000), Range(1959, 90040));
+ EXPECT_ADD(range2, Range(max - 400, max - 200),
+ Range(max - 441, max - 160));
+ EXPECT_ADD(range2, Range(0, max - 82), Range(max - 40, max - 42));
+ EXPECT_ADD(range2, Range(0, max - 81), T::Any());
+ EXPECT_ADD(range2, Range(20, max - 20), T::Any());
+ // [a, b](wrapping) + [c, d](wrapping)
+ EXPECT_ADD(range2, range2, Range(max - 81, 80));
+ EXPECT_ADD(range2, Range(max - 2, 2), Range(max - 43, 42));
+ EXPECT_ADD(range2, Range(1000, 100), Range(959, 140));
+ }
+
+#undef EXPECT_ADD
+}
+
+TYPED_TEST(WordTyperTest, WidenExponential) {
+ DEFINE_TEST_HELPERS()
+
+ auto SizeOf = [&](const T& type) -> word_t {
+ DCHECK(!type.is_any());
+ if (type.is_set()) return type.set_size();
+ if (type.is_wrapping()) {
+ return type.range_to() + (max - type.range_from()) + word_t{2};
+ }
+ return type.range_to() - type.range_from() + word_t{1};
+ };
+ auto DoubledInSize = [&](const T& old_type, const T& new_type) {
+ // If the `new_type` is any, we accept it.
+ if (new_type.is_any()) return true;
+ return SizeOf(old_type) <= 2 * SizeOf(new_type);
+ };
+
+#define EXPECT_WEXP(old_type, new_type) \
+ { \
+ const T ot = old_type; \
+ const T nt = new_type; \
+ auto result = WordOperationTyper<TestFixture::Bits>::WidenExponential( \
+ ot, nt, this->zone()); \
+ EXPECT_LE(ot, result); \
+ EXPECT_LE(nt, result); \
+ EXPECT_TRUE(DoubledInSize(ot, result)); \
+ }
+
+ // c W set
+ EXPECT_WEXP(Constant(0), Set({0, 1}));
+ EXPECT_WEXP(Constant(0), Set({0, 3}));
+ EXPECT_WEXP(Constant(0), Set({0, 1, max}));
+ EXPECT_WEXP(Constant(0), Set({0, 1, 2, max - 2, max - 1, max}));
+ EXPECT_WEXP(Constant(max), Set({0, 1, 2, max - 2, max}));
+ // c W range
+ EXPECT_WEXP(Constant(0), Range(0, 100));
+ EXPECT_WEXP(Constant(100), Range(50, 100));
+ EXPECT_WEXP(Constant(100), Range(50, 150));
+ EXPECT_WEXP(Constant(0), Range(max - 10, 0));
+ EXPECT_WEXP(Constant(0), Range(max - 10, 10));
+ EXPECT_WEXP(Constant(50), Range(max - 10000, 100));
+ EXPECT_WEXP(Constant(max), T::Any());
+ // set W set
+ EXPECT_WEXP(Set({0, 1}), Set({0, 1, 2}));
+ EXPECT_WEXP(Set({0, 1}), Set({0, 1, 2, 3, 4}));
+ EXPECT_WEXP(Set({0, max}), Set({0, 1, max}));
+ EXPECT_WEXP(Set({8, max - 8}), Set({7, 8, max - 8, max - 7}));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Set({2, 3, 5, 7, 11}));
+ // set W range
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(3, 11));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(0, 11));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(3, 100));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(max, 11));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(max - 100, 100));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), T::Any());
+ // range W range
+ EXPECT_WEXP(Range(0, 20), Range(0, 21));
+ EXPECT_WEXP(Range(0, 20), Range(0, 220));
+ EXPECT_WEXP(Range(0, 20), Range(max, 20));
+ EXPECT_WEXP(Range(0, 20), Range(max - 200, 20));
+ EXPECT_WEXP(Range(0, 20), T::Any());
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 101, max - 80));
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 100, max - 79));
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 101, max - 79));
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 200, 20));
+ EXPECT_WEXP(Range(max - 100, max - 80), T::Any());
+ EXPECT_WEXP(Range(max - 20, 0), Range(max - 20, 1));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 20, 21));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 21, 20));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 21, 21));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 2000, 2000));
+ EXPECT_WEXP(Range(max - 20, 20), T::Any());
+
+#undef EXPECT_WEXP
+}
+
+#undef DEFINE_TEST_HELPERS
+
+using FloatTypes = ::testing::Types<Float32Type, Float64Type>;
+TYPED_TEST_SUITE(FloatTyperTest, FloatTypes);
+
+#define DEFINE_TEST_HELPERS() \
+ using T = TypeParam; \
+ using float_t = typename TestFixture::float_t; \
+ using Slices = Slices<T>; \
+ auto Constant = [&](float_t value) { return T::Constant(value); }; \
+ auto Set = [&](std::initializer_list<float_t> elements, \
+ uint32_t special_values = 0) { \
+ return T::Set(elements, special_values, this->zone()); \
+ }; \
+ auto Range = [&](float_t from, float_t to, uint32_t special_values = 0) { \
+ return T::Range(from, to, special_values, this->zone()); \
+ }; \
+ constexpr uint32_t kNaN = T::kNaN; \
+ constexpr uint32_t kMZ = T::kMinusZero; \
+ constexpr float_t nan = nan_v<TestFixture::Bits>; \
+ constexpr float_t inf = std::numeric_limits<float_t>::infinity(); \
+ USE(Slices{}, Constant, Set, Range); \
+ USE(kNaN, kMZ, nan, inf);
+
+TYPED_TEST(FloatTyperTest, Divide) {
+ DEFINE_TEST_HELPERS()
+#define EXPECT_DIV(lhs, rhs, result) \
+ EXPECT_LE(result, FloatOperationTyper<TestFixture::Bits>::Divide( \
+ lhs, rhs, this->zone()))
+
+ // 0 / x
+ EXPECT_DIV(Constant(0.0), T::Any(), Set({0}, kNaN | kMZ));
+ EXPECT_DIV(T::MinusZero(), T::Any(), Set({0}, kNaN | kMZ));
+ EXPECT_DIV(Constant(0.0), Range(0.001, inf), Constant(0));
+ EXPECT_DIV(T::MinusZero(), Range(0.001, inf), T::MinusZero());
+ EXPECT_DIV(Constant(0.0), Range(-inf, -0.001), T::MinusZero());
+ EXPECT_DIV(T::MinusZero(), Range(-inf, -0.001), Constant(0));
+ EXPECT_DIV(Set({0.0}, kMZ), Constant(3), Set({0}, kMZ));
+ EXPECT_DIV(Set({0.0}), Set({-2.5, 0.0, 1.5}), Set({0.0}, kNaN | kMZ));
+ EXPECT_DIV(Set({0.0}, kMZ), Set({-2.5, 0.0, 1.5}), Set({0.0}, kNaN | kMZ));
+ EXPECT_DIV(Set({0.0}), Set({1.5}, kMZ), Set({0.0}, kNaN));
+ EXPECT_DIV(Set({0.0}, kMZ), Set({1.5}, kMZ), Set({0.0}, kNaN | kMZ));
+
+ // x / 0
+ EXPECT_DIV(Constant(1.0), Constant(0), Constant(inf));
+ EXPECT_DIV(Constant(1.0), T::MinusZero(), Constant(-inf));
+ EXPECT_DIV(Constant(inf), Constant(0), Constant(inf));
+ EXPECT_DIV(Constant(inf), T::MinusZero(), Constant(-inf));
+ EXPECT_DIV(Constant(-1.0), Constant(0), Constant(-inf));
+ EXPECT_DIV(Constant(-1.0), T::MinusZero(), Constant(inf));
+ EXPECT_DIV(Constant(-inf), Constant(0), Constant(-inf));
+ EXPECT_DIV(Constant(-inf), T::MinusZero(), Constant(inf));
+ EXPECT_DIV(Constant(1.5), Set({0.0}, kMZ), Set({-inf, inf}));
+ EXPECT_DIV(Constant(-1.5), Set({0.0}, kMZ), Set({-inf, inf}));
+ EXPECT_DIV(Set({1.5}, kMZ), Set({0.0}, kMZ), Set({-inf, inf}, kNaN));
+ EXPECT_DIV(Set({-1.5}, kMZ), Set({0.0}, kMZ), Set({-inf, inf}, kNaN));
+
+ // 0 / 0
+ EXPECT_DIV(Constant(0), Constant(0), T::NaN());
+ EXPECT_DIV(Constant(0), T::MinusZero(), T::NaN());
+ EXPECT_DIV(T::MinusZero(), Constant(0), T::NaN());
+ EXPECT_DIV(T::MinusZero(), T::MinusZero(), T::NaN());
+ EXPECT_DIV(Set({0}, kMZ), Set({1}, kMZ), Set({0}, kNaN | kMZ));
+
+ // inf / inf
+ EXPECT_DIV(Constant(inf), Constant(inf), T::NaN());
+ EXPECT_DIV(Constant(inf), Constant(-inf), T::NaN());
+ EXPECT_DIV(Constant(-inf), Constant(inf), T::NaN());
+ EXPECT_DIV(Constant(-inf), Constant(-inf), T::NaN());
+ EXPECT_DIV(Set({-inf, inf}), Constant(inf), T::NaN());
+ EXPECT_DIV(Set({-inf, inf}), Constant(-inf), T::NaN());
+ EXPECT_DIV(Set({-inf, inf}), Set({-inf, inf}), T::NaN());
+}
+
+#undef DEFINE_TEST_HELPERS
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc
new file mode 100644
index 0000000000..284a711967
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc
@@ -0,0 +1,787 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/types.h"
+#include "src/handles/handles.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8::internal::compiler::turboshaft {
+
// Fixture for Turboshaft type-system tests: provides a native context and a
// zone (via TestWithNativeContextAndZone) for allocating type payloads.
class TurboshaftTypesTest : public TestWithNativeContextAndZone {
 public:
  using Kind = Type::Kind;
  // Canonicalizes handles created while the tests run; constructed after the
  // base class so that isolate() is valid when it is initialized below.
  CanonicalHandleScope canonical;

  TurboshaftTypesTest()
      : TestWithNativeContextAndZone(), canonical(isolate()) {}
};
+
+TEST_F(TurboshaftTypesTest, Word32) {
+ const auto max_value = std::numeric_limits<Word32Type::word_t>::max();
+
+ // Complete range
+ {
+ Word32Type t = Word32Type::Any();
+ EXPECT_TRUE(Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({3, 9, max_value - 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(0, 10, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(800, 1200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(1, max_value - 1, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(0, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(max_value - 20, 20, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(1000, 999, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (non-wrapping)
+ {
+ Word32Type t = Word32Type::Range(100, 300, zone());
+ EXPECT_TRUE(!Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(99).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(100).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(250).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(300).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(301).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 150}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({99, 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({100, 105}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({150, 200, 250}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({150, 300}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({300, 301}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(50, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(99, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(100, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(150, 250, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(250, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(250, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(99, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(800, 9000, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(max_value - 100, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(250, 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (wrapping)
+ {
+ const auto large_value = max_value - 1000;
+ Word32Type t = Word32Type::Range(large_value, 800, zone());
+ EXPECT_TRUE(Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(801).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(5000).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(large_value - 1).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(large_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(large_value + 5).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, 800}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 801}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 600, 900}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({100, max_value - 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({large_value - 1, large_value + 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word32Type::Set({large_value, large_value + 5, max_value - 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(0, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(100, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(0, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(200, max_value - 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(large_value - 1, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word32Type::Range(large_value, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(large_value + 100, max_value - 100, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(large_value, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word32Type::Range(large_value + 100, 700, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(large_value - 1, 799, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(large_value + 1, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(5000, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set
+ {
+ CHECK_GT(Word32Type::kMaxSetSize, 2);
+ Word32Type t = Word32Type::Set({4, 890}, zone());
+ EXPECT_TRUE(!Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(3).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(4).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(5).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(889).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(890).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 4}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({4, 90}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({4, 890, 1000}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({890, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(0, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(4, 890, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(800, 900, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(800, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(890, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(max_value - 5, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Word64) {
+ const auto max_value = std::numeric_limits<Word64Type::word_t>::max();
+
+ // Complete range
+ {
+ Word64Type t = Word64Type::Any();
+ EXPECT_TRUE(Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({3, 9, max_value - 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(0, 10, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(800, 1200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(1, max_value - 1, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(0, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(max_value - 20, 20, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(1000, 999, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (non-wrapping)
+ {
+ Word64Type t = Word64Type::Range(100, 300, zone());
+ EXPECT_TRUE(!Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(99).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(100).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(250).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(300).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(301).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 150}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({99, 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({100, 105}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({150, 200, 250}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({150, 300}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({300, 301}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(50, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(99, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(100, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(150, 250, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(250, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(250, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(99, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(800, 9000, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(max_value - 100, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(250, 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (wrapping)
+ {
+ const auto large_value = max_value - 1000;
+ Word64Type t = Word64Type::Range(large_value, 800, zone());
+ EXPECT_TRUE(Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(801).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(5000).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(large_value - 1).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(large_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(large_value + 5).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, 800}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 801}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 600, 900}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({100, max_value - 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({large_value - 1, large_value + 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word64Type::Set({large_value, large_value + 5, max_value - 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(0, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(100, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(0, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(200, max_value - 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(large_value - 1, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word64Type::Range(large_value, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(large_value + 100, max_value - 100, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(large_value, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word64Type::Range(large_value + 100, 700, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(large_value - 1, 799, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(large_value + 1, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(5000, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set
+ {
+ CHECK_GT(Word64Type::kMaxSetSize, 2);
+ Word64Type t = Word64Type::Set({4, 890}, zone());
+ EXPECT_TRUE(!Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(3).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(4).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(5).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(889).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(890).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 4}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({4, 90}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({4, 890, 1000}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({890, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(0, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(4, 890, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(800, 900, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(800, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(890, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(max_value - 5, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float32) {
+ const auto large_value =
+ std::numeric_limits<Float32Type::float_t>::max() * 0.99f;
+ const auto inf = std::numeric_limits<Float32Type::float_t>::infinity();
+ const auto kNaN = Float32Type::kNaN;
+ const auto kMinusZero = Float32Type::kMinusZero;
+ const auto kNoSpecialValues = Float32Type::kNoSpecialValues;
+
+ // Complete range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t = Float32Type::Any(kNaN | kMinusZero);
+ EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(391.113f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({0.13f, 91.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Float32Type::Set({-100.4f, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(0.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(-inf, 12.3f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Complete range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t = Float32Type::Any(kMinusZero);
+ EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(391.113f).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({0.13f, 91.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(
+ !with_nan,
+ Float32Type::Set({-100.4f, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(0.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(-inf, 12.3f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t =
+ Float32Type::Range(-1.0f, 3.14159f, kNaN | kMinusZero, zone());
+ EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-0.99f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.15f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-0.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.1f, 1.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-0.9f, 1.88f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({0.0f, 3.142f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.3f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, 0.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(-1.0f, 1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(0.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(0.0f, 3.142f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t = Float32Type::Range(-1.0f, 3.14159f, kMinusZero, zone());
+ EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-0.99f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.15f).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float32Type::Set({-0.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.1f, 1.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({-0.9f, 1.88f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({0.0f, 3.142f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.3f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, 0.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(-1.0f, 1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(0.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(0.0f, 3.142f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float32Type t = Float32Type::Set({-1.0f, 3.14159f}, kNaN, zone());
+ EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.1415f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-1.0f, 3.14159f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Set({3.14159f, 3.1416f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Range(-1.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.14159f, 4.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float32Type t =
+ Float32Type::Set({-1.0f, 3.14159f}, kNoSpecialValues, zone());
+ EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.1415f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({-1.0f, 3.14159f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Set({3.14159f, 3.1416f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Range(-1.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.14159f, 4.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // -0.0f corner cases
+ {
+ EXPECT_TRUE(!Float32Type::MinusZero().IsSubtypeOf(
+ Float32Type::Set({0.0f, 1.0f}, zone())));
+ EXPECT_TRUE(
+ !Float32Type::Constant(0.0f).IsSubtypeOf(Float32Type::MinusZero()));
+ EXPECT_TRUE(
+ Float32Type::Set({3.2f}, kMinusZero, zone())
+ .IsSubtypeOf(Float32Type::Range(0.0f, 4.0f, kMinusZero, zone())));
+ EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, kMinusZero, zone())
+ .IsSubtypeOf(Float32Type::Range(-inf, 0.0f, zone())));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float64) {
+ const auto large_value =
+ std::numeric_limits<Float64Type::float_t>::max() * 0.99;
+ const auto inf = std::numeric_limits<Float64Type::float_t>::infinity();
+ const auto kNaN = Float64Type::kNaN;
+ const auto kMinusZero = Float64Type::kMinusZero;
+ const auto kNoSpecialValues = Float64Type::kNoSpecialValues;
+
+ // Complete range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t = Float64Type::Any(kNaN | kMinusZero);
+ EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(391.113).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({0.13, 91.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Float64Type::Set({-100.4, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(0.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(-inf, 12.3, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Complete range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t = Float64Type::Any(kMinusZero);
+ EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(391.113).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({0.13, 91.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(
+ !with_nan,
+ Float64Type::Set({-100.4, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(0.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(-inf, 12.3, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t =
+ Float64Type::Range(-1.0, 3.14159, kNaN | kMinusZero, zone());
+ EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-0.99).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.15).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-0.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.1, 1.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-0.9, 1.88}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({0.0, 3.142}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.3}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, 0.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(-1.0, 1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(0.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(0.0, 3.142, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t = Float64Type::Range(-1.0, 3.14159, kMinusZero, zone());
+ EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-0.99).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.15).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float64Type::Set({-0.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.1, 1.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({-0.9, 1.88}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({0.0, 3.142}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.3}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, 0.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(-1.0, 1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(0.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(0.0, 3.142, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float64Type t = Float64Type::Set({-1.0, 3.14159}, kNaN, zone());
+ EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.1415).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.0, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-1.0, 3.14159}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float64Type::Set({3.14159, 3.1416}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.14159, 4.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float64Type t = Float64Type::Set({-1.0, 3.14159}, kNoSpecialValues, zone());
+ EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.1415).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.0, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({-1.0, 3.14159}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float64Type::Set({3.14159, 3.1416}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.14159, 4.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // -0.0 corner cases
+ {
+ EXPECT_TRUE(!Float64Type::MinusZero().IsSubtypeOf(
+ Float64Type::Set({0.0, 1.0}, zone())));
+ EXPECT_TRUE(
+ !Float64Type::Constant(0.0).IsSubtypeOf(Float64Type::MinusZero()));
+ EXPECT_TRUE(
+ Float64Type::Set({3.2}, kMinusZero, zone())
+ .IsSubtypeOf(Float64Type::Range(0.0, 4.0, kMinusZero, zone())));
+ EXPECT_TRUE(
+ Float64Type::Set({0.0}, kMinusZero, zone())
+ .IsSubtypeOf(Float64Type::Range(-inf, 0.0, kMinusZero, zone())));
+ }
+}
+
// Checks Word32Type::LeastUpperBound on ranges, constants and sets.
TEST_F(TurboshaftTypesTest, Word32LeastUpperBound) {
  // NOTE(review): this helper only asserts that `expected` is contained in
  // the computed bound (expected ⊆ LUB); it does not assert that the bound
  // is no larger than `expected`.
  auto CheckLubIs = [&](const Word32Type& lhs, const Word32Type& rhs,
                        const Word32Type& expected) {
    EXPECT_TRUE(
        expected.IsSubtypeOf(Word32Type::LeastUpperBound(lhs, rhs, zone())));
  };

  // Joining a non-wrapping range with overlapping, disjoint and wrapping
  // ranges.
  {
    const auto lhs = Word32Type::Range(100, 400, zone());
    CheckLubIs(lhs, lhs, lhs);
    CheckLubIs(lhs, Word32Type::Range(50, 350, zone()),
               Word32Type::Range(50, 400, zone()));
    CheckLubIs(lhs, Word32Type::Range(150, 600, zone()),
               Word32Type::Range(100, 600, zone()));
    CheckLubIs(lhs, Word32Type::Range(150, 350, zone()), lhs);
    CheckLubIs(lhs, Word32Type::Range(350, 0, zone()),
               Word32Type::Range(100, 0, zone()));
    // Joins that would need a disconnected result widen to Any.
    CheckLubIs(lhs, Word32Type::Range(400, 100, zone()), Word32Type::Any());
    CheckLubIs(lhs, Word32Type::Range(600, 0, zone()),
               Word32Type::Range(600, 400, zone()));
    CheckLubIs(lhs, Word32Type::Range(300, 150, zone()), Word32Type::Any());
  }

  // Joining a constant with other constants, ranges and sets.
  {
    const auto lhs = Word32Type::Constant(18);
    CheckLubIs(lhs, lhs, lhs);
    CheckLubIs(lhs, Word32Type::Constant(1119),
               Word32Type::Set({18, 1119}, zone()));
    CheckLubIs(lhs, Word32Type::Constant(0), Word32Type::Set({0, 18}, zone()));
    CheckLubIs(lhs, Word32Type::Range(40, 100, zone()),
               Word32Type::Range(18, 100, zone()));
    CheckLubIs(lhs, Word32Type::Range(4, 90, zone()),
               Word32Type::Range(4, 90, zone()));
    CheckLubIs(lhs, Word32Type::Set({0, 1, 2, 3}, zone()),
               Word32Type::Set({0, 1, 2, 3, 18}, zone()));
    CheckLubIs(
        lhs, Word32Type::Constant(std::numeric_limits<uint32_t>::max()),
        Word32Type::Set({18, std::numeric_limits<uint32_t>::max()}, zone()));
  }
}
+
+TEST_F(TurboshaftTypesTest, Word64LeastUpperBound) {
+ auto CheckLubIs = [&](const Word64Type& lhs, const Word64Type& rhs,
+ const Word64Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Word64Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+
+ {
+ const auto lhs = Word64Type::Range(100, 400, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Word64Type::Range(50, 350, zone()),
+ Word64Type::Range(50, 400, zone()));
+ CheckLubIs(lhs, Word64Type::Range(150, 600, zone()),
+ Word64Type::Range(100, 600, zone()));
+ CheckLubIs(lhs, Word64Type::Range(150, 350, zone()), lhs);
+ CheckLubIs(lhs, Word64Type::Range(350, 0, zone()),
+ Word64Type::Range(100, 0, zone()));
+ CheckLubIs(lhs, Word64Type::Range(400, 100, zone()), Word64Type::Any());
+ CheckLubIs(lhs, Word64Type::Range(600, 0, zone()),
+ Word64Type::Range(600, 400, zone()));
+ CheckLubIs(lhs, Word64Type::Range(300, 150, zone()), Word64Type::Any());
+ }
+
+ {
+ const auto lhs = Word64Type::Constant(18);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Word64Type::Constant(1119),
+ Word64Type::Set({18, 1119}, zone()));
+ CheckLubIs(lhs, Word64Type::Constant(0), Word64Type::Set({0, 18}, zone()));
+ CheckLubIs(lhs, Word64Type::Range(40, 100, zone()),
+ Word64Type::Range(18, 100, zone()));
+ CheckLubIs(lhs, Word64Type::Range(4, 90, zone()),
+ Word64Type::Range(4, 90, zone()));
+ CheckLubIs(lhs, Word64Type::Range(0, 3, zone()),
+ Word64Type::Set({0, 1, 2, 3, 18}, zone()));
+ CheckLubIs(
+ lhs, Word64Type::Constant(std::numeric_limits<uint64_t>::max()),
+ Word64Type::Set({18, std::numeric_limits<uint64_t>::max()}, zone()));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float32LeastUpperBound) {
+ auto CheckLubIs = [&](const Float32Type& lhs, const Float32Type& rhs,
+ const Float32Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Float32Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+ const auto kNaN = Float32Type::kNaN;
+
+ {
+ const auto lhs = Float32Type::Range(-32.19f, 94.07f, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float32Type::Range(-32.19f, 94.07f, kNaN, zone()),
+ Float32Type::Range(-32.19f, 94.07f, kNaN, zone()));
+ CheckLubIs(lhs, Float32Type::NaN(),
+ Float32Type::Range(-32.19f, 94.07f, kNaN, zone()));
+ CheckLubIs(lhs, Float32Type::Constant(0.0f), lhs);
+ CheckLubIs(lhs, Float32Type::Range(-19.9f, 31.29f, zone()), lhs);
+ CheckLubIs(lhs, Float32Type::Range(-91.22f, -40.0f, zone()),
+ Float32Type::Range(-91.22f, 94.07f, zone()));
+ CheckLubIs(lhs, Float32Type::Range(0.0f, 1993.0f, zone()),
+ Float32Type::Range(-32.19f, 1993.0f, zone()));
+ CheckLubIs(lhs, Float32Type::Range(-100.0f, 100.0f, kNaN, zone()),
+ Float32Type::Range(-100.0f, 100.0f, kNaN, zone()));
+ }
+
+ {
+ const auto lhs = Float32Type::Constant(-0.04f);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float32Type::NaN(),
+ Float32Type::Set({-0.04f}, kNaN, zone()));
+ CheckLubIs(lhs, Float32Type::Constant(17.14f),
+ Float32Type::Set({-0.04f, 17.14f}, zone()));
+ CheckLubIs(lhs, Float32Type::Range(-75.4f, -12.7f, zone()),
+ Float32Type::Range(-75.4f, -0.04f, zone()));
+ CheckLubIs(lhs, Float32Type::Set({0.04f}, kNaN, zone()),
+ Float32Type::Set({-0.04f, 0.04f}, kNaN, zone()));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float64LeastUpperBound) {
+ auto CheckLubIs = [&](const Float64Type& lhs, const Float64Type& rhs,
+ const Float64Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Float64Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+ const auto kNaN = Float64Type::kNaN;
+
+ {
+ const auto lhs = Float64Type::Range(-32.19, 94.07, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float64Type::Range(-32.19, 94.07, kNaN, zone()),
+ Float64Type::Range(-32.19, 94.07, kNaN, zone()));
+ CheckLubIs(lhs, Float64Type::NaN(),
+ Float64Type::Range(-32.19, 94.07, kNaN, zone()));
+ CheckLubIs(lhs, Float64Type::Constant(0.0), lhs);
+ CheckLubIs(lhs, Float64Type::Range(-19.9, 31.29, zone()), lhs);
+ CheckLubIs(lhs, Float64Type::Range(-91.22, -40.0, zone()),
+ Float64Type::Range(-91.22, 94.07, zone()));
+ CheckLubIs(lhs, Float64Type::Range(0.0, 1993.0, zone()),
+ Float64Type::Range(-32.19, 1993.0, zone()));
+ CheckLubIs(lhs, Float64Type::Range(-100.0, 100.0, kNaN, zone()),
+ Float64Type::Range(-100.0, 100.0, kNaN, zone()));
+ }
+
+ {
+ const auto lhs = Float64Type::Constant(-0.04);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float64Type::NaN(),
+ Float64Type::Set({-0.04}, kNaN, zone()));
+ CheckLubIs(lhs, Float64Type::Constant(17.14),
+ Float64Type::Set({-0.04, 17.14}, zone()));
+ CheckLubIs(lhs, Float64Type::Range(-75.4, -12.7, zone()),
+ Float64Type::Range(-75.4, -0.04, zone()));
+ CheckLubIs(lhs, Float64Type::Set({0.04}, kNaN, zone()),
+ Float64Type::Set({-0.04, 0.04}, kNaN, zone()));
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index bede5d5441..1a76aa12d4 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -5,6 +5,7 @@
#include <functional>
#include "src/base/overflowing-math.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
@@ -23,6 +24,7 @@ class TyperTest : public TypedGraphTest {
TyperTest()
: TypedGraphTest(3),
broker_(isolate(), zone()),
+ current_broker_(&broker_),
operation_typer_(&broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
@@ -57,6 +59,7 @@ class TyperTest : public TypedGraphTest {
const int kRepetitions = 50;
JSHeapBroker broker_;
+ CurrentHeapBrokerScope current_broker_;
OperationTyper operation_typer_;
Types types_;
JSOperatorBuilder javascript_;
@@ -233,11 +236,10 @@ class TyperTest : public TypedGraphTest {
double x1 = RandomInt(r1.AsRange());
double x2 = RandomInt(r2.AsRange());
bool result_value = opfun(x1, x2);
- Type result_type =
- Type::Constant(&broker_,
- result_value ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value(),
- zone());
+ Type result_type = Type::Constant(
+ &broker_,
+ result_value ? broker_.true_value() : broker_.false_value(),
+ zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
}
diff --git a/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc b/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc
index 135f1cbe23..42700518b1 100644
--- a/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc
+++ b/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc
@@ -141,7 +141,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimple) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
// Test lazy deoptimization of a simple function. Call the function after the
// deoptimization while it is still activated further down the stack.
@@ -157,7 +156,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimple) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeSimpleWithArguments) {
@@ -178,7 +176,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimpleWithArguments) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
// Test lazy deoptimization of a simple function with some arguments. Call the
// function after the deoptimization while it is still activated further down
@@ -195,7 +192,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimpleWithArguments) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeSimpleNested) {
@@ -218,7 +214,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimpleNested) {
CheckJsInt32(1, "count", context());
CheckJsInt32(6, "result", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
}
@@ -241,7 +236,6 @@ TEST_F(DeoptimizationTest, DeoptimizeRecursive) {
CheckJsInt32(1, "count", context());
CheckJsInt32(11, "calls", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
context()->Global()->Get(context(), NewString("f")).ToLocalChecked());
@@ -272,7 +266,6 @@ TEST_F(DeoptimizationTest, DeoptimizeMultiple) {
CheckJsInt32(1, "count", context());
CheckJsInt32(14, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeConstructor) {
@@ -296,7 +289,6 @@ TEST_F(DeoptimizationTest, DeoptimizeConstructor) {
->Get(context(), NewString("result"))
.ToLocalChecked()
->IsTrue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -313,7 +305,6 @@ TEST_F(DeoptimizationTest, DeoptimizeConstructor) {
CheckJsInt32(1, "count", context());
CheckJsInt32(3, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeConstructorMultiple) {
@@ -341,7 +332,6 @@ TEST_F(DeoptimizationTest, DeoptimizeConstructorMultiple) {
CheckJsInt32(1, "count", context());
CheckJsInt32(14, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
class DeoptimizationDisableConcurrentRecompilationTest
@@ -439,7 +429,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CHECK(result->IsString());
v8::String::Utf8Value utf8(isolate(), result);
CHECK_EQ(0, strcmp("a+an X", *utf8));
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -451,7 +440,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(15, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -463,7 +451,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(-1, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -476,7 +463,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(56, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -488,7 +474,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(0, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -500,7 +485,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(7, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest, DeoptimizeCompare) {
@@ -550,7 +534,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest, DeoptimizeCompare) {
->Get(context(), NewString("result"))
.ToLocalChecked()
->IsTrue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
diff --git a/deps/v8/test/unittests/flags/flag-definitions-unittest.cc b/deps/v8/test/unittests/flags/flag-definitions-unittest.cc
index 89022cc95e..5797fd2e11 100644
--- a/deps/v8/test/unittests/flags/flag-definitions-unittest.cc
+++ b/deps/v8/test/unittests/flags/flag-definitions-unittest.cc
@@ -221,5 +221,19 @@ TEST_F(FlagDefinitionsTest, FreezeFlags) {
CHECK_EQ(42, *direct_testing_int_ptr);
}
+TEST_F(FlagDefinitionsTest, TestExperimentalImplications) {
+ // Check that experimental features are not staged behind --future/--harmony.
+ if (!v8_flags.experimental) {
+ int argc = 3;
+ const char* argv[] = {"", "--future", "--harmony"};
+ CHECK_EQ(0, FlagList::SetFlagsFromCommandLine(
+ &argc, const_cast<char**>(argv), true));
+ FlagList::EnforceFlagImplications();
+ CHECK(v8_flags.future);
+ CHECK(v8_flags.harmony);
+ CHECK(!v8_flags.experimental);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/allocation-observer-unittest.cc b/deps/v8/test/unittests/heap/allocation-observer-unittest.cc
index 6ddbe8fb17..bbeef0f41d 100644
--- a/deps/v8/test/unittests/heap/allocation-observer-unittest.cc
+++ b/deps/v8/test/unittests/heap/allocation-observer-unittest.cc
@@ -23,7 +23,7 @@ class UnusedObserver : public AllocationObserver {
TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
AllocationCounter counter;
- CHECK(!counter.IsActive());
+ CHECK_EQ(SIZE_MAX, counter.NextBytes());
UnusedObserver observer100(100);
UnusedObserver observer200(200);
@@ -41,7 +41,7 @@ TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
CHECK_EQ(counter.NextBytes(), 110);
counter.RemoveAllocationObserver(&observer200);
- CHECK(!counter.IsActive());
+ CHECK_EQ(SIZE_MAX, counter.NextBytes());
}
namespace {
@@ -77,7 +77,7 @@ class VerifyStepObserver : public AllocationObserver {
TEST(AllocationObserverTest, Step) {
AllocationCounter counter;
- CHECK(!counter.IsActive());
+ CHECK_EQ(SIZE_MAX, counter.NextBytes());
const Address kSomeObjectAddress = 8;
VerifyStepObserver observer100(100);
diff --git a/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc b/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc
index 7b10a01c49..128f2a22be 100644
--- a/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc
+++ b/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc
@@ -23,9 +23,9 @@ class RecordingVisitor final : public RootVisitor {
inner_address_ = base_address_ + 42 * kTaggedSize;
#ifdef V8_COMPRESS_POINTERS
compr_address_ = static_cast<uint32_t>(
- V8HeapCompressionScheme::CompressTagged(base_address_));
+ V8HeapCompressionScheme::CompressAny(base_address_));
compr_inner_ = static_cast<uint32_t>(
- V8HeapCompressionScheme::CompressTagged(inner_address_));
+ V8HeapCompressionScheme::CompressAny(inner_address_));
#else
compr_address_ = static_cast<uint32_t>(base_address_);
compr_inner_ = static_cast<uint32_t>(inner_address_);
@@ -86,7 +86,7 @@ TEST_F(ConservativeStackVisitorTest, DirectBasePointer) {
volatile Address ptr = recorder->base_address();
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(kNullAddress, ptr);
@@ -107,7 +107,7 @@ TEST_F(ConservativeStackVisitorTest, TaggedBasePointer) {
volatile Address ptr = recorder->tagged_address();
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(kNullAddress, ptr);
@@ -128,7 +128,7 @@ TEST_F(ConservativeStackVisitorTest, InnerPointer) {
volatile Address ptr = recorder->inner_address();
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(kNullAddress, ptr);
@@ -151,7 +151,7 @@ TEST_F(ConservativeStackVisitorTest, HalfWord1) {
volatile uint32_t ptr[] = {recorder->compr_address(), 0};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[0]);
@@ -172,7 +172,7 @@ TEST_F(ConservativeStackVisitorTest, HalfWord2) {
volatile uint32_t ptr[] = {0, recorder->compr_address()};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[1]);
@@ -193,7 +193,7 @@ TEST_F(ConservativeStackVisitorTest, InnerHalfWord1) {
volatile uint32_t ptr[] = {recorder->compr_inner(), 0};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[0]);
@@ -214,7 +214,7 @@ TEST_F(ConservativeStackVisitorTest, InnerHalfWord2) {
volatile uint32_t ptr[] = {0, recorder->compr_inner()};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[1]);
diff --git a/deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc
new file mode 100644
index 0000000000..1759daf5d7
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc
@@ -0,0 +1,287 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handles/handles.h"
+#include "src/handles/traced-handles.h"
+#include "test/unittests/heap/cppgc-js/unified-heap-utils.h"
+#include "test/unittests/heap/heap-utils.h"
+
+namespace v8::internal {
+
+namespace {
+
+constexpr uint16_t kClassIdToOptimize = 23;
+
+using EmbedderRootsHandlerTest = TestWithHeapInternalsAndContext;
+
+class V8_NODISCARD TemporaryEmbedderRootsHandleScope final {
+ public:
+ TemporaryEmbedderRootsHandleScope(v8::Isolate* isolate,
+ v8::EmbedderRootsHandler* handler)
+ : isolate_(isolate) {
+ isolate_->SetEmbedderRootsHandler(handler);
+ }
+
+ ~TemporaryEmbedderRootsHandleScope() {
+ isolate_->SetEmbedderRootsHandler(nullptr);
+ }
+
+ private:
+ v8::Isolate* const isolate_;
+};
+
+// EmbedderRootsHandler that can optimize Scavenger handling when used with
+// TracedReference.
+class ClearingEmbedderRootsHandler final : public v8::EmbedderRootsHandler {
+ public:
+ explicit ClearingEmbedderRootsHandler(uint16_t class_id_to_optimize)
+ : class_id_to_optimize_(class_id_to_optimize) {}
+
+ bool IsRoot(const v8::TracedReference<v8::Value>& handle) final {
+ return handle.WrapperClassId() != class_id_to_optimize_;
+ }
+
+ void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
+ if (handle.WrapperClassId() != class_id_to_optimize_) return;
+
+ // Convention (for test): Objects that are optimized have their first field
+ // set as a back pointer.
+ BasicTracedReference<v8::Value>* original_handle =
+ reinterpret_cast<BasicTracedReference<v8::Value>*>(
+ v8::Object::GetAlignedPointerFromInternalField(
+ handle.As<v8::Object>(), 0));
+ original_handle->Reset();
+ }
+
+ private:
+ const uint16_t class_id_to_optimize_;
+};
+
+template <typename T>
+void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
+ uint16_t optimized_class_id,
+ T* optimized_handle,
+ T* non_optimized_handle) {
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> optimized_object = WrapperHelper::CreateWrapper(
+ isolate->GetCurrentContext(), optimized_handle, nullptr);
+ EXPECT_TRUE(optimized_handle->IsEmpty());
+ *optimized_handle = T(isolate, optimized_object);
+ EXPECT_FALSE(optimized_handle->IsEmpty());
+ optimized_handle->SetWrapperClassId(optimized_class_id);
+
+ v8::Local<v8::Object> non_optimized_object = WrapperHelper::CreateWrapper(
+ isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(non_optimized_handle->IsEmpty());
+ *non_optimized_handle = T(isolate, non_optimized_object);
+ EXPECT_FALSE(non_optimized_handle->IsEmpty());
+}
+
+} // namespace
+
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceNoDestructorReclaimedOnScavenge) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap());
+
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+
+ auto* traced_handles = i_isolate()->traced_handles();
+ const size_t initial_count = traced_handles->used_node_count();
+ auto* optimized_handle = new v8::TracedReference<v8::Value>();
+ auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
+ SetupOptimizedAndNonOptimizedHandle(v8_isolate(), kClassIdToOptimize,
+ optimized_handle, non_optimized_handle);
+ EXPECT_EQ(initial_count + 2, traced_handles->used_node_count());
+ YoungGC();
+ EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
+ EXPECT_TRUE(optimized_handle->IsEmpty());
+ delete optimized_handle;
+ EXPECT_FALSE(non_optimized_handle->IsEmpty());
+ non_optimized_handle->Reset();
+ delete non_optimized_handle;
+ EXPECT_EQ(initial_count, traced_handles->used_node_count());
+}
+
+namespace {
+
+void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ v8::TracedReference<v8::Object>* handle) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ EXPECT_FALSE(object.IsEmpty());
+ *handle = v8::TracedReference<v8::Object>(isolate, object);
+ EXPECT_FALSE(handle->IsEmpty());
+}
+
+template <typename T>
+void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ T* global) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ EXPECT_FALSE(object.IsEmpty());
+ *global = T(isolate, object);
+ EXPECT_FALSE(global->IsEmpty());
+}
+
+enum class SurvivalMode { kSurvives, kDies };
+
+template <typename ModifierFunction, typename ConstructTracedReferenceFunction,
+ typename GCFunction>
+void TracedReferenceTest(v8::Isolate* isolate,
+ ConstructTracedReferenceFunction construct_function,
+ ModifierFunction modifier_function,
+ GCFunction gc_function, SurvivalMode survives) {
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
+ v8::HandleScope scope(isolate);
+ auto* traced_handles = i_isolate->traced_handles();
+ const size_t initial_count = traced_handles->used_node_count();
+ auto gc_invisible_handle =
+ std::make_unique<v8::TracedReference<v8::Object>>();
+ construct_function(isolate, isolate->GetCurrentContext(),
+ gc_invisible_handle.get());
+ ASSERT_TRUE(IsNewObjectInCorrectGeneration(isolate, *gc_invisible_handle));
+ modifier_function(*gc_invisible_handle);
+ const size_t after_modification_count = traced_handles->used_node_count();
+ gc_function();
+ // Cannot check the handle as it is not explicitly cleared by the GC. Instead
+ // check the handles count.
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
+ after_modification_count == traced_handles->used_node_count());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies,
+ initial_count == traced_handles->used_node_count());
+}
+
+} // namespace
+
+TEST_F(EmbedderRootsHandlerTest, TracedReferenceWrapperClassId) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+
+ v8::TracedReference<v8::Object> traced;
+ ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &traced);
+ EXPECT_EQ(0, traced.WrapperClassId());
+ traced.SetWrapperClassId(17);
+ EXPECT_EQ(17, traced.WrapperClassId());
+}
+
+// EmbedderRootsHandler does not affect full GCs.
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectDiesOnFullGC) {
+ // When stressing incremental marking, a write barrier may keep the object
+ // alive.
+ if (v8_flags.stress_incremental_marking) return;
+
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](const TracedReference<v8::Object>&) {}, [this]() { FullGC(); },
+ SurvivalMode::kDies);
+}
+
+// EmbedderRootsHandler does not affect full GCs.
+TEST_F(
+ EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectDiesOnFullGCEvenWhenPointeeIsHeldAlive) {
+ ManualGCScope manual_gcs(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ // The TracedReference itself will die as it's not found by the full GC. The
+ // pointee will be kept alive through other means.
+ v8::Global<v8::Object> strong_global;
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [this, &strong_global](const TracedReference<v8::Object>& handle) {
+ v8::HandleScope scope(v8_isolate());
+ strong_global =
+ v8::Global<v8::Object>(v8_isolate(), handle.Get(v8_isolate()));
+ },
+ [this, &strong_global]() {
+ FullGC();
+ strong_global.Reset();
+ },
+ SurvivalMode::kDies);
+}
+
+// EmbedderRootsHandler does not affect non-API objects.
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesYoungGC) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
+ SurvivalMode::kSurvives);
+}
+
+// EmbedderRootsHandler does not affect non-API objects, even when the handle
+// has a wrapper class id that allows for reclamation.
+TEST_F(
+ EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesYoungGCWhenExcludedFromRoots) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](TracedReference<v8::Object>& handle) {
+ handle.SetWrapperClassId(kClassIdToOptimize);
+ },
+ [this]() { YoungGC(); }, SurvivalMode::kSurvives);
+}
+
+// EmbedderRootsHandler does not affect API objects for handles that have
+// their class ids not set up.
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
+ SurvivalMode::kSurvives);
+}
+
+// EmbedderRootsHandler resets API objects for handles that have their class ids
+// set to being optimized.
+TEST_F(
+ EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [this](TracedReference<v8::Object>& handle) {
+ handle.SetWrapperClassId(kClassIdToOptimize);
+ {
+ HandleScope handles(i_isolate());
+ auto local = handle.Get(v8_isolate());
+ local->SetAlignedPointerInInternalField(0, &handle);
+ }
+ },
+ [this]() { YoungGC(); }, SurvivalMode::kDies);
+}
+
+} // namespace v8::internal
diff --git a/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc
index eec4069ad1..2c069ebdfd 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc
@@ -217,11 +217,11 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnConstruction) {
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
auto ref =
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
USE(ref);
- EXPECT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -238,9 +238,10 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnHeapReset) {
auto ref = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref->Reset(v8_isolate(), local);
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -257,9 +258,10 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnStackReset) {
v8::TracedReference<v8::Object> ref;
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref.Reset(v8_isolate(), local);
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -278,10 +280,11 @@ TEST_F(TracedReferenceTest, WriteBarrierOnHeapCopy) {
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -300,10 +303,11 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnStackCopy) {
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -322,10 +326,11 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnHeapMove) {
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -344,10 +349,11 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnStackMove) {
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
index 384eacdff3..3934eb8b00 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
@@ -10,7 +10,6 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/internal/api-constants.h"
#include "include/cppgc/persistent.h"
-#include "include/cppgc/platform.h"
#include "include/cppgc/testing.h"
#include "include/libplatform/libplatform.h"
#include "include/v8-context.h"
@@ -19,7 +18,6 @@
#include "include/v8-object.h"
#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
-#include "src/base/platform/time.h"
#include "src/common/globals.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -28,8 +26,7 @@
#include "test/unittests/heap/cppgc-js/unified-heap-utils.h"
#include "test/unittests/heap/heap-utils.h"
-namespace v8 {
-namespace internal {
+namespace v8::internal {
namespace {
@@ -61,13 +58,11 @@ TEST_F(UnifiedHeapTest, OnlyGC) { CollectGarbageWithEmbedderStack(); }
TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
auto* wrappable_object =
cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
- v8::Local<v8::Object> api_object =
- WrapperHelper::CreateWrapper(context, &wrappable_type, wrappable_object);
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), &wrappable_type, wrappable_object);
Wrappable::destructor_callcount = 0;
EXPECT_FALSE(api_object.IsEmpty());
EXPECT_EQ(0u, Wrappable::destructor_callcount);
@@ -80,12 +75,11 @@ TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
TEST_F(UnifiedHeapTest, WriteBarrierV8ToCppReference) {
if (!v8_flags.incremental_marking) return;
+
v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
void* wrappable = cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
- v8::Local<v8::Object> api_object =
- WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr);
Wrappable::destructor_callcount = 0;
WrapperHelper::ResetWrappableConnection(api_object);
SimulateIncrementalMarking();
@@ -105,8 +99,6 @@ class Unreferenced : public cppgc::GarbageCollected<Unreferenced> {
TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
auto* unreferenced = cppgc::MakeGarbageCollected<Unreferenced>(
allocation_handle(),
cppgc::AdditionalBytes(cppgc::internal::api_constants::kMB));
@@ -134,8 +126,6 @@ TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
TEST_F(UnifiedHeapTest, TracedReferenceRetainsFromStack) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
TracedReference<v8::Object> holder;
{
v8::HandleScope inner_handle_scope(v8_isolate());
@@ -211,8 +201,7 @@ TEST_F(UnifiedHeapDetachedTest, StandaloneTestingHeap) {
heap.FinalizeGarbageCollection(cppgc::EmbedderStackState::kNoHeapPointers);
}
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal
namespace cppgc {
@@ -225,8 +214,7 @@ constexpr size_t CustomSpaceForTest::kSpaceIndex;
} // namespace cppgc
-namespace v8 {
-namespace internal {
+namespace v8::internal {
namespace {
@@ -267,8 +255,7 @@ class GCed final : public cppgc::GarbageCollected<GCed> {
};
} // namespace
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal
namespace cppgc {
template <>
@@ -278,8 +265,7 @@ struct SpaceTrait<v8::internal::GCed> {
} // namespace cppgc
-namespace v8 {
-namespace internal {
+namespace v8::internal {
namespace {
@@ -359,8 +345,6 @@ class InConstructionObjectReferringToGlobalHandle final
InConstructionObjectReferringToGlobalHandle(Heap* heap,
v8::Local<v8::Object> wrapper)
: wrapper_(reinterpret_cast<v8::Isolate*>(heap->isolate()), wrapper) {
- ScanStackModeScopeForTesting no_stack_scanning(heap,
- Heap::ScanStackMode::kNone);
heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
}
@@ -377,8 +361,6 @@ class InConstructionObjectReferringToGlobalHandle final
TEST_F(UnifiedHeapTest, InConstructionObjectReferringToGlobalHandle) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
{
v8::HandleScope inner_handle_scope(v8_isolate());
auto local = v8::Object::New(v8_isolate());
@@ -410,8 +392,6 @@ class ResetReferenceInDestructorObject final
TEST_F(UnifiedHeapTest, ResetReferenceInDestructor) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
{
v8::HandleScope inner_handle_scope(v8_isolate());
auto local = v8::Object::New(v8_isolate());
@@ -422,5 +402,308 @@ TEST_F(UnifiedHeapTest, ResetReferenceInDestructor) {
CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
}
-} // namespace internal
-} // namespace v8
+TEST_F(UnifiedHeapTest, OnStackReferencesAreTemporary) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::Global<v8::Object> observer;
+ {
+ v8::TracedReference<v8::Value> stack_ref;
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr);
+ stack_ref.Reset(v8_isolate(), api_object);
+ observer.Reset(v8_isolate(), api_object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<Isolate*>(v8_isolate())->heap());
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ }
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+TEST_F(UnifiedHeapTest, TracedReferenceOnStack) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_ref;
+ {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr);
+ stack_ref.Reset(v8_isolate(), object);
+ observer.Reset(v8_isolate(), object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC();
+ EXPECT_FALSE(observer.IsEmpty());
+}
+
+namespace {
+
+enum class Operation {
+ kCopy,
+ kMove,
+};
+
+template <typename T>
+V8_NOINLINE void PerformOperation(Operation op, T* target, T* source) {
+ switch (op) {
+ case Operation::kMove:
+ *target = std::move(*source);
+ break;
+ case Operation::kCopy:
+ *target = *source;
+ source->Reset();
+ break;
+ }
+}
+
+enum class TargetHandling {
+ kNonInitialized,
+ kInitializedYoungGen,
+ kInitializedOldGen
+};
+
+class GCedWithHeapRef final : public cppgc::GarbageCollected<GCedWithHeapRef> {
+ public:
+ v8::TracedReference<v8::Value> heap_handle;
+
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(heap_handle); }
+};
+
+V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::CppHeap* cpp_heap = v8_isolate->GetCppHeap();
+ cppgc::Persistent<GCedWithHeapRef> cpp_heap_obj =
+ cppgc::MakeGarbageCollected<GCedWithHeapRef>(
+ cpp_heap->GetAllocationHandle());
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(
+ IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!v8_flags.single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ FullGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ cpp_heap_obj->heap_handle.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ stack_handle.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &cpp_heap_obj->heap_handle, &stack_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ cpp_heap_obj.Clear();
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<i::Isolate*>(v8_isolate)->heap());
+ FullGC(v8_isolate);
+ }
+ ASSERT_TRUE(observer.IsEmpty());
+}
+
+V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::CppHeap* cpp_heap = v8_isolate->GetCppHeap();
+ cppgc::Persistent<GCedWithHeapRef> cpp_heap_obj =
+ cppgc::MakeGarbageCollected<GCedWithHeapRef>(
+ cpp_heap->GetAllocationHandle());
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(
+ IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!v8_flags.single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ FullGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ stack_handle.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ cpp_heap_obj->heap_handle.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &stack_handle, &cpp_heap_obj->heap_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ stack_handle.Reset();
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<i::Isolate*>(v8_isolate)->heap());
+ FullGC(v8_isolate);
+ }
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle1;
+ v8::TracedReference<v8::Value> stack_handle2;
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(
+ IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!v8_flags.single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ FullGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ stack_handle2.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ stack_handle1.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &stack_handle2, &stack_handle1);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ stack_handle2.Reset();
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<i::Isolate*>(v8_isolate)->heap());
+ FullGC(v8_isolate);
+ }
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+} // namespace
+
+TEST_F(UnifiedHeapTest, TracedReferenceMove) {
+ ManualGCScope manual_gc(i_isolate());
+ StackToHeapTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kNonInitialized);
+ StackToHeapTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kNonInitialized);
+ HeapToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kNonInitialized);
+ StackToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+}
+
+TEST_F(UnifiedHeapTest, TracedReferenceCopy) {
+ ManualGCScope manual_gc(i_isolate());
+ StackToHeapTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ StackToHeapTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ HeapToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ StackToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+}
+
+TEST_F(UnifiedHeapTest, TracingInEphemerons) {
+ // Tests that wrappers that are part of ephemerons are traced.
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
+ Wrappable::destructor_callcount = 0;
+
+ v8::Local<v8::Object> key =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ Handle<JSWeakMap> weak_map = i_isolate()->factory()->NewJSWeakMap();
+ {
+ v8::HandleScope inner_scope(v8_isolate());
+ // C++ object that should be traced through ephemeron value.
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> value = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), &wrappable_type, wrappable_object);
+ EXPECT_FALSE(value.IsEmpty());
+ Handle<JSObject> js_key =
+ handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate());
+ Handle<JSReceiver> js_value = v8::Utils::OpenHandle(*value);
+ int32_t hash = js_key->GetOrCreateHash(i_isolate()).value();
+ JSWeakCollection::Set(weak_map, js_key, js_value, hash);
+ }
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(Wrappable::destructor_callcount, 0u);
+}
+
+TEST_F(UnifiedHeapTest, TracedReferenceHandlesDoNotLeak) {
+ // TracedReference handles are not cleared by the destructor of the embedder
+ // object. To avoid leaks we need to mark these handles during GC.
+ // This test checks that unmarked handles do not leak.
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ v8::TracedReference<v8::Value> ref;
+ ref.Reset(v8_isolate(), v8::Undefined(v8_isolate()));
+ auto* traced_handles = i_isolate()->traced_handles();
+ const size_t initial_count = traced_handles->used_node_count();
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ const size_t final_count = traced_handles->used_node_count();
+ EXPECT_EQ(initial_count, final_count + 1);
+}
+
+} // namespace v8::internal
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
index 6bbded7795..22f91068d3 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
@@ -11,6 +11,7 @@
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/heap.h"
#include "src/objects/objects-inl.h"
+#include "test/unittests/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -24,6 +25,9 @@ UnifiedHeapTest::UnifiedHeapTest(
V8::GetCurrentPlatform(),
CppHeapCreateParams{std::move(custom_spaces),
WrapperHelper::DefaultWrapperDescriptor()})) {
+ // --stress-incremental-marking may have started an incremental GC at this
+ // point already.
+ FinalizeGCIfRunning(isolate());
isolate()->heap()->AttachCppHeap(cpp_heap_.get());
}
@@ -49,6 +53,27 @@ void UnifiedHeapTest::CollectGarbageWithoutEmbedderStack(
}
}
+void UnifiedHeapTest::CollectYoungGarbageWithEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type) {
+ EmbedderStackStateScope stack_scope(
+ heap(), EmbedderStackStateScope::kExplicitInvocation,
+ StackState::kMayContainHeapPointers);
+ CollectGarbage(NEW_SPACE);
+ if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
+ cpp_heap().AsBase().sweeper().FinishIfRunning();
+ }
+}
+void UnifiedHeapTest::CollectYoungGarbageWithoutEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type) {
+ EmbedderStackStateScope stack_scope(
+ heap(), EmbedderStackStateScope::kExplicitInvocation,
+ StackState::kNoHeapPointers);
+ CollectGarbage(NEW_SPACE);
+ if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
+ cpp_heap().AsBase().sweeper().FinishIfRunning();
+ }
+}
+
CppHeap& UnifiedHeapTest::cpp_heap() const {
return *CppHeap::From(isolate()->heap()->cpp_heap());
}
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h
index 21be7a07ae..7c212194d4 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h
@@ -18,7 +18,7 @@ namespace internal {
class CppHeap;
-class UnifiedHeapTest : public TestWithHeapInternals {
+class UnifiedHeapTest : public TestWithHeapInternalsAndContext {
public:
UnifiedHeapTest();
explicit UnifiedHeapTest(
@@ -31,6 +31,13 @@ class UnifiedHeapTest : public TestWithHeapInternals {
cppgc::Heap::SweepingType sweeping_type =
cppgc::Heap::SweepingType::kAtomic);
+ void CollectYoungGarbageWithEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type =
+ cppgc::Heap::SweepingType::kAtomic);
+ void CollectYoungGarbageWithoutEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type =
+ cppgc::Heap::SweepingType::kAtomic);
+
CppHeap& cpp_heap() const;
cppgc::AllocationHandle& allocation_handle();
diff --git a/deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc
new file mode 100644
index 0000000000..d5388cbf3d
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc
@@ -0,0 +1,401 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(CPPGC_YOUNG_GENERATION)
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/testing.h"
+#include "include/v8-context.h"
+#include "include/v8-cppgc.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-traced-handle.h"
+#include "src/api/api-inl.h"
+#include "src/common/globals.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/objects/objects-inl.h"
+#include "test/common/flag-utils.h"
+#include "test/unittests/heap/cppgc-js/unified-heap-utils.h"
+#include "test/unittests/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool IsHeapObjectYoung(void* obj) {
+ return cppgc::internal::HeapObjectHeader::FromObject(obj).IsYoung();
+}
+
+bool IsHeapObjectOld(void* obj) { return !IsHeapObjectYoung(obj); }
+
+class Wrappable final : public cppgc::GarbageCollected<Wrappable> {
+ public:
+ static size_t destructor_callcount;
+
+ Wrappable() = default;
+ Wrappable(v8::Isolate* isolate, v8::Local<v8::Object> local)
+ : wrapper_(isolate, local) {}
+
+ Wrappable(const Wrappable&) = default;
+ Wrappable(Wrappable&&) = default;
+
+ Wrappable& operator=(const Wrappable&) = default;
+ Wrappable& operator=(Wrappable&&) = default;
+
+ ~Wrappable() { destructor_callcount++; }
+
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(wrapper_); }
+
+ void SetWrapper(v8::Isolate* isolate, v8::Local<v8::Object> wrapper) {
+ wrapper_.Reset(isolate, wrapper);
+ }
+
+ TracedReference<v8::Object>& wrapper() { return wrapper_; }
+
+ private:
+ TracedReference<v8::Object> wrapper_;
+};
+
+size_t Wrappable::destructor_callcount = 0;
+
+class MinorMCEnabler {
+ public:
+ MinorMCEnabler()
+ : minor_mc_(&v8_flags.minor_mc, true),
+ cppgc_young_generation_(&v8_flags.cppgc_young_generation, true) {}
+
+ private:
+ FlagScope<bool> minor_mc_;
+ FlagScope<bool> cppgc_young_generation_;
+};
+
+class YoungWrapperCollector : public RootVisitor {
+ public:
+ using YoungWrappers = std::set<Address>;
+
+ void VisitRootPointers(Root root, const char*, FullObjectSlot start,
+ FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
+ all_young_wrappers_.insert(*p.location());
+ }
+ }
+
+ YoungWrappers get_wrappers() { return std::move(all_young_wrappers_); }
+
+ private:
+ YoungWrappers all_young_wrappers_;
+};
+
+class ExpectCppGCToV8GenerationalBarrierToFire {
+ public:
+ ExpectCppGCToV8GenerationalBarrierToFire(
+ v8::Isolate& isolate, std::initializer_list<Address> expected_wrappers)
+ : isolate_(reinterpret_cast<Isolate&>(isolate)),
+ expected_wrappers_(expected_wrappers) {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ young_wrappers_before_ = visitor.get_wrappers();
+
+ std::vector<Address> diff;
+ std::set_intersection(young_wrappers_before_.begin(),
+ young_wrappers_before_.end(),
+ expected_wrappers_.begin(), expected_wrappers_.end(),
+ std::back_inserter(diff));
+ EXPECT_TRUE(diff.empty());
+ }
+
+ ~ExpectCppGCToV8GenerationalBarrierToFire() {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ const auto young_wrappers_after = visitor.get_wrappers();
+ EXPECT_GE(young_wrappers_after.size(), young_wrappers_before_.size());
+
+ EXPECT_TRUE(
+ std::includes(young_wrappers_after.begin(), young_wrappers_after.end(),
+ expected_wrappers_.begin(), expected_wrappers_.end()));
+ EXPECT_EQ(expected_wrappers_.size(),
+ young_wrappers_after.size() - young_wrappers_before_.size());
+ }
+
+ private:
+ Isolate& isolate_;
+ YoungWrapperCollector::YoungWrappers expected_wrappers_;
+ YoungWrapperCollector::YoungWrappers young_wrappers_before_;
+};
+
+class ExpectCppGCToV8NoGenerationalBarrier {
+ public:
+ explicit ExpectCppGCToV8NoGenerationalBarrier(v8::Isolate& isolate)
+ : isolate_(reinterpret_cast<Isolate&>(isolate)) {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ young_wrappers_before_ = visitor.get_wrappers();
+ }
+
+ ~ExpectCppGCToV8NoGenerationalBarrier() {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ const auto young_wrappers_after = visitor.get_wrappers();
+ EXPECT_EQ(young_wrappers_before_, young_wrappers_after);
+ }
+
+ private:
+ Isolate& isolate_;
+ YoungWrapperCollector::YoungWrappers young_wrappers_before_;
+};
+
+} // namespace
+
+class YoungUnifiedHeapTest : public MinorMCEnabler, public UnifiedHeapTest {
+ public:
+ YoungUnifiedHeapTest() {
+ // Enable young generation flag and run GC. After the first run the heap
+ // will enable minor GC.
+ CollectGarbageWithoutEmbedderStack();
+ }
+};
+
+TEST_F(YoungUnifiedHeapTest, OnlyGC) { CollectYoungGarbageWithEmbedderStack(); }
+
+TEST_F(YoungUnifiedHeapTest, CollectUnreachableCppGCObject) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ EXPECT_FALSE(api_object.IsEmpty());
+
+ Wrappable::destructor_callcount = 0;
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(1u, Wrappable::destructor_callcount);
+}
+
+TEST_F(YoungUnifiedHeapTest, FindingV8ToCppGCReference) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, &wrappable_type, wrappable_object);
+ EXPECT_FALSE(api_object.IsEmpty());
+
+ Wrappable::destructor_callcount = 0;
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+
+ WrapperHelper::ResetWrappableConnection(api_object);
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(1u, Wrappable::destructor_callcount);
+}
+
+TEST_F(YoungUnifiedHeapTest, FindingCppGCToV8Reference) {
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+ wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+
+ CollectYoungGarbageWithEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierV8ToCppGCReference) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ auto handle_api_object =
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(api_object));
+
+ EXPECT_TRUE(Heap::InYoungGeneration(*handle_api_object));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_FALSE(Heap::InYoungGeneration(*handle_api_object));
+
+ auto* wrappable = cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ uint16_t type_info = WrapperHelper::kTracedEmbedderId;
+ WrapperHelper::SetWrappableConnection(api_object, &type_info, wrappable);
+
+ Wrappable::destructor_callcount = 0;
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+}
+
+TEST_F(YoungUnifiedHeapTest,
+ GenerationalBarrierCppGCToV8NoInitializingStoreBarrier) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ auto local = v8::Object::New(v8_isolate());
+ {
+ ExpectCppGCToV8NoGenerationalBarrier expect_no_barrier(*v8_isolate());
+ auto* wrappable = cppgc::MakeGarbageCollected<Wrappable>(
+ allocation_handle(), v8_isolate(), local);
+ auto* copied_wrappable =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle(), *wrappable);
+ auto* moved_wrappable = cppgc::MakeGarbageCollected<Wrappable>(
+ allocation_handle(), std::move(*wrappable));
+ USE(moved_wrappable);
+ USE(copied_wrappable);
+ USE(wrappable);
+ }
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierCppGCToV8ReferenceReset) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::Persistent<Wrappable> wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ EXPECT_TRUE(IsHeapObjectYoung(wrappable_object.Get()));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_TRUE(IsHeapObjectOld(wrappable_object.Get()));
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+ {
+ ExpectCppGCToV8GenerationalBarrierToFire expect_barrier(
+ *v8_isolate(), {*reinterpret_cast<Address*>(*local)});
+ wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+ }
+
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierCppGCToV8ReferenceCopy) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::Persistent<Wrappable> wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ EXPECT_TRUE(IsHeapObjectYoung(wrappable_object.Get()));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_TRUE(IsHeapObjectOld(wrappable_object.Get()));
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+
+ Wrappable* another_wrappable_object = nullptr;
+ {
+ // Assign to young host and expect no barrier.
+ ExpectCppGCToV8NoGenerationalBarrier expect_no_barrier(*v8_isolate());
+ another_wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ another_wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+ {
+ // Assign to old object using TracedReference::operator= and expect
+ // the barrier to trigger.
+ ExpectCppGCToV8GenerationalBarrierToFire expect_barrier(
+ *v8_isolate(), {*reinterpret_cast<Address*>(*local)});
+ *wrappable_object = *another_wrappable_object;
+ }
+ }
+
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierCppGCToV8ReferenceMove) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::Persistent<Wrappable> wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ EXPECT_TRUE(IsHeapObjectYoung(wrappable_object.Get()));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_TRUE(IsHeapObjectOld(wrappable_object.Get()));
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+
+ Wrappable* another_wrappable_object = nullptr;
+ {
+ // Assign to young host and expect no barrier.
+ ExpectCppGCToV8NoGenerationalBarrier expect_no_barrier(*v8_isolate());
+ another_wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ another_wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+ {
+ // Assign to old object using TracedReference::operator= and expect
+ // the barrier to trigger.
+ ExpectCppGCToV8GenerationalBarrierToFire expect_barrier(
+ *v8_isolate(), {*reinterpret_cast<Address*>(*local)});
+ *wrappable_object = std::move(*another_wrappable_object);
+ }
+ }
+
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(CPPGC_YOUNG_GENERATION)
diff --git a/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc b/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
index 9a18c49a2c..3c878411ca 100644
--- a/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
@@ -183,38 +183,60 @@ TEST_F(CppgcAllocationTest, LargeDoubleWordAlignedAllocation) {
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromUnaligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
- auto* padding_object =
- MakeGarbageCollected<CustomPadding<kWord>>(GetAllocationHandle());
// The address from which the next object can be allocated, i.e. the end of
- // |padding_object|, should not be properly aligned.
- ASSERT_EQ(kWord, (reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object)) &
- kAlignmentMask);
+ // |padding_object|, should not be double-word aligned. Allocate extra objects
+ // to ensure padding in case payload start is 16-byte aligned.
+ using PaddingObject = CustomPadding<kDoubleWord>;
+ static_assert(((sizeof(HeapObjectHeader) + sizeof(PaddingObject)) %
+ kDoubleWord) == kWord);
+
+ void* padding_object = nullptr;
+ if (NormalPage::PayloadSize() % kDoubleWord == 0) {
+ padding_object = MakeGarbageCollected<PaddingObject>(GetAllocationHandle());
+ ASSERT_EQ(kWord, (reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject)) &
+ kAlignmentMask);
+ }
+
auto* aligned_object =
MakeGarbageCollected<AlignedCustomPadding<16>>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(aligned_object) & kAlignmentMask);
- // Test only yielded a reliable result if objects are adjacent to each other.
- ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object) + sizeof(HeapObjectHeader),
- reinterpret_cast<uintptr_t>(aligned_object));
+ if (padding_object) {
+ // Test only yielded a reliable result if objects are adjacent to each
+ // other.
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject) + sizeof(HeapObjectHeader),
+ reinterpret_cast<uintptr_t>(aligned_object));
+ }
}
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromAligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
- auto* padding_object =
- MakeGarbageCollected<CustomPadding<16>>(GetAllocationHandle());
// The address from which the next object can be allocated, i.e. the end of
- // |padding_object|, should be properly aligned.
- ASSERT_EQ(0u, (reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object)) &
- kAlignmentMask);
+ // |padding_object|, should be double-word aligned. Allocate extra objects to
+ // ensure padding in case payload start is 8-byte aligned.
+ using PaddingObject = CustomPadding<kDoubleWord>;
+ static_assert(((sizeof(HeapObjectHeader) + sizeof(PaddingObject)) %
+ kDoubleWord) == kWord);
+
+ void* padding_object = nullptr;
+ if (NormalPage::PayloadSize() % kDoubleWord == kWord) {
+ padding_object = MakeGarbageCollected<PaddingObject>(GetAllocationHandle());
+ ASSERT_EQ(0u, (reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject)) &
+ kAlignmentMask);
+ }
+
auto* aligned_object =
MakeGarbageCollected<AlignedCustomPadding<16>>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(aligned_object) & kAlignmentMask);
- // Test only yielded a reliable result if objects are adjacent to each other.
- ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object) + 2 * sizeof(HeapObjectHeader),
- reinterpret_cast<uintptr_t>(aligned_object));
+ if (padding_object) {
+ // Test only yielded a reliable result if objects are adjacent to each
+ // other.
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject) + 2 * sizeof(HeapObjectHeader),
+ reinterpret_cast<uintptr_t>(aligned_object));
+ }
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
index d2ea739016..77dd67c2b3 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
@@ -56,6 +56,7 @@ class NonFinalizable : public GarbageCollected<NonFinalizable<Size>> {
private:
char array_[Size];
+ int padding_to_make_size_the_same_as_finalizible_;
};
using NormalNonFinalizable = NonFinalizable<32>;
@@ -355,5 +356,46 @@ TEST_F(ConcurrentSweeperTest, IncrementalSweeping) {
FinishSweeping();
}
+TEST_F(ConcurrentSweeperTest, SweepOnAllocationReturnEmptyPage) {
+ PreciseGC();
+
+ // First, allocate the full page of finalizable objects.
+ const size_t objects_to_allocated =
+ NormalPage::PayloadSize() /
+ (sizeof(HeapObjectHeader) + sizeof(NormalFinalizable));
+ auto* first_obj =
+ MakeGarbageCollected<NormalFinalizable>(GetAllocationHandle());
+ auto* finalizable_page =
+ NormalPage::FromInnerAddress(&HeapBase::From(GetHeapHandle()), first_obj);
+ for (size_t i = 1; i < objects_to_allocated; ++i) {
+ MakeGarbageCollected<NormalFinalizable>(GetAllocationHandle());
+ }
+
+ // Then, allocate a new unfinalizable object on a fresh page. We do that so
+ // that the sweeper on allocation doesn't allocate a new page.
+ auto* non_finalizable =
+ MakeGarbageCollected<NormalNonFinalizable>(GetAllocationHandle());
+ auto* non_finalizable_page = NormalPage::FromInnerAddress(
+ &HeapBase::From(GetHeapHandle()), non_finalizable);
+ ASSERT_NE(finalizable_page, non_finalizable_page);
+
+ // Start the GC without sweeping.
+ static constexpr GCConfig config = {
+ CollectionType::kMajor, StackState::kNoHeapPointers,
+ GCConfig::MarkingType::kAtomic,
+ GCConfig::SweepingType::kIncrementalAndConcurrent};
+ Heap::From(GetHeap())->CollectGarbage(config);
+
+ WaitForConcurrentSweeping();
+
+ // Allocate and sweep.
+ auto* allocated_after_sweeping =
+ MakeGarbageCollected<NormalFinalizable>(GetAllocationHandle());
+ // Check that the empty page of finalizable objects was returned.
+ EXPECT_EQ(finalizable_page,
+ NormalPage::FromInnerAddress(&HeapBase::From(GetHeapHandle()),
+ allocated_after_sweeping));
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc b/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
index 8f4bb9fb75..69fb2ce7a0 100644
--- a/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
@@ -50,7 +50,7 @@ TEST(LoggingTest, ConstexprContext) {
}
#endif
-#if DEBUG && !defined(OFFICIAL_BUILD)
+#if DEBUG && !defined(OFFICIAL_BUILD) && GTEST_HAS_DEATH_TEST
TEST(LoggingTest, Message) {
using ::testing::ContainsRegex;
EXPECT_DEATH_IF_SUPPORTED(CPPGC_DCHECK(5 == 7),
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
index 39909ab7bc..739105eee8 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
@@ -23,9 +23,9 @@ class MarkingVerifierTest : public testing::TestWithHeap {
V8_NOINLINE void VerifyMarking(HeapBase& heap, StackState stack_state,
size_t expected_marked_bytes) {
Heap::From(GetHeap())->object_allocator().ResetLinearAllocationBuffers();
+ Heap::From(GetHeap())->stack()->SetMarkerToCurrentStackPosition();
MarkingVerifier verifier(heap, CollectionType::kMajor);
- verifier.Run(stack_state, v8::base::Stack::GetCurrentStackPosition(),
- expected_marked_bytes);
+ verifier.Run(stack_state, expected_marked_bytes);
}
};
diff --git a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
index b6dd973c49..03cf1383fa 100644
--- a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
@@ -65,10 +65,12 @@ struct CustomWriteBarrierPolicy {
static void InitializingBarrier(const void* slot, const void* value) {
++InitializingWriteBarriersTriggered;
}
+ template <WriteBarrierSlotType>
static void AssigningBarrier(const void* slot, const void* value) {
++AssigningWriteBarriersTriggered;
}
- static void AssigningBarrier(const void* slot, MemberStorage) {
+ template <WriteBarrierSlotType>
+ static void AssigningBarrier(const void* slot, DefaultMemberStorage) {
++AssigningWriteBarriersTriggered;
}
};
diff --git a/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
index 1625a3a586..fd8fcd8d54 100644
--- a/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
@@ -6,6 +6,7 @@
#include "include/cppgc/allocation.h"
#include "src/base/macros.h"
+#include "src/base/page-allocator.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/page-memory.h"
@@ -18,164 +19,171 @@ namespace internal {
namespace {
-bool IsEmpty(const ObjectStartBitmap& bitmap) {
- size_t count = 0;
- bitmap.Iterate([&count](Address) { count++; });
- return count == 0;
-}
-
-// Abstraction for objects that hides ObjectStartBitmap::kGranularity and
-// the base address as getting either of it wrong will result in failed DCHECKs.
-class Object {
+class PageWithBitmap final {
public:
- static Address kBaseOffset;
+ PageWithBitmap()
+ : base_(allocator_.AllocatePages(
+ nullptr, kPageSize, kPageSize,
+ v8::base::PageAllocator::Permission::kReadWrite)),
+ bitmap_(new(base_) ObjectStartBitmap) {}
+
+ PageWithBitmap(const PageWithBitmap&) = delete;
+ PageWithBitmap& operator=(const PageWithBitmap&) = delete;
+
+ ~PageWithBitmap() { allocator_.FreePages(base_, kPageSize); }
+
+ ObjectStartBitmap& bitmap() const { return *bitmap_; }
+
+ void* base() const { return base_; }
+ size_t size() const { return kPageSize; }
+
+ v8::base::PageAllocator allocator_;
+ void* base_;
+ ObjectStartBitmap* bitmap_;
+};
- explicit Object(size_t number) : number_(number) {
- const size_t max_entries = ObjectStartBitmap::MaxEntries();
- EXPECT_GE(max_entries, number_);
+class ObjectStartBitmapTest : public ::testing::Test {
+ protected:
+ void AllocateObject(size_t object_position) {
+ bitmap().SetBit(ObjectAddress(object_position));
}
- Address address() const {
- return kBaseOffset + ObjectStartBitmap::Granularity() * number_;
+ void FreeObject(size_t object_position) {
+ bitmap().ClearBit(ObjectAddress(object_position));
}
- HeapObjectHeader* header() const {
- return reinterpret_cast<HeapObjectHeader*>(address());
+ bool CheckObjectAllocated(size_t object_position) {
+ return bitmap().CheckBit(ObjectAddress(object_position));
}
- // Allow implicitly converting Object to Address.
- operator Address() const { return address(); }
+ Address ObjectAddress(size_t pos) const {
+ return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(page.base()) +
+ pos * ObjectStartBitmap::Granularity());
+ }
+
+ HeapObjectHeader* ObjectHeader(size_t pos) const {
+ return reinterpret_cast<HeapObjectHeader*>(ObjectAddress(pos));
+ }
+
+ ObjectStartBitmap& bitmap() const { return page.bitmap(); }
+
+ bool IsEmpty() const {
+ size_t count = 0;
+ bitmap().Iterate([&count](Address) { count++; });
+ return count == 0;
+ }
private:
- const size_t number_;
+ PageWithBitmap page;
};
-Address Object::kBaseOffset = reinterpret_cast<Address>(0x4000);
-
} // namespace
-TEST(ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
+TEST_F(ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
const size_t max_entries = ObjectStartBitmap::MaxEntries();
EXPECT_LT(0u, max_entries);
}
-TEST(ObjectStartBitmapTest, InitialEmpty) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- EXPECT_TRUE(IsEmpty(bitmap));
-}
+TEST_F(ObjectStartBitmapTest, InitialEmpty) { EXPECT_TRUE(IsEmpty()); }
-TEST(ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- bitmap.SetBit(Object(0));
- EXPECT_FALSE(IsEmpty(bitmap));
+TEST_F(ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
+ AllocateObject(0);
+ EXPECT_FALSE(IsEmpty());
}
-TEST(ObjectStartBitmapTest, SetBitCheckBit) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(7);
- bitmap.SetBit(object);
- EXPECT_TRUE(bitmap.CheckBit(object));
+TEST_F(ObjectStartBitmapTest, SetBitCheckBit) {
+ constexpr size_t object_num = 7;
+ AllocateObject(object_num);
+ EXPECT_TRUE(CheckObjectAllocated(object_num));
}
-TEST(ObjectStartBitmapTest, SetBitClearbitCheckBit) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(77);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_FALSE(bitmap.CheckBit(object));
+TEST_F(ObjectStartBitmapTest, SetBitClearbitCheckBit) {
+ constexpr size_t object_num = 77;
+ AllocateObject(object_num);
+ FreeObject(object_num);
+ EXPECT_FALSE(CheckObjectAllocated(object_num));
}
-TEST(ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(123);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_TRUE(IsEmpty(bitmap));
+TEST_F(ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
+ constexpr size_t object_num = 123;
+ AllocateObject(object_num);
+ FreeObject(object_num);
+ EXPECT_TRUE(IsEmpty());
}
-TEST(ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object0(0);
- Object object1(1);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(Object(3)));
+TEST_F(ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
+ AllocateObject(0);
+ AllocateObject(1);
+ EXPECT_FALSE(CheckObjectAllocated(3));
size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
+ bitmap().Iterate([&count, this](Address current) {
if (count == 0) {
- EXPECT_EQ(object0.address(), current);
+ EXPECT_EQ(ObjectAddress(0), current);
} else if (count == 1) {
- EXPECT_EQ(object1.address(), current);
+ EXPECT_EQ(ObjectAddress(1), current);
}
count++;
});
EXPECT_EQ(2u, count);
}
-TEST(ObjectStartBitmapTest, AdjacentObjectsAtEnd) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- const size_t last_entry_index = ObjectStartBitmap::MaxEntries() - 1;
- Object object0(last_entry_index - 1);
- Object object1(last_entry_index);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(Object(last_entry_index - 2)));
+TEST_F(ObjectStartBitmapTest, AdjacentObjectsAtEnd) {
+ static constexpr size_t last_entry_index =
+ ObjectStartBitmap::MaxEntries() - 1;
+ AllocateObject(last_entry_index);
+ AllocateObject(last_entry_index - 1);
+ EXPECT_FALSE(CheckObjectAllocated(last_entry_index - 2));
size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
+ bitmap().Iterate([&count, this](Address current) {
if (count == 0) {
- EXPECT_EQ(object0.address(), current);
+ EXPECT_EQ(ObjectAddress(last_entry_index - 1), current);
} else if (count == 1) {
- EXPECT_EQ(object1.address(), current);
+ EXPECT_EQ(ObjectAddress(last_entry_index), current);
}
count++;
});
EXPECT_EQ(2u, count);
}
-TEST(ObjectStartBitmapTest, FindHeaderExact) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.header(), bitmap.FindHeader(object.address()));
+TEST_F(ObjectStartBitmapTest, FindHeaderExact) {
+ constexpr size_t object_num = 654;
+ AllocateObject(object_num);
+ EXPECT_EQ(ObjectHeader(object_num),
+ bitmap().FindHeader(ObjectAddress(object_num)));
}
-TEST(ObjectStartBitmapTest, FindHeaderApproximate) {
+TEST_F(ObjectStartBitmapTest, FindHeaderApproximate) {
static const size_t kInternalDelta = 37;
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.header(),
- bitmap.FindHeader(object.address() + kInternalDelta));
+ constexpr size_t object_num = 654;
+ AllocateObject(object_num);
+ EXPECT_EQ(ObjectHeader(object_num),
+ bitmap().FindHeader(ObjectAddress(object_num) + kInternalDelta));
}
-TEST(ObjectStartBitmapTest, FindHeaderIteratingWholeBitmap) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object_to_find(Object(0));
- Address hint_index = Object(ObjectStartBitmap::MaxEntries() - 1);
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.header(), bitmap.FindHeader(hint_index));
+TEST_F(ObjectStartBitmapTest, FindHeaderIteratingWholeBitmap) {
+ AllocateObject(0);
+ Address hint_index = ObjectAddress(ObjectStartBitmap::MaxEntries() - 1);
+ EXPECT_EQ(ObjectHeader(0), bitmap().FindHeader(hint_index));
}
-TEST(ObjectStartBitmapTest, FindHeaderNextCell) {
+TEST_F(ObjectStartBitmapTest, FindHeaderNextCell) {
// This white box test makes use of the fact that cells are of type uint8_t.
const size_t kCellSize = sizeof(uint8_t);
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object_to_find(Object(kCellSize - 1));
- Address hint = Object(kCellSize);
- bitmap.SetBit(Object(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.header(), bitmap.FindHeader(hint));
+ AllocateObject(0);
+ AllocateObject(kCellSize - 1);
+ Address hint = ObjectAddress(kCellSize);
+ EXPECT_EQ(ObjectHeader(kCellSize - 1), bitmap().FindHeader(hint));
}
-TEST(ObjectStartBitmapTest, FindHeaderSameCell) {
+TEST_F(ObjectStartBitmapTest, FindHeaderSameCell) {
// This white box test makes use of the fact that cells are of type uint8_t.
const size_t kCellSize = sizeof(uint8_t);
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object_to_find(Object(kCellSize - 1));
- bitmap.SetBit(Object(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.header(),
- bitmap.FindHeader(object_to_find.address()));
+ AllocateObject(0);
+ AllocateObject(kCellSize - 1);
+ Address hint = ObjectAddress(kCellSize);
+ EXPECT_EQ(ObjectHeader(kCellSize - 1), bitmap().FindHeader(hint));
+ EXPECT_EQ(ObjectHeader(kCellSize - 1),
+ bitmap().FindHeader(ObjectAddress(kCellSize - 1)));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/test-platform.cc b/deps/v8/test/unittests/heap/cppgc/test-platform.cc
index 5b2139a101..bcf6ada50e 100644
--- a/deps/v8/test/unittests/heap/cppgc/test-platform.cc
+++ b/deps/v8/test/unittests/heap/cppgc/test-platform.cc
@@ -24,7 +24,8 @@ std::unique_ptr<cppgc::JobHandle> TestPlatform::PostJob(
}
void TestPlatform::RunAllForegroundTasks() {
- v8::platform::PumpMessageLoop(v8_platform_.get(), kNoIsolate);
+ while (v8::platform::PumpMessageLoop(v8_platform_.get(), kNoIsolate)) {
+ }
if (GetForegroundTaskRunner()->IdleTasksEnabled()) {
v8::platform::RunIdleTasks(v8_platform_.get(), kNoIsolate,
std::numeric_limits<double>::max());
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.cc b/deps/v8/test/unittests/heap/cppgc/tests.cc
index b144908dda..77a48e9cfe 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/tests.cc
@@ -11,6 +11,7 @@
#if !CPPGC_IS_STANDALONE
#include "include/v8-initialization.h"
+#include "src/init/v8.h"
#endif // !CPPGC_IS_STANDALONE
namespace cppgc {
@@ -28,7 +29,7 @@ void TestWithPlatform::SetUpTestSuite() {
#if !CPPGC_IS_STANDALONE
// For non-standalone builds, we need to initialize V8's platform so that it
// can be looked-up by trace-event.h.
- v8::V8::InitializePlatform(platform_->GetV8Platform());
+ i::V8::InitializePlatformForTesting(platform_->GetV8Platform());
v8::V8::Initialize();
#endif // !CPPGC_IS_STANDALONE
}
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index 5a9536b048..52ca765d04 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -122,6 +122,7 @@ class TestSupportingAllocationOnly : public TestWithHeap {
TestSupportingAllocationOnly();
private:
+ CPPGC_STACK_ALLOCATED_IGNORE("permitted for test code")
subtle::NoGarbageCollectionScope no_gc_scope_;
};
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
deleted file mode 100644
index 39b9712bc6..0000000000
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ /dev/null
@@ -1,1212 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/embedder-tracing.h"
-
-#include "include/v8-function.h"
-#include "include/v8-template.h"
-#include "src/handles/global-handles.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/heap.h"
-#include "test/unittests/heap/heap-utils.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-using LocalEmbedderHeapTracerWithIsolate = TestWithHeapInternals;
-
-namespace heap {
-
-using testing::StrictMock;
-using testing::_;
-using testing::Return;
-using v8::EmbedderHeapTracer;
-using v8::internal::LocalEmbedderHeapTracer;
-
-namespace {
-
-LocalEmbedderHeapTracer::WrapperInfo CreateWrapperInfo() {
- return LocalEmbedderHeapTracer::WrapperInfo(nullptr, nullptr);
-}
-
-} // namespace
-
-START_ALLOW_USE_DEPRECATED()
-class MockEmbedderHeapTracer : public EmbedderHeapTracer {
- public:
- MOCK_METHOD(void, TracePrologue, (EmbedderHeapTracer::TraceFlags),
- (override));
- MOCK_METHOD(void, TraceEpilogue, (EmbedderHeapTracer::TraceSummary*),
- (override));
- MOCK_METHOD(void, EnterFinalPause, (EmbedderHeapTracer::EmbedderStackState),
- (override));
- MOCK_METHOD(bool, IsTracingDone, (), (override));
- MOCK_METHOD(void, RegisterV8References,
- ((const std::vector<std::pair<void*, void*> >&)), (override));
- MOCK_METHOD(bool, AdvanceTracing, (double deadline_in_ms), (override));
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-TEST(LocalEmbedderHeapTracer, InUse) {
- MockEmbedderHeapTracer mock_remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&mock_remote_tracer);
- EXPECT_TRUE(local_tracer.InUse());
-}
-
-TEST(LocalEmbedderHeapTracer, NoRemoteTracer) {
- LocalEmbedderHeapTracer local_tracer(nullptr);
- // We should be able to call all functions without a remote tracer being
- // attached.
- EXPECT_FALSE(local_tracer.InUse());
- local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags);
- local_tracer.EnterFinalPause();
- bool done = local_tracer.Trace(std::numeric_limits<double>::infinity());
- EXPECT_TRUE(done);
- local_tracer.TraceEpilogue();
-}
-
-TEST(LocalEmbedderHeapTracer, TracePrologueForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, TracePrologue(_));
- local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags);
-}
-
-TEST(LocalEmbedderHeapTracer, TracePrologueForwardsMemoryReducingFlag) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer,
- TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory));
- local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory);
-}
-
-TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, TraceEpilogue(_));
- local_tracer.TraceEpilogue();
-}
-
-TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, EnterFinalPause(_));
- local_tracer.EnterFinalPause();
-}
-
-TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, IsTracingDone());
- local_tracer.IsRemoteTracingDone();
-}
-
-TEST(LocalEmbedderHeapTracer, EnterFinalPauseDefaultStackStateUnkown) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- // The default stack state is expected to be unkown.
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
- local_tracer.EnterFinalPause();
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate,
- EnterFinalPauseStackStateIsForwarded) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, TemporaryEmbedderStackState) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- // Default is unknown, see above.
- {
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- EXPECT_CALL(remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
- }
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate,
- TemporaryEmbedderStackStateRestores) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- // Default is unknown, see above.
- {
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- {
- EmbedderStackStateScope nested_scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
- local_tracer.EnterFinalPause();
- }
- EXPECT_CALL(remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
- }
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, TraceEpilogueStackStateResets) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
- EXPECT_CALL(remote_tracer, TraceEpilogue(_));
- local_tracer.TraceEpilogue();
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
- local_tracer.EnterFinalPause();
-}
-
-TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneIncludesRemote) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, IsTracingDone());
- local_tracer.IsRemoteTracingDone();
-}
-
-TEST(LocalEmbedderHeapTracer, RegisterV8ReferencesWithRemoteTracer) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- {
- LocalEmbedderHeapTracer::ProcessingScope scope(&local_tracer);
- scope.AddWrapperInfoForTesting(CreateWrapperInfo());
- EXPECT_CALL(remote_tracer, RegisterV8References(_));
- }
- EXPECT_CALL(remote_tracer, IsTracingDone()).WillOnce(Return(false));
- EXPECT_FALSE(local_tracer.IsRemoteTracingDone());
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, SetRemoteTracerSetsIsolate) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_EQ(isolate(), reinterpret_cast<Isolate*>(remote_tracer.isolate()));
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, DestructorClearsIsolate) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- {
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_EQ(isolate(), reinterpret_cast<Isolate*>(remote_tracer.isolate()));
- }
- EXPECT_EQ(nullptr, remote_tracer.isolate());
-}
-
-namespace {
-
-v8::Local<v8::Object> ConstructTraceableJSApiObject(
- v8::Local<v8::Context> context, void* first_field, void* second_field) {
- v8::EscapableHandleScope scope(context->GetIsolate());
- v8::Local<v8::FunctionTemplate> function_t =
- v8::FunctionTemplate::New(context->GetIsolate());
- v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
- instance_t->SetInternalFieldCount(2);
- v8::Local<v8::Function> function =
- function_t->GetFunction(context).ToLocalChecked();
- v8::Local<v8::Object> instance =
- function->NewInstance(context).ToLocalChecked();
- instance->SetAlignedPointerInInternalField(0, first_field);
- instance->SetAlignedPointerInInternalField(1, second_field);
- EXPECT_FALSE(instance.IsEmpty());
- i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
- EXPECT_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
- return scope.Escape(instance);
-}
-
-enum class TracePrologueBehavior { kNoop, kCallV8WriteBarrier };
-
-START_ALLOW_USE_DEPRECATED()
-
-class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
- public:
- TestEmbedderHeapTracer() = default;
- TestEmbedderHeapTracer(TracePrologueBehavior prologue_behavior,
- v8::Global<v8::Array> array)
- : prologue_behavior_(prologue_behavior), array_(std::move(array)) {}
-
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {
- registered_from_v8_.insert(registered_from_v8_.end(),
- embedder_fields.begin(), embedder_fields.end());
- }
-
- void AddReferenceForTracing(v8::TracedReference<v8::Value>* ref) {
- to_register_with_v8_references_.push_back(ref);
- }
-
- bool AdvanceTracing(double deadline_in_ms) final {
- for (auto ref : to_register_with_v8_references_) {
- RegisterEmbedderReference(ref->As<v8::Data>());
- }
- to_register_with_v8_references_.clear();
- return true;
- }
-
- bool IsTracingDone() final { return to_register_with_v8_references_.empty(); }
-
- void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
- if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
- auto local = array_.Get(isolate());
- local
- ->Set(local->GetCreationContext().ToLocalChecked(), 0,
- v8::Object::New(isolate()))
- .Check();
- }
- }
-
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-
- bool IsRegisteredFromV8(void* first_field) const {
- for (auto pair : registered_from_v8_) {
- if (pair.first == first_field) return true;
- }
- return false;
- }
-
- void DoNotConsiderAsRootForScavenge(v8::TracedReference<v8::Value>* handle) {
- handle->SetWrapperClassId(17);
- non_root_handles_.push_back(handle);
- }
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- return handle.WrapperClassId() != 17;
- }
-
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- for (auto* non_root_handle : non_root_handles_) {
- if (*non_root_handle == handle) {
- non_root_handle->Reset();
- }
- }
- }
-
- private:
- std::vector<std::pair<void*, void*>> registered_from_v8_;
- std::vector<v8::TracedReference<v8::Value>*> to_register_with_v8_references_;
- TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop;
- v8::Global<v8::Array> array_;
- std::vector<v8::TracedReference<v8::Value>*> non_root_handles_;
-};
-
-class V8_NODISCARD TemporaryEmbedderHeapTracerScope final {
- public:
- TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
- v8::EmbedderHeapTracer* tracer)
- : isolate_(isolate) {
- isolate_->SetEmbedderHeapTracer(tracer);
- }
-
- ~TemporaryEmbedderHeapTracerScope() {
- isolate_->SetEmbedderHeapTracer(nullptr);
- }
-
- private:
- v8::Isolate* const isolate_;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-} // namespace
-
-using EmbedderTracingTest = TestWithHeapInternalsAndContext;
-
-TEST_F(EmbedderTracingTest, V8RegisterEmbedderReference) {
- // Tests that wrappers are properly registered with the embedder heap
- // tracer.
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
-
- void* first_and_second_field = reinterpret_cast<void*>(0x2);
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- ASSERT_FALSE(api_object.IsEmpty());
- CollectGarbage(i::OLD_SPACE);
- EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST_F(EmbedderTracingTest, EmbedderRegisteringV8Reference) {
- // Tests that references that are registered by the embedder heap tracer are
- // considered live by V8.
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
-
- auto handle = std::make_unique<v8::TracedReference<v8::Value>>();
- {
- v8::HandleScope inner_scope(v8_isolate());
- v8::Local<v8::Value> o =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- handle->Reset(v8_isolate(), o);
- }
- tracer.AddReferenceForTracing(handle.get());
- CollectGarbage(i::OLD_SPACE);
- EXPECT_FALSE(handle->IsEmpty());
-}
-
-TEST_F(EmbedderTracingTest, TracingInEphemerons) {
- // Tests that wrappers that are part of ephemerons are traced.
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
-
- v8::Local<v8::Object> key =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- void* first_and_second_field = reinterpret_cast<void*>(0x8);
- Handle<JSWeakMap> weak_map = i_isolate()->factory()->NewJSWeakMap();
- {
- v8::HandleScope inner_scope(v8_isolate());
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- EXPECT_FALSE(api_object.IsEmpty());
- Handle<JSObject> js_key =
- handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate());
- Handle<JSReceiver> js_api_object = v8::Utils::OpenHandle(*api_object);
- int32_t hash = js_key->GetOrCreateHash(i_isolate()).value();
- JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
- }
- CollectGarbage(i::OLD_SPACE);
- EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST_F(EmbedderTracingTest, FinalizeTracingIsNoopWhenNotMarking) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- // Finalize a potentially running garbage collection.
- CollectGarbage(OLD_SPACE);
- EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped());
-
- int gc_counter = i_isolate()->heap()->gc_count();
- tracer.FinalizeTracing();
- EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped());
- EXPECT_EQ(gc_counter, i_isolate()->heap()->gc_count());
-}
-
-TEST_F(EmbedderTracingTest, FinalizeTracingWhenMarking) {
- if (!v8_flags.incremental_marking) return;
- ManualGCScope manual_gc(i_isolate());
- Heap* heap = i_isolate()->heap();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- // Finalize a potentially running garbage collection.
- CollectGarbage(OLD_SPACE);
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
- heap->tracer()->StopFullCycleIfNeeded();
- EXPECT_TRUE(heap->incremental_marking()->IsStopped());
-
- i::IncrementalMarking* marking = heap->incremental_marking();
- {
- IsolateSafepointScope scope(heap);
- heap->tracer()->StartCycle(
- GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector cctest", GCTracer::MarkingType::kIncremental);
- marking->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting);
- }
-
- // Sweeping is not runing so we should immediately start marking.
- EXPECT_TRUE(marking->IsMarking());
- tracer.FinalizeTracing();
- EXPECT_TRUE(marking->IsStopped());
-}
-
-namespace {
-
-void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- v8::TracedReference<v8::Object>* handle) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(v8::Object::New(isolate));
- EXPECT_FALSE(object.IsEmpty());
- *handle = v8::TracedReference<v8::Object>(isolate, object);
- EXPECT_FALSE(handle->IsEmpty());
-}
-
-template <typename T>
-void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- T* global) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(
- ConstructTraceableJSApiObject(context, nullptr, nullptr));
- EXPECT_FALSE(object.IsEmpty());
- *global = T(isolate, object);
- EXPECT_FALSE(global->IsEmpty());
-}
-
-enum class SurvivalMode { kSurvives, kDies };
-
-template <typename ModifierFunction, typename ConstructTracedReferenceFunction,
- typename GCFunction>
-void TracedReferenceTest(v8::Isolate* isolate,
- ConstructTracedReferenceFunction construct_function,
- ModifierFunction modifier_function,
- GCFunction gc_function, SurvivalMode survives) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- auto* global_handles =
- reinterpret_cast<i::Isolate*>(isolate)->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
- construct_function(isolate, context, handle.get());
- ASSERT_TRUE(IsNewObjectInCorrectGeneration(isolate, *handle));
- modifier_function(*handle);
- const size_t after_modification_count = global_handles->handles_count();
- gc_function();
- // Cannot check the handle as it is not explicitly cleared by the GC. Instead
- // check the handles count.
- CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
- after_modification_count == global_handles->handles_count());
- CHECK_IMPLIES(survives == SurvivalMode::kDies,
- initial_count == global_handles->handles_count());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceReset) {
- v8::HandleScope scope(v8_isolate());
- v8::TracedReference<v8::Object> handle;
- ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &handle);
- EXPECT_FALSE(handle.IsEmpty());
- handle.Reset();
- EXPECT_TRUE(handle.IsEmpty());
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceCopyReferences) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope outer_scope(v8_isolate());
- auto* traced_handles = i_isolate()->traced_handles();
-
- const size_t initial_count = traced_handles->used_node_count();
- auto handle1 = std::make_unique<v8::TracedReference<v8::Value>>();
- {
- v8::HandleScope scope(v8_isolate());
- handle1->Reset(v8_isolate(), v8::Object::New(v8_isolate()));
- }
- auto handle2 = std::make_unique<v8::TracedReference<v8::Value>>(*handle1);
- auto handle3 = std::make_unique<v8::TracedReference<v8::Value>>();
- *handle3 = *handle2;
- EXPECT_EQ(initial_count + 3, traced_handles->used_node_count());
- EXPECT_FALSE(handle1->IsEmpty());
- EXPECT_EQ(*handle1, *handle2);
- EXPECT_EQ(*handle2, *handle3);
- {
- v8::HandleScope scope(v8_isolate());
- auto tmp = v8::Local<v8::Value>::New(v8_isolate(), *handle3);
- EXPECT_FALSE(tmp.IsEmpty());
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope stack_scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate())
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC();
- }
- EXPECT_EQ(initial_count, traced_handles->used_node_count());
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceToUnmodifiedJSObjectDiesOnFullGC) {
- // When stressing incremental marking, a write barrier may keep the object
- // alive.
- if (v8_flags.stress_incremental_marking) return;
-
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [](const TracedReference<v8::Object>&) {}, [this]() { FullGC(); },
- SurvivalMode::kDies);
-}
-
-TEST_F(
- EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSObjectDiesOnFullGCEvenWhenPointeeIsHeldAlive) {
- ManualGCScope manual_gcs(i_isolate());
- // The TracedReference itself will die as it's not found by the full GC. The
- // pointee will be kept alive through other means.
- v8::Global<v8::Object> strong_global;
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [this, &strong_global](const TracedReference<v8::Object>& handle) {
- v8::HandleScope scope(v8_isolate());
- strong_global =
- v8::Global<v8::Object>(v8_isolate(), handle.Get(v8_isolate()));
- },
- [this, &strong_global]() {
- FullGC();
- strong_global.Reset();
- },
- SurvivalMode::kDies);
-}
-
-TEST_F(EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSObjectSurvivesYoungGC) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
- SurvivalMode::kSurvives);
-}
-
-TEST_F(
- EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSObjectSurvivesYoungGCWhenExcludedFromRoots) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [&tracer](const TracedReference<v8::Object>& handle) {
- tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
- },
- [this]() { YoungGC(); }, SurvivalMode::kSurvives);
-}
-
-TEST_F(EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- TracedReferenceTest(
- v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
- [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
- SurvivalMode::kSurvives);
-}
-
-TEST_F(
- EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- TracedReferenceTest(
- v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
- [&tracer](const TracedReference<v8::Object>& handle) {
- tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
- },
- [this]() { YoungGC(); }, SurvivalMode::kDies);
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceWrapperClassId) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- v8::TracedReference<v8::Object> traced;
- ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &traced);
- EXPECT_EQ(0, traced.WrapperClassId());
- traced.SetWrapperClassId(17);
- EXPECT_EQ(17, traced.WrapperClassId());
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceHandlesMarking) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- auto live = std::make_unique<v8::TracedReference<v8::Value>>();
- auto dead = std::make_unique<v8::TracedReference<v8::Value>>();
- live->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
- dead->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
- auto* traced_handles = i_isolate()->traced_handles();
- {
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.AddReferenceForTracing(live.get());
- const size_t initial_count = traced_handles->used_node_count();
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate())
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC();
- }
- const size_t final_count = traced_handles->used_node_count();
- // Handles are not black allocated, so `dead` is immediately reclaimed.
- EXPECT_EQ(initial_count, final_count + 1);
- }
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceHandlesDoNotLeak) {
- // TracedReference handles are not cleared by the destructor of the embedder
- // object. To avoid leaks we need to mark these handles during GC.
- // This test checks that unmarked handles do not leak.
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- auto ref = std::make_unique<v8::TracedReference<v8::Value>>();
- ref->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
- auto* traced_handles = i_isolate()->traced_handles();
- const size_t initial_count = traced_handles->used_node_count();
- // We need two GCs because handles are black allocated.
- FullGC();
- FullGC();
- const size_t final_count = traced_handles->used_node_count();
- EXPECT_EQ(initial_count, final_count + 1);
-}
-
-namespace {
-
-START_ALLOW_USE_DEPRECATED()
-
-class TracedReferenceVisitor final
- : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
- public:
- ~TracedReferenceVisitor() override = default;
-
- void VisitTracedReference(const TracedReference<Value>& value) final {
- if (value.WrapperClassId() == 57) {
- count_++;
- }
- }
-
- size_t count() const { return count_; }
-
- private:
- size_t count_ = 0;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceIteration) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
- ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(),
- handle.get());
- EXPECT_FALSE(handle->IsEmpty());
- handle->SetWrapperClassId(57);
- TracedReferenceVisitor visitor;
- {
- v8::HandleScope new_scope(v8_isolate());
- tracer.IterateTracedGlobalHandles(&visitor);
- }
- EXPECT_EQ(1u, visitor.count());
-}
-
-TEST_F(EmbedderTracingTest, TracePrologueCallingIntoV8WriteBarrier) {
- // Regression test: https://crbug.com/940003
- if (!v8_flags.incremental_marking) return;
- ManualGCScope manual_gc(isolate());
- v8::HandleScope scope(v8_isolate());
- v8::Global<v8::Array> global;
- {
- v8::HandleScope new_scope(v8_isolate());
- auto local = v8::Array::New(v8_isolate(), 10);
- global.Reset(v8_isolate(), local);
- }
- TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier,
- std::move(global));
- TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- SimulateIncrementalMarking();
- // Finish GC to avoid removing the tracer while GC is running which may end up
- // in an infinite loop because of unprocessed objects.
- FullGC();
-}
-
-TEST_F(EmbedderTracingTest, BasicTracedReference) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- auto* traced_handles = i_isolate()->traced_handles();
-
- const size_t initial_count = traced_handles->used_node_count();
- char* memory = new char[sizeof(v8::TracedReference<v8::Value>)];
- auto* traced = new (memory) v8::TracedReference<v8::Value>();
- {
- v8::HandleScope new_scope(v8_isolate());
- v8::Local<v8::Value> object(ConstructTraceableJSApiObject(
- v8_isolate()->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(traced->IsEmpty());
- *traced = v8::TracedReference<v8::Value>(v8_isolate(), object);
- EXPECT_FALSE(traced->IsEmpty());
- EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
- }
- traced->~TracedReference<v8::Value>();
- EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate())
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC();
- }
- EXPECT_EQ(initial_count, traced_handles->used_node_count());
- delete[] memory;
-}
-
-namespace {
-
-START_ALLOW_USE_DEPRECATED()
-
-class EmptyEmbedderHeapTracer : public v8::EmbedderHeapTracer {
- public:
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
-
- bool AdvanceTracing(double deadline_in_ms) final { return true; }
- bool IsTracingDone() final { return true; }
- void TracePrologue(EmbedderHeapTracer::TraceFlags) final {}
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-// EmbedderHeapTracer that can optimize Scavenger handling when used with
-// TracedReference.
-class EmbedderHeapTracerNoDestructorNonTracingClearing final
- : public EmptyEmbedderHeapTracer {
- public:
- explicit EmbedderHeapTracerNoDestructorNonTracingClearing(
- uint16_t class_id_to_optimize)
- : class_id_to_optimize_(class_id_to_optimize) {}
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- return handle.WrapperClassId() != class_id_to_optimize_;
- }
-
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- if (handle.WrapperClassId() != class_id_to_optimize_) return;
-
- // Convention (for test): Objects that are optimized have their first field
- // set as a back pointer.
- BasicTracedReference<v8::Value>* original_handle =
- reinterpret_cast<BasicTracedReference<v8::Value>*>(
- v8::Object::GetAlignedPointerFromInternalField(
- handle.As<v8::Object>(), 0));
- original_handle->Reset();
- }
-
- private:
- uint16_t class_id_to_optimize_;
-};
-
-template <typename T>
-void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
- uint16_t optimized_class_id,
- T* optimized_handle,
- T* non_optimized_handle) {
- v8::HandleScope scope(isolate);
-
- v8::Local<v8::Object> optimized_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), optimized_handle, nullptr));
- EXPECT_TRUE(optimized_handle->IsEmpty());
- *optimized_handle = T(isolate, optimized_object);
- EXPECT_FALSE(optimized_handle->IsEmpty());
- optimized_handle->SetWrapperClassId(optimized_class_id);
-
- v8::Local<v8::Object> non_optimized_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(non_optimized_handle->IsEmpty());
- *non_optimized_handle = T(isolate, non_optimized_object);
- EXPECT_FALSE(non_optimized_handle->IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceNoDestructorReclaimedOnScavenge) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- constexpr uint16_t kClassIdToOptimize = 23;
- EmbedderHeapTracerNoDestructorNonTracingClearing tracer(kClassIdToOptimize);
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- auto* traced_handles = i_isolate()->traced_handles();
-
- const size_t initial_count = traced_handles->used_node_count();
- auto* optimized_handle = new v8::TracedReference<v8::Value>();
- auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
- SetupOptimizedAndNonOptimizedHandle(v8_isolate(), kClassIdToOptimize,
- optimized_handle, non_optimized_handle);
- EXPECT_EQ(initial_count + 2, traced_handles->used_node_count());
- YoungGC();
- EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
- EXPECT_TRUE(optimized_handle->IsEmpty());
- delete optimized_handle;
- EXPECT_FALSE(non_optimized_handle->IsEmpty());
- non_optimized_handle->Reset();
- delete non_optimized_handle;
- EXPECT_EQ(initial_count, traced_handles->used_node_count());
-}
-
-namespace {
-
-template <typename T>
-V8_NOINLINE void OnStackTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer) {
- v8::Global<v8::Object> observer;
- T stack_ref;
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- stack_ref.Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceOnStack) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- OnStackTest<v8::TracedReference<v8::Value>>(v8_isolate(), &tracer);
-}
-
-namespace {
-
-enum class Operation {
- kCopy,
- kMove,
-};
-
-template <typename T>
-V8_NOINLINE void PerformOperation(Operation op, T* target, T* source) {
- switch (op) {
- case Operation::kMove:
- *target = std::move(*source);
- break;
- case Operation::kCopy:
- *target = *source;
- source->Reset();
- break;
- }
-}
-
-enum class TargetHandling {
- kNonInitialized,
- kInitializedYoungGen,
- kInitializedOldGen
-};
-
-V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle;
- v8::TracedReference<v8::Value>* heap_handle =
- new v8::TracedReference<v8::Value>();
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(
- IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!v8_flags.single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- FullGC(v8_isolate);
- EXPECT_FALSE(
- i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- heap_handle->Reset(v8_isolate, to_object);
- }
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle.Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- PerformOperation(op, heap_handle, &stack_handle);
- tracer->AddReferenceForTracing(heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate)
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC(v8_isolate);
- }
- ASSERT_TRUE(observer.IsEmpty());
- delete heap_handle;
-}
-
-V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle;
- v8::TracedReference<v8::Value>* heap_handle =
- new v8::TracedReference<v8::Value>();
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(
- IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!v8_flags.single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- FullGC(v8_isolate);
- EXPECT_FALSE(
- i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- stack_handle.Reset(v8_isolate, to_object);
- }
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- heap_handle->Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- PerformOperation(op, &stack_handle, heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- stack_handle.Reset();
- FullGC(v8_isolate);
- EXPECT_TRUE(observer.IsEmpty());
- delete heap_handle;
-}
-
-V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle1;
- v8::TracedReference<v8::Value> stack_handle2;
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(
- IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!v8_flags.single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- FullGC(v8_isolate);
- EXPECT_FALSE(
- i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- stack_handle2.Reset(v8_isolate, to_object);
- }
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle1.Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- PerformOperation(op, &stack_handle2, &stack_handle1);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- stack_handle2.Reset();
- FullGC(v8_isolate);
- EXPECT_TRUE(observer.IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceMove) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceCopy) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
-}
-
-namespace {
-
-V8_NOINLINE void CreateTracedReferenceInDeepStack(
- v8::Isolate* isolate, v8::Global<v8::Object>* observer) {
- v8::TracedReference<v8::Value> stack_ref;
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_ref.Reset(isolate, object);
- observer->Reset(isolate, object);
- observer->SetWeak();
-}
-
-V8_NOINLINE void TracedReferenceOnStackReferencesAreTemporaryTest(
- v8::Isolate* v8_isolate, TestEmbedderHeapTracer* tracer) {
- v8::Global<v8::Object> observer;
- CreateTracedReferenceInDeepStack(v8_isolate, &observer);
- EXPECT_FALSE(observer.IsEmpty());
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate)
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC(v8_isolate);
- }
- EXPECT_TRUE(observer.IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, OnStackReferencesAreTemporary) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- TracedReferenceOnStackReferencesAreTemporaryTest(v8_isolate(), &tracer);
-}
-
-} // namespace heap
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 95857cf4f7..6865486d53 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -119,6 +119,7 @@ void StopTracing(GCTracer* tracer, GarbageCollector collector) {
} // namespace
TEST_F(GCTracerTest, AllocationThroughput) {
+ if (v8_flags.stress_incremental_marking) return;
// GCTracer::AllocationThroughputInBytesPerMillisecond ignores global memory.
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -147,6 +148,7 @@ TEST_F(GCTracerTest, AllocationThroughput) {
}
TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -184,6 +186,7 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
}
TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -228,6 +231,7 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
}
TEST_F(GCTracerTest, RegularScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -242,6 +246,7 @@ TEST_F(GCTracerTest, RegularScope) {
}
TEST_F(GCTracerTest, IncrementalScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -258,6 +263,7 @@ TEST_F(GCTracerTest, IncrementalScope) {
}
TEST_F(GCTracerTest, IncrementalMarkingDetails) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -307,6 +313,7 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
}
TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -357,6 +364,7 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
}
TEST_F(GCTracerTest, MutatorUtilization) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -393,6 +401,7 @@ TEST_F(GCTracerTest, MutatorUtilization) {
}
TEST_F(GCTracerTest, BackgroundScavengerScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
@@ -407,6 +416,7 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
}
TEST_F(GCTracerTest, BackgroundMinorMCScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
StartTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR,
@@ -416,10 +426,6 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY,
20);
tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2);
- tracer->AddScopeSample(
- GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 30);
- tracer->AddScopeSample(
- GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 3);
StopTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
11,
@@ -427,12 +433,10 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
EXPECT_DOUBLE_EQ(
22, tracer->current_
.scopes[GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY]);
- EXPECT_DOUBLE_EQ(
- 33, tracer->current_.scopes
- [GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]);
}
TEST_F(GCTracerTest, BackgroundMajorMCScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_MARKING, 100);
@@ -479,6 +483,7 @@ class ThreadWithBackgroundScope final : public base::Thread {
};
TEST_F(GCTracerTest, MultithreadedBackgroundScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
ThreadWithBackgroundScope thread1(tracer);
ThreadWithBackgroundScope thread2(tracer);
diff --git a/deps/v8/test/unittests/heap/global-handles-unittest.cc b/deps/v8/test/unittests/heap/global-handles-unittest.cc
index a7eda52f7e..0789f678ff 100644
--- a/deps/v8/test/unittests/heap/global-handles-unittest.cc
+++ b/deps/v8/test/unittests/heap/global-handles-unittest.cc
@@ -27,6 +27,7 @@
#include "src/handles/global-handles.h"
+#include "include/v8-embedder-heap.h"
#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate.h"
@@ -47,29 +48,13 @@ struct TracedReferenceWrapper {
v8::TracedReference<v8::Object> handle;
};
-START_ALLOW_USE_DEPRECATED()
-
-// Empty v8::EmbedderHeapTracer that never keeps objects alive on Scavenge. See
-// |IsRootForNonTracingGC|.
-class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
+class NonRootingEmbedderRootsHandler final : public v8::EmbedderRootsHandler {
public:
- NonRootingEmbedderHeapTracer() = default;
-
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
- bool AdvanceTracing(double deadline_in_ms) final { return true; }
- bool IsTracingDone() final { return true; }
- void TracePrologue(TraceFlags) final {}
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
+ bool IsRoot(const v8::TracedReference<v8::Value>& handle) final {
return false;
}
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
+ void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
for (auto* wrapper : wrappers_) {
if (wrapper->handle == handle) {
wrapper->handle.Reset();
@@ -85,19 +70,18 @@ class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
std::vector<TracedReferenceWrapper*> wrappers_;
};
-END_ALLOW_USE_DEPRECATED()
-
void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Isolate* isolate = info.GetIsolate();
info.GetReturnValue().Set(v8::Number::New(isolate, 0));
}
-struct FlagAndGlobal {
+struct FlagAndHandles {
bool flag;
v8::Global<v8::Object> handle;
+ v8::Local<v8::Object> local;
};
-void ResetHandleAndSetFlag(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ResetHandleAndSetFlag(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.GetParameter()->flag = true;
}
@@ -155,7 +139,7 @@ void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndGlobal fp;
+ FlagAndHandles fp;
construct_function(isolate, context, &fp);
CHECK(IsNewObjectInCorrectGeneration(isolate, fp.handle));
fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
@@ -176,15 +160,17 @@ class GlobalHandlesTest : public TestWithContext {
ModifierFunction modifier_function,
SurvivalMode survives) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- NonRootingEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ NonRootingEmbedderRootsHandler roots_handler;
+ v8_isolate()->SetEmbedderRootsHandler(&roots_handler);
auto fp = std::make_unique<TracedReferenceWrapper>();
- tracer.Register(fp.get());
+ roots_handler.Register(fp.get());
construct_function(isolate, context, fp.get());
CHECK(IsNewObjectInCorrectGeneration(isolate, fp->handle));
modifier_function(fp.get());
@@ -193,6 +179,8 @@ class GlobalHandlesTest : public TestWithContext {
// handle directly here.
CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp->handle.IsEmpty());
CHECK_IMPLIES(survives == SurvivalMode::kDies, fp->handle.IsEmpty());
+
+ v8_isolate()->SetEmbedderRootsHandler(nullptr);
}
};
@@ -202,6 +190,8 @@ TEST_F(GlobalHandlesTest, EternalHandles) {
Isolate* isolate = i_isolate();
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
EternalHandles* eternal_handles = isolate->eternal_handles();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ isolate->heap());
// Create a number of handles that will not be on a block boundary
const int kArrayLength = 2048 - 1;
@@ -289,10 +279,10 @@ TEST_F(GlobalHandlesTest, WeakPersistentSmi) {
v8::WeakCallbackType::kParameter);
}
-START_ALLOW_USE_DEPRECATED()
-
TEST_F(GlobalHandlesTest, PhantomHandlesWithoutCallbacks) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::Global<v8::Object> g1, g2;
{
@@ -312,9 +302,14 @@ TEST_F(GlobalHandlesTest, PhantomHandlesWithoutCallbacks) {
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSObjectDiesOnScavenge) {
if (v8_flags.single_generation) return;
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSObject<FlagAndGlobal>, [](FlagAndGlobal* fp) {},
- [this]() { CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kDies);
+ v8_isolate(), &ConstructJSObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectGarbage(i::NEW_SPACE); },
+ SurvivalMode::kDies);
}
TEST_F(GlobalHandlesTest, TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
@@ -327,19 +322,22 @@ TEST_F(GlobalHandlesTest, TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
}
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact) {
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSObject<FlagAndGlobal>, [](FlagAndGlobal* fp) {},
- [this]() { CollectAllGarbage(); }, SurvivalMode::kDies);
+ v8_isolate(), &ConstructJSObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectAllGarbage(); },
+ SurvivalMode::kDies);
}
TEST_F(GlobalHandlesTest,
WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
WeakHandleTest(
- v8_isolate(), &ConstructJSObject<FlagAndGlobal>,
- [this](FlagAndGlobal* fp) {
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
- USE(handle);
+ v8_isolate(), &ConstructJSObject<FlagAndHandles>,
+ [this](FlagAndHandles* fp) {
+ fp->local = v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
},
[this]() { CollectAllGarbage(); }, SurvivalMode::kSurvives);
}
@@ -347,9 +345,13 @@ TEST_F(GlobalHandlesTest,
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSApiObjectDiesOnScavenge) {
if (v8_flags.single_generation) return;
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [](FlagAndGlobal* fp) {}, [this]() { CollectGarbage(i::NEW_SPACE); },
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectGarbage(i::NEW_SPACE); },
SurvivalMode::kDies);
}
@@ -390,30 +392,30 @@ TEST_F(GlobalHandlesTest,
if (v8_flags.single_generation) return;
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [this](FlagAndGlobal* fp) {
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
- USE(handle);
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [this](FlagAndHandles* fp) {
+ fp->local = v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
},
[this]() { CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kSurvives);
}
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSApiObjectDiesOnMarkCompact) {
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [](FlagAndGlobal* fp) {}, [this]() { CollectAllGarbage(); },
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectAllGarbage(); },
SurvivalMode::kDies);
}
TEST_F(GlobalHandlesTest,
WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [this](FlagAndGlobal* fp) {
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
- USE(handle);
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [this](FlagAndHandles* fp) {
+ fp->local = v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
},
[this]() { CollectAllGarbage(); }, SurvivalMode::kSurvives);
}
@@ -466,22 +468,22 @@ TEST_F(GlobalHandlesTest,
namespace {
-void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->flag = true;
YoungGC(data.GetIsolate());
}
-void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(ForceScavenge2);
}
-void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->flag = true;
FullGC(data.GetIsolate());
}
-void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(ForceMarkSweep2);
}
@@ -490,12 +492,14 @@ void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
if (v8_flags.single_generation) {
- FlagAndGlobal fp;
+ FlagAndHandles fp;
ConstructJSApiObject(isolate, context, &fp);
CHECK_IMPLIES(!v8_flags.single_generation,
!InYoungGeneration(isolate, fp.handle));
@@ -508,7 +512,7 @@ TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
}
static const int kNumberOfGCTypes = 2;
- using Callback = v8::WeakCallbackInfo<FlagAndGlobal>::Callback;
+ using Callback = v8::WeakCallbackInfo<FlagAndHandles>::Callback;
Callback gc_forcing_callback[kNumberOfGCTypes] = {&ForceScavenge1,
&ForceMarkSweep1};
@@ -519,7 +523,7 @@ TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
- FlagAndGlobal fp;
+ FlagAndHandles fp;
ConstructJSApiObject(isolate, context, &fp);
CHECK(InYoungGeneration(isolate, fp.handle));
fp.flag = false;
@@ -534,11 +538,11 @@ TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
namespace {
-void SecondPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void SecondPassCallback(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->flag = true;
}
-void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(SecondPassCallback);
}
@@ -547,10 +551,12 @@ void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
TEST_F(GlobalHandlesTest, SecondPassPhantomCallbacks) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndGlobal fp;
+ FlagAndHandles fp;
ConstructJSApiObject(isolate, context, &fp);
fp.flag = false;
fp.handle.SetWeak(&fp, FirstPassCallback, v8::WeakCallbackType::kParameter);
diff --git a/deps/v8/test/unittests/heap/global-safepoint-unittest.cc b/deps/v8/test/unittests/heap/global-safepoint-unittest.cc
index 16cb6ea64a..024d9ac4e8 100644
--- a/deps/v8/test/unittests/heap/global-safepoint-unittest.cc
+++ b/deps/v8/test/unittests/heap/global-safepoint-unittest.cc
@@ -72,7 +72,6 @@ class InfiniteLooperThread final : public ParkingThread {
v8::Local<v8::String> source =
v8::String::NewFromUtf8(v8_isolate, "for(;;) {}").ToLocalChecked();
auto context = v8_isolate->GetCurrentContext();
- v8::Local<v8::Value> result;
v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
@@ -125,9 +124,9 @@ TEST_F(GlobalSafepointTest, Interrupt) {
// as of FeedbackVectors, and we wouldn't be testing the interrupt check.
base::OS::Sleep(base::TimeDelta::FromMilliseconds(500));
GlobalSafepointScope global_safepoint(i_main_isolate);
- i_main_isolate->shared_heap_isolate()
+ i_main_isolate->shared_space_isolate()
->global_safepoint()
- ->IterateClientIsolates([](Isolate* client) {
+ ->IterateSharedSpaceAndClientIsolates([](Isolate* client) {
client->stack_guard()->RequestTerminateExecution();
});
}
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 45230d7c32..8f649d0412 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -151,12 +151,11 @@ TEST_F(HeapTest, HeapLayout) {
EXPECT_TRUE(IsAligned(cage_base, size_t{4} * GB));
Address code_cage_base = i_isolate()->code_cage_base();
- EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
-
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- Address isolate_root = i_isolate()->isolate_root();
- EXPECT_EQ(cage_base, isolate_root);
-#endif
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ EXPECT_TRUE(IsAligned(code_cage_base, kMinExpectedOSPageSize));
+ } else {
+ EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
+ }
// Check that all memory chunks belong this region.
base::AddressRegion heap_reservation(cage_base, size_t{4} * GB);
@@ -184,12 +183,14 @@ TEST_F(HeapTest, HeapLayout) {
namespace {
void ShrinkNewSpace(NewSpace* new_space) {
if (!v8_flags.minor_mc) {
- new_space->Shrink();
+ SemiSpaceNewSpace::From(new_space)->Shrink();
return;
}
// MinorMC shrinks the space as part of sweeping.
PagedNewSpace* paged_new_space = PagedNewSpace::From(new_space);
- GCTracer* tracer = paged_new_space->heap()->tracer();
+ Heap* heap = paged_new_space->heap();
+ heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
+ GCTracer* tracer = heap->tracer();
tracer->StartObservablePause();
tracer->StartCycle(GarbageCollector::MARK_COMPACTOR,
GarbageCollectionReason::kTesting, "heap unittest",
@@ -198,7 +199,7 @@ void ShrinkNewSpace(NewSpace* new_space) {
paged_new_space->StartShrinking();
for (Page* page = paged_new_space->first_page();
page != paged_new_space->last_page() &&
- (paged_new_space->ShouldReleasePage());) {
+ (paged_new_space->ShouldReleaseEmptyPage());) {
Page* current_page = page;
page = page->next_page();
if (current_page->allocated_bytes() == 0) {
@@ -392,6 +393,7 @@ TEST_F(HeapTest, RememberedSet_InsertOnPromotingObjectToOld) {
// Promote 'arr' into old, its element is still in new, the old to new
// refs are inserted into the remembered sets during GC.
CollectGarbage(i::NEW_SPACE);
+ heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
CHECK(heap->InOldSpace(*arr));
CHECK(heap->InYoungGeneration(arr->get(0)));
@@ -430,11 +432,9 @@ TEST_F(HeapTest, Regress978156) {
marking->Start(GarbageCollector::MARK_COMPACTOR,
i::GarbageCollectionReason::kTesting);
}
- MarkingState* marking_state = heap->marking_state();
// 6. Mark the filler black to access its two markbits. This triggers
// an out-of-bounds access of the marking bitmap in a bad case.
- marking_state->WhiteToGrey(filler);
- marking_state->GreyToBlack(filler);
+ heap->marking_state()->TryMarkAndAccountLiveBytes(filler);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/heap-utils.cc b/deps/v8/test/unittests/heap/heap-utils.cc
index c1a6f3cf5d..a2aa2ff653 100644
--- a/deps/v8/test/unittests/heap/heap-utils.cc
+++ b/deps/v8/test/unittests/heap/heap-utils.cc
@@ -244,7 +244,7 @@ void FillCurrentSemiSpacePage(v8::internal::NewSpace* space,
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled, it actually points to
// the current allocation pointer.
- DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
+ DCHECK_IMPLIES(!space->heap()->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining = GetSpaceRemainingOnCurrentSemiSpacePage(space);
if (space_remaining == 0) return;
@@ -282,5 +282,16 @@ bool IsNewObjectInCorrectGeneration(HeapObject object) {
: i::Heap::InYoungGeneration(object);
}
+void FinalizeGCIfRunning(Isolate* isolate) {
+ if (!isolate) {
+ return;
+ }
+ auto* heap = isolate->heap();
+ if (heap->incremental_marking()->IsMarking()) {
+ heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+ heap->CompleteSweepingFull();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-utils.h b/deps/v8/test/unittests/heap/heap-utils.h
index 0d9dddba87..633652617d 100644
--- a/deps/v8/test/unittests/heap/heap-utils.h
+++ b/deps/v8/test/unittests/heap/heap-utils.h
@@ -33,22 +33,18 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
WithHeapInternals& operator=(const WithHeapInternals&) = delete;
void CollectGarbage(AllocationSpace space) {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(space, GarbageCollectionReason::kTesting);
}
void FullGC() {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
}
void YoungGC() {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
}
void CollectAllAvailableGarbage() {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kTesting);
}
@@ -92,7 +88,6 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
}
void GcAndSweep(AllocationSpace space) {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(space, GarbageCollectionReason::kTesting);
if (heap()->sweeping_in_progress()) {
IsolateSafepointScope scope(heap());
@@ -102,26 +97,6 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
}
};
-START_ALLOW_USE_DEPRECATED()
-
-class V8_NODISCARD TemporaryEmbedderHeapTracerScope {
- public:
- TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
- v8::EmbedderHeapTracer* tracer)
- : isolate_(isolate) {
- isolate_->SetEmbedderHeapTracer(tracer);
- }
-
- ~TemporaryEmbedderHeapTracerScope() {
- isolate_->SetEmbedderHeapTracer(nullptr);
- }
-
- private:
- v8::Isolate* const isolate_;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
using TestWithHeapInternals = //
WithHeapInternals< //
WithInternalIsolateMixin< //
@@ -136,19 +111,16 @@ using TestWithHeapInternalsAndContext = //
inline void CollectGarbage(AllocationSpace space, v8::Isolate* isolate) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
}
inline void FullGC(v8::Isolate* isolate) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectAllGarbage(Heap::kNoGCFlags, GarbageCollectionReason::kTesting);
}
inline void YoungGC(v8::Isolate* isolate) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
}
@@ -170,6 +142,8 @@ bool IsNewObjectInCorrectGeneration(v8::Isolate* isolate,
return IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*tmp));
}
+void FinalizeGCIfRunning(Isolate* isolate);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index 5330028295..cbbdeb1a6c 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -159,13 +159,9 @@ class BackgroundThreadForGCEpilogue final : public v8::base::Thread {
TEST_F(LocalHeapTest, GCEpilogue) {
Heap* heap = i_isolate()->heap();
- LocalHeap lh(heap, ThreadKind::kMain);
- lh.SetUpMainThreadForTesting();
+ LocalHeap* lh = heap->main_thread_local_heap();
std::array<GCEpilogue, 3> epilogue;
- {
- UnparkedScope unparked(&lh);
- lh.AddGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
- }
+ lh->AddGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
auto thread1 =
std::make_unique<BackgroundThreadForGCEpilogue>(heap, true, &epilogue[1]);
auto thread2 = std::make_unique<BackgroundThreadForGCEpilogue>(heap, false,
@@ -174,18 +170,12 @@ TEST_F(LocalHeapTest, GCEpilogue) {
CHECK(thread2->Start());
epilogue[1].WaitUntilStarted();
epilogue[2].WaitUntilStarted();
- {
- UnparkedScope scope(&lh);
- PreciseCollectAllGarbage(i_isolate());
- }
+ PreciseCollectAllGarbage(i_isolate());
epilogue[1].RequestStop();
epilogue[2].RequestStop();
thread1->Join();
thread2->Join();
- {
- UnparkedScope unparked(&lh);
- lh.RemoveGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
- }
+ lh->RemoveGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
for (auto& e : epilogue) {
CHECK(e.WasInvoked());
}
diff --git a/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc b/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc
index 6801f1441f..c8823e9644 100644
--- a/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/heap/conservative-stack-visitor.h"
#include "src/heap/gc-tracer.h"
-#include "src/heap/mark-compact.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
@@ -15,7 +15,18 @@ namespace {
constexpr int Tagged = kTaggedSize;
constexpr int FullCell = Bitmap::kBitsPerCell * Tagged;
-class InnerPointerResolutionTest : public TestWithIsolate {
+template <typename TMixin>
+class WithInnerPointerResolutionMixin : public TMixin {
+ public:
+ Address ResolveInnerPointer(Address maybe_inner_ptr) {
+ return ConservativeStackVisitor::FindBasePtrForMarking(
+ maybe_inner_ptr, this->isolate()->heap()->memory_allocator(),
+ GarbageCollector::MARK_COMPACTOR);
+ }
+};
+
+class InnerPointerResolutionTest
+ : public WithInnerPointerResolutionMixin<TestWithIsolate> {
public:
struct ObjectRequest {
int size; // The only required field.
@@ -48,7 +59,6 @@ class InnerPointerResolutionTest : public TestWithIsolate {
Heap* heap() { return isolate()->heap(); }
MemoryAllocator* allocator() { return heap()->memory_allocator(); }
- MarkCompactCollector* collector() { return heap()->mark_compact_collector(); }
// Create, free and lookup pages, normal or large.
@@ -205,12 +215,12 @@ class InnerPointerResolutionTest : public TestWithIsolate {
case ObjectRequest::WHITE:
break;
case ObjectRequest::GREY:
- heap()->marking_state()->WhiteToGrey(
+ heap()->marking_state()->TryMark(
HeapObject::FromAddress(object.address));
break;
case ObjectRequest::BLACK:
DCHECK_LE(2 * Tagged, object.size);
- heap()->marking_state()->WhiteToBlack(
+ heap()->marking_state()->TryMarkAndAccountLiveBytes(
HeapObject::FromAddress(object.address));
break;
case ObjectRequest::BLACK_AREA: {
@@ -227,8 +237,7 @@ class InnerPointerResolutionTest : public TestWithIsolate {
void RunTestInside(const ObjectRequest& object, int offset) {
DCHECK_LE(0, offset);
DCHECK_GT(object.size, offset);
- Address base_ptr =
- collector()->FindBasePtrForMarking(object.address + offset);
+ Address base_ptr = ResolveInnerPointer(object.address + offset);
bool should_return_null =
!IsPageAlive(object.page_id) || (object.type == ObjectRequest::FREE) ||
(object.type == ObjectRequest::REGULAR &&
@@ -243,7 +252,7 @@ class InnerPointerResolutionTest : public TestWithIsolate {
// This must be called with an address not contained in any created object.
void RunTestOutside(Address ptr) {
- Address base_ptr = collector()->FindBasePtrForMarking(ptr);
+ Address base_ptr = ResolveInnerPointer(ptr);
EXPECT_EQ(kNullAddress, base_ptr);
}
@@ -600,16 +609,20 @@ TEST_F(InnerPointerResolutionTest, FreePages) {
TestAll();
}
-using InnerPointerResolutionHeapTest = TestWithHeapInternalsAndContext;
+using InnerPointerResolutionHeapTest =
+ WithInnerPointerResolutionMixin<TestWithHeapInternalsAndContext>;
TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
ManualGCScope manual_gc_scope(isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap());
v8_flags.page_promotion = false;
Persistent<v8::FixedArray> weak1, weak2, strong;
Address inner_ptr1, inner_ptr2, inner_ptr3, outside_ptr1, outside_ptr2;
Page *page1, *page2;
+ auto allocator = heap()->memory_allocator();
+
{
PtrComprCageBase cage_base{isolate()};
HandleScope scope(isolate());
@@ -636,13 +649,18 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
EXPECT_TRUE(v8_flags.minor_mc || page2->IsToPage());
EXPECT_NE(page1, page2);
- // Allocate one more object, small enough that it fits in page2.
- // Keep a strong reference to this object.
+ // Allocate one more object, small enough that it fits in either page1 or
+ // page2. Keep a strong reference to this object.
auto h3 = factory()->NewFixedArray(16, AllocationType::kYoung);
strong.Reset(v8_isolate(), Utils::FixedArrayToLocal(h3));
auto obj3 = h3->GetHeapObject();
- EXPECT_EQ(page2, Page::FromHeapObject(obj3));
- EXPECT_EQ(obj3.address(), obj2.address() + obj2.Size(cage_base));
+ auto page3 = Page::FromHeapObject(obj3);
+ EXPECT_TRUE(page3 == page1 || page3 == page2);
+ if (page3 == page1) {
+ EXPECT_EQ(obj3.address(), obj1.address() + obj1.Size(cage_base));
+ } else {
+ EXPECT_EQ(obj3.address(), obj2.address() + obj2.Size(cage_base));
+ }
// Keep inner pointers to all objects.
inner_ptr1 = obj1.address() + 17 * Tagged;
@@ -654,28 +672,22 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
outside_ptr2 = page2->area_end() - 2 * Tagged;
EXPECT_LE(obj1.address() + obj1.Size(cage_base), outside_ptr1);
EXPECT_LE(obj2.address() + obj2.Size(cage_base), outside_ptr2);
- EXPECT_LE(obj3.address() + obj3.Size(cage_base), outside_ptr2);
+ if (page3 == page1) {
+ EXPECT_LE(obj3.address() + obj3.Size(cage_base), outside_ptr1);
+ } else {
+ EXPECT_LE(obj3.address() + obj3.Size(cage_base), outside_ptr2);
+ }
// Ensure the young generation space is iterable.
heap()->new_space()->MakeLinearAllocationAreaIterable();
// Inner pointer resolution should work now, finding the objects in the
// case of the inner pointers.
- EXPECT_EQ(
- obj1.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
- EXPECT_EQ(
- obj2.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
- EXPECT_EQ(
- obj3.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr3));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
+ EXPECT_EQ(obj1.address(), ResolveInnerPointer(inner_ptr1));
+ EXPECT_EQ(obj2.address(), ResolveInnerPointer(inner_ptr2));
+ EXPECT_EQ(obj3.address(), ResolveInnerPointer(inner_ptr3));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr2));
// Start incremental marking and mark the third object.
i::IncrementalMarking* marking = heap()->incremental_marking();
@@ -688,21 +700,18 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
i::GarbageCollectionReason::kTesting);
}
MarkingState* marking_state = heap()->marking_state();
- marking_state->WhiteToGrey(obj3);
- marking_state->GreyToBlack(obj3);
+ marking_state->TryMarkAndAccountLiveBytes(obj3);
}
// Garbage collection should reclaim the two large objects with the weak
// references, but not the small one with the strong reference.
- CollectGarbage(NEW_SPACE);
+ GcAndSweep(NEW_SPACE);
EXPECT_TRUE(weak1.IsEmpty());
EXPECT_TRUE(weak2.IsEmpty());
EXPECT_TRUE(!strong.IsEmpty());
// The two pages should still be around, in the new space.
- EXPECT_EQ(page1, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr1));
- EXPECT_EQ(page2, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr2));
+ EXPECT_EQ(page1, allocator->LookupChunkContainingAddress(inner_ptr1));
+ EXPECT_EQ(page2, allocator->LookupChunkContainingAddress(inner_ptr2));
EXPECT_EQ(AllocationSpace::NEW_SPACE, page1->owner_identity());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page2->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page1->IsFromPage());
@@ -711,61 +720,39 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
// Inner pointer resolution should work with pointers to unused young
// generation pages (in case of the scavenger, the two pages are now in the
// "from" semispace). There are no objects to be found.
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr3));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr3));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr2));
// Garbage collection once more.
- CollectGarbage(NEW_SPACE);
+ GcAndSweep(NEW_SPACE);
EXPECT_EQ(AllocationSpace::NEW_SPACE, page1->owner_identity());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page2->owner_identity());
// The two pages should still be around, in the new space.
- EXPECT_EQ(page1, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr1));
- EXPECT_EQ(page2, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr2));
+ EXPECT_EQ(page1, allocator->LookupChunkContainingAddress(inner_ptr1));
+ EXPECT_EQ(page2, allocator->LookupChunkContainingAddress(inner_ptr2));
EXPECT_TRUE(v8_flags.minor_mc || page1->IsToPage());
EXPECT_TRUE(v8_flags.minor_mc || page2->IsToPage());
// Inner pointer resolution should work with pointers to unused young
// generation pages (in case of the scavenger, the two pages are now in the
// "to" semispace). There are no objects to be found.
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr3));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr3));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr2));
}
TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
ManualGCScope manual_gc_scope(isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap());
v8_flags.page_promotion = false;
Global<v8::FixedArray> weak;
Address inner_ptr;
- Page* page;
{
PtrComprCageBase cage_base{isolate()};
@@ -780,7 +767,7 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
weak.Reset(v8_isolate(), Utils::FixedArrayToLocal(h));
weak.SetWeak();
auto obj = h->GetHeapObject();
- page = Page::FromHeapObject(obj);
+ auto page = Page::FromHeapObject(obj);
EXPECT_TRUE(page->IsLargePage());
EXPECT_EQ(AllocationSpace::NEW_LO_SPACE, page->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page->IsToPage());
@@ -789,26 +776,25 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
inner_ptr = obj.address() + 17 * Tagged;
// Inner pointer resolution should work now, finding the object.
- EXPECT_EQ(
- obj.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(obj.address(), ResolveInnerPointer(inner_ptr));
}
// Garbage collection should reclaim the object.
- CollectGarbage(NEW_SPACE);
+ GcAndSweep(NEW_SPACE);
EXPECT_TRUE(weak.IsEmpty());
// Inner pointer resolution should work with a pointer to an unused young
// generation large page. There is no object to be found.
- EXPECT_EQ(kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr));
}
TEST_F(InnerPointerResolutionHeapTest, RegularPageAfterEnd) {
+ auto allocator = heap()->memory_allocator();
+
// Allocate a regular page.
OldSpace* old_space = heap()->old_space();
DCHECK_NE(nullptr, old_space);
- auto* page = heap()->memory_allocator()->AllocatePage(
+ auto* page = allocator->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, old_space, NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
@@ -825,21 +811,21 @@ TEST_F(InnerPointerResolutionHeapTest, RegularPageAfterEnd) {
// Inner pointer resolution after the end of the page area should work.
Address inner_ptr = page->area_end() + Tagged;
EXPECT_FALSE(Page::IsAlignedToPageSize(inner_ptr));
- EXPECT_EQ(kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr));
// Deallocate the page.
- heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
- page);
+ allocator->Free(MemoryAllocator::FreeMode::kImmediately, page);
}
TEST_F(InnerPointerResolutionHeapTest, LargePageAfterEnd) {
+ auto allocator = heap()->memory_allocator();
+
// Allocate a large page.
OldLargeObjectSpace* lo_space = heap()->lo_space();
EXPECT_NE(nullptr, lo_space);
const int size = 3 * (1 << kPageSizeBits) / 2;
- LargePage* page = heap()->memory_allocator()->AllocateLargePage(
- lo_space, size, NOT_EXECUTABLE);
+ LargePage* page =
+ allocator->AllocateLargePage(lo_space, size, NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
// The end of the page area is expected not to coincide with the beginning of
@@ -849,12 +835,10 @@ TEST_F(InnerPointerResolutionHeapTest, LargePageAfterEnd) {
// Inner pointer resolution after the end of the pare area should work.
Address inner_ptr = page->area_end() + Tagged;
EXPECT_FALSE(Page::IsAlignedToPageSize(inner_ptr));
- EXPECT_EQ(kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr));
// Deallocate the page.
- heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
- page);
+ allocator->Free(MemoryAllocator::FreeMode::kImmediately, page);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/marking-worklist-unittest.cc b/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
index 0bd53c2893..45bbdad4be 100644
--- a/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
@@ -22,7 +22,10 @@ TEST_F(MarkingWorklistTest, PushPop) {
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.Push(pushed_object);
HeapObject popped_object;
EXPECT_TRUE(worklists.Pop(&popped_object));
@@ -33,30 +36,25 @@ TEST_F(MarkingWorklistTest, PushPopOnHold) {
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.PushOnHold(pushed_object);
HeapObject popped_object;
EXPECT_TRUE(worklists.PopOnHold(&popped_object));
EXPECT_EQ(popped_object, pushed_object);
}
-TEST_F(MarkingWorklistTest, PushPopEmbedder) {
- MarkingWorklists holder;
- MarkingWorklists::Local worklists(&holder);
- HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
- worklists.PushWrapper(pushed_object);
- HeapObject popped_object;
- EXPECT_TRUE(worklists.PopWrapper(&popped_object));
- EXPECT_EQ(popped_object, pushed_object);
-}
-
TEST_F(MarkingWorklistTest, MergeOnHold) {
MarkingWorklists holder;
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worker_worklists.PushOnHold(pushed_object);
worker_worklists.Publish();
main_worklists.MergeOnHold();
@@ -70,7 +68,10 @@ TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
main_worklists.Push(pushed_object);
main_worklists.ShareWork();
HeapObject popped_object;
@@ -84,7 +85,10 @@ TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
holder.CreateContextWorklists({context});
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.SwitchToContext(context);
worklists.Push(pushed_object);
worklists.SwitchToSharedForTesting();
@@ -100,7 +104,10 @@ TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
holder.CreateContextWorklists({context});
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.SwitchToContext(context);
worklists.Push(pushed_object);
EXPECT_FALSE(worklists.IsEmpty());
@@ -121,7 +128,10 @@ TEST_F(MarkingWorklistTest, ContextWorklistCrossTask) {
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
main_worklists.SwitchToContext(context1);
main_worklists.Push(pushed_object);
main_worklists.ShareWork();
diff --git a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
index d0986bb81f..16023efadd 100644
--- a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
+++ b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
@@ -11,26 +11,6 @@
namespace v8 {
namespace internal {
-MemoryReducer::State DoneState() {
- return MemoryReducer::State(MemoryReducer::kDone, 0, 0.0, 1.0, 0);
-}
-
-MemoryReducer::State DoneState(size_t committed_memory) {
- return MemoryReducer::State(MemoryReducer::kDone, 0, 0.0, 1.0,
- committed_memory);
-}
-
-MemoryReducer::State WaitState(int started_gcs, double next_gc_start_ms) {
- return MemoryReducer::State(MemoryReducer::kWait, started_gcs,
- next_gc_start_ms, 1.0, 0);
-}
-
-
-MemoryReducer::State RunState(int started_gcs, double next_gc_start_ms) {
- return MemoryReducer::State(MemoryReducer::kRun, started_gcs,
- next_gc_start_ms, 1.0, 0);
-}
-
MemoryReducer::Event MarkCompactEvent(double time_ms,
bool next_gc_likely_to_collect_more,
size_t committed_memory) {
@@ -88,248 +68,241 @@ MemoryReducer::Event PossibleGarbageEvent(double time_ms) {
TEST(MemoryReducer, FromDoneToDone) {
- MemoryReducer::State state0(DoneState()), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateDone(1.0, 0)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
state1 = MemoryReducer::Step(
state0,
MarkCompactEventGarbageLeft(0, MemoryReducer::kCommittedMemoryDelta - 1));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
- state0 = DoneState(1000 * MB);
+ state0 = MemoryReducer::State::CreateDone(1.0, 1000 * MB);
state1 = MemoryReducer::Step(
state0, MarkCompactEventGarbageLeft(
0, static_cast<size_t>(
1000 * MB * MemoryReducer::kCommittedMemoryFactor) -
1));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
}
TEST(MemoryReducer, FromDoneToWait) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(DoneState()), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateDone(1.0, 0)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(
state0,
MarkCompactEventGarbageLeft(2, MemoryReducer::kCommittedMemoryDelta));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms + 2,
- state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(2, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(2, state1.last_gc_time_ms());
state1 = MemoryReducer::Step(
state0,
MarkCompactEventNoGarbageLeft(2, MemoryReducer::kCommittedMemoryDelta));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms + 2,
- state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(2, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(2, state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms, state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms,
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
- state0 = DoneState(1000 * MB);
+ state0 = MemoryReducer::State::CreateDone(1.0, 1000 * MB);
state1 = MemoryReducer::Step(
state0, MarkCompactEventGarbageLeft(
2, static_cast<size_t>(
1000 * MB * MemoryReducer::kCommittedMemoryFactor)));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms + 2,
- state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(2, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(2, state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromWaitToWait) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(WaitState(2, 1000.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateWait(2, 1000.0, 1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(state0.next_gc_start_ms(), state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(
- state0, TimerEventLowAllocationRate(state0.next_gc_start_ms - 1));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ state0, TimerEventLowAllocationRate(state0.next_gc_start_ms() - 1));
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(state0.next_gc_start_ms(), state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
+
+ state0 = MemoryReducer::State::CreateWait(2, 1000.0, 0);
- state0.last_gc_time_ms = 0;
state1 = MemoryReducer::Step(
state0,
TimerEventHighAllocationRate(MemoryReducer::kWatchdogDelayMs + 1));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(MemoryReducer::kWatchdogDelayMs + 1 + MemoryReducer::kLongDelayMs,
- state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
- state0.last_gc_time_ms = 1;
+ state0 = MemoryReducer::State::CreateWait(2, 1000.0, 1);
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromWaitToRun) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(WaitState(0, 1000.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateWait(0, 1000.0, 1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(
- state0, TimerEventLowAllocationRate(state0.next_gc_start_ms + 1));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs + 1, state1.started_gcs);
+ state0, TimerEventLowAllocationRate(state0.next_gc_start_ms() + 1));
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs() + 1, state1.started_gcs());
state1 = MemoryReducer::Step(
state0,
TimerEventHighAllocationRate(MemoryReducer::kWatchdogDelayMs + 2));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs + 1, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs() + 1, state1.started_gcs());
}
TEST(MemoryReducer, FromWaitToDone) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(WaitState(2, 0.0)), state1(DoneState());
-
- state0.started_gcs = MemoryReducer::kMaxNumberOfGCs;
+ MemoryReducer::State state0(
+ MemoryReducer::State::CreateWait(MemoryReducer::kMaxNumberOfGCs, 0.0, 1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromRunToRun) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(RunState(1, 0.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateRun(1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
}
TEST(MemoryReducer, FromRunToDone) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(RunState(2, 0.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateRun(2)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
- state0.started_gcs = MemoryReducer::kMaxNumberOfGCs;
+ state0 = MemoryReducer::State::CreateRun(MemoryReducer::kMaxNumberOfGCs);
state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromRunToWait) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(RunState(2, 0.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateRun(2)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
- state0.started_gcs = 1;
+ state0 = MemoryReducer::State::CreateRun(1);
state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc b/deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc
deleted file mode 100644
index d08c9d04d0..0000000000
--- a/deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/object-start-bitmap.h"
-
-#include "src/base/macros.h"
-#include "src/heap/object-start-bitmap-inl.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-bool IsEmpty(const ObjectStartBitmap& bitmap) {
- size_t count = 0;
- bitmap.Iterate([&count](Address) { count++; });
- return count == 0;
-}
-
-// Abstraction for objects that hides ObjectStartBitmap::kGranularity and
-// the base address as getting either of it wrong will result in failed DCHECKs.
-class TestObject {
- public:
- static PtrComprCageBase kCageBase;
- static Address kBaseOffset;
-
- explicit TestObject(size_t number) : number_(number) {
- const size_t max_entries = ObjectStartBitmap::MaxEntries();
- EXPECT_GE(max_entries, number_);
- }
-
- Address base_ptr() const {
- return kBaseOffset + ObjectStartBitmap::Granularity() * number_;
- }
-
- // Allow implicitly converting Object to Address.
- operator Address() const { return base_ptr(); }
-
- private:
- const size_t number_;
-};
-
-PtrComprCageBase TestObject::kCageBase{0xca6e00000000ul};
-Address TestObject::kBaseOffset = reinterpret_cast<Address>(0x4000ul);
-
-} // namespace
-
-TEST(V8ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
- const size_t max_entries = ObjectStartBitmap::MaxEntries();
- EXPECT_LT(0u, max_entries);
-}
-
-TEST(V8ObjectStartBitmapTest, InitialEmpty) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- EXPECT_TRUE(IsEmpty(bitmap));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- bitmap.SetBit(TestObject(0));
- EXPECT_FALSE(IsEmpty(bitmap));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitCheckBit) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(7);
- bitmap.SetBit(object);
- EXPECT_TRUE(bitmap.CheckBit(object));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitClearbitCheckBit) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(77);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_FALSE(bitmap.CheckBit(object));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(123);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_TRUE(IsEmpty(bitmap));
-}
-
-TEST(V8ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object0(0);
- TestObject object1(1);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(TestObject(3)));
- size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
- if (count == 0) {
- EXPECT_EQ(object0.base_ptr(), current);
- } else if (count == 1) {
- EXPECT_EQ(object1.base_ptr(), current);
- }
- count++;
- });
- EXPECT_EQ(2u, count);
-}
-
-TEST(V8ObjectStartBitmapTest, AdjacentObjectsAtEnd) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- const size_t last_entry_index = ObjectStartBitmap::MaxEntries() - 1;
- TestObject object0(last_entry_index - 1);
- TestObject object1(last_entry_index);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(TestObject(last_entry_index - 2)));
- size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
- if (count == 0) {
- EXPECT_EQ(object0.base_ptr(), current);
- } else if (count == 1) {
- EXPECT_EQ(object1.base_ptr(), current);
- }
- count++;
- });
- EXPECT_EQ(2u, count);
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrExact) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.base_ptr(), bitmap.FindBasePtrImpl(object.base_ptr()));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrApproximate) {
- const size_t kInternalDelta = 37;
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.base_ptr(),
- bitmap.FindBasePtrImpl(object.base_ptr() + kInternalDelta));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrIteratingWholeBitmap) {
- const size_t kLastWordDelta = ObjectStartBitmap::MaxEntries() - 1;
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object_to_find(0);
- bitmap.SetBit(object_to_find);
- Address hint_index = TestObject(kLastWordDelta);
- EXPECT_EQ(object_to_find.base_ptr(), bitmap.FindBasePtrImpl(hint_index));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrNextCell) {
- // This white box test makes use of the fact that cells are of type uint32_t.
- const size_t kCellSize = sizeof(uint32_t);
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object_to_find(kCellSize - 1);
- Address hint = TestObject(kCellSize);
- bitmap.SetBit(TestObject(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.base_ptr(), bitmap.FindBasePtrImpl(hint));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrSameCell) {
- // This white box test makes use of the fact that cells are of type uint32_t.
- const size_t kCellSize = sizeof(uint32_t);
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object_to_find(kCellSize - 1);
- Address hint = object_to_find;
- bitmap.SetBit(TestObject(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.base_ptr(), bitmap.FindBasePtrImpl(hint));
-}
-
-// TODO(v8:12851): If the ObjectStartBitmap implementation stays, unit tests
-// should be added to test the functionality of method FindBasePtr.
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/shared-heap-unittest.cc b/deps/v8/test/unittests/heap/shared-heap-unittest.cc
index 19b5b1eb6f..8cd597a7fe 100644
--- a/deps/v8/test/unittests/heap/shared-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/shared-heap-unittest.cc
@@ -19,32 +19,20 @@ using SharedHeapTest = TestJSSharedMemoryWithIsolate;
class SharedHeapNoClientsTest : public TestJSSharedMemoryWithPlatform {
public:
SharedHeapNoClientsTest() {
- if (v8_flags.shared_space) {
- shared_space_isolate_wrapper.emplace(kNoCounters);
- shared_isolate_ = shared_space_isolate_wrapper->i_isolate();
- } else {
- bool created;
- shared_isolate_ = Isolate::GetProcessWideSharedIsolate(&created);
- CHECK(created);
- }
+ shared_space_isolate_wrapper.emplace(kNoCounters);
+ shared_space_isolate_ = shared_space_isolate_wrapper->i_isolate();
}
- ~SharedHeapNoClientsTest() override {
- if (!v8_flags.shared_space) {
- Isolate::DeleteProcessWideSharedIsolate();
- }
-
- shared_isolate_ = nullptr;
- }
+ ~SharedHeapNoClientsTest() override { shared_space_isolate_ = nullptr; }
- v8::Isolate* shared_heap_isolate() {
- return reinterpret_cast<v8::Isolate*>(i_shared_heap_isolate());
+ v8::Isolate* shared_space_isolate() {
+ return reinterpret_cast<v8::Isolate*>(i_shared_space_isolate());
}
- Isolate* i_shared_heap_isolate() { return shared_isolate_; }
+ Isolate* i_shared_space_isolate() { return shared_space_isolate_; }
private:
- Isolate* shared_isolate_;
+ Isolate* shared_space_isolate_;
base::Optional<IsolateWrapper> shared_space_isolate_wrapper;
};
@@ -192,12 +180,7 @@ TEST_F(SharedHeapTest, ConcurrentAllocationInSharedMapSpace) {
}
TEST_F(SharedHeapNoClientsTest, SharedCollectionWithoutClients) {
- if (!v8_flags.shared_space) {
- DCHECK_NULL(i_shared_heap_isolate()->heap()->new_space());
- DCHECK_NULL(i_shared_heap_isolate()->heap()->new_lo_space());
- }
-
- ::v8::internal::CollectGarbage(OLD_SPACE, shared_heap_isolate());
+ ::v8::internal::CollectGarbage(OLD_SPACE, shared_space_isolate());
}
void AllocateInSharedHeap(int iterations = 100) {
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 07be431f97..87b9dccf80 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -128,11 +128,14 @@ TEST_F(SpacesTest, WriteBarrierInYoungGenerationFromSpace) {
TEST_F(SpacesTest, CodeRangeAddressReuse) {
CodeRangeAddressHint hint;
- const size_t kAnyBaseAlignment = 1;
+ const size_t base_alignment = MemoryChunk::kPageSize;
// Create code ranges.
- Address code_range1 = hint.GetAddressHint(100, kAnyBaseAlignment);
- Address code_range2 = hint.GetAddressHint(200, kAnyBaseAlignment);
- Address code_range3 = hint.GetAddressHint(100, kAnyBaseAlignment);
+ Address code_range1 = hint.GetAddressHint(100, base_alignment);
+ CHECK(IsAligned(code_range1, base_alignment));
+ Address code_range2 = hint.GetAddressHint(200, base_alignment);
+ CHECK(IsAligned(code_range2, base_alignment));
+ Address code_range3 = hint.GetAddressHint(100, base_alignment);
+ CHECK(IsAligned(code_range3, base_alignment));
// Since the addresses are random, we cannot check that they are different.
@@ -141,14 +144,14 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
hint.NotifyFreedCodeRange(code_range2, 200);
// The next two code ranges should reuse the freed addresses.
- Address code_range4 = hint.GetAddressHint(100, kAnyBaseAlignment);
+ Address code_range4 = hint.GetAddressHint(100, base_alignment);
EXPECT_EQ(code_range4, code_range1);
- Address code_range5 = hint.GetAddressHint(200, kAnyBaseAlignment);
+ Address code_range5 = hint.GetAddressHint(200, base_alignment);
EXPECT_EQ(code_range5, code_range2);
// Free the third code range and check address reuse.
hint.NotifyFreedCodeRange(code_range3, 100);
- Address code_range6 = hint.GetAddressHint(100, kAnyBaseAlignment);
+ Address code_range6 = hint.GetAddressHint(100, base_alignment);
EXPECT_EQ(code_range6, code_range3);
}
diff --git a/deps/v8/test/unittests/inspector/inspector-unittest.cc b/deps/v8/test/unittests/inspector/inspector-unittest.cc
index e2d390a8e0..ec70f061c9 100644
--- a/deps/v8/test/unittests/inspector/inspector-unittest.cc
+++ b/deps/v8/test/unittests/inspector/inspector-unittest.cc
@@ -212,6 +212,20 @@ TEST_F(InspectorTest, NoInterruptOnGetAssociatedData) {
CHECK(recorder.WasInvoked);
}
+class TestChannel : public V8Inspector::Channel {
+ public:
+ ~TestChannel() override = default;
+ void sendResponse(int callId,
+ std::unique_ptr<StringBuffer> message) override {
+ CHECK_EQ(callId, 1);
+ CHECK_NE(toString16(message->string()).find(expected_response_matcher_),
+ String16::kNotFound);
+ }
+ void sendNotification(std::unique_ptr<StringBuffer> message) override {}
+ void flushProtocolNotifications() override {}
+ v8_inspector::String16 expected_response_matcher_;
+};
+
TEST_F(InspectorTest, NoConsoleAPIForUntrustedClient) {
v8::Isolate* isolate = v8_isolate();
v8::HandleScope handle_scope(isolate);
@@ -222,20 +236,6 @@ TEST_F(InspectorTest, NoConsoleAPIForUntrustedClient) {
V8ContextInfo context_info(v8_context(), 1, toStringView(""));
inspector->contextCreated(context_info);
- class TestChannel : public V8Inspector::Channel {
- public:
- ~TestChannel() override = default;
- void sendResponse(int callId,
- std::unique_ptr<StringBuffer> message) override {
- CHECK_EQ(callId, 1);
- CHECK_NE(toString16(message->string()).find(expected_response_matcher_),
- String16::kNotFound);
- }
- void sendNotification(std::unique_ptr<StringBuffer> message) override {}
- void flushProtocolNotifications() override {}
- v8_inspector::String16 expected_response_matcher_;
- };
-
TestChannel channel;
const char kCommand[] = R"({
"id": 1,
@@ -258,6 +258,26 @@ TEST_F(InspectorTest, NoConsoleAPIForUntrustedClient) {
untrusted_session->dispatchProtocolMessage(toStringView(kCommand));
}
+TEST_F(InspectorTest, CanHandleMalformedCborMessage) {
+ v8::Isolate* isolate = v8_isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8_inspector::V8InspectorClient default_client;
+ std::unique_ptr<V8Inspector> inspector =
+ V8Inspector::create(isolate, &default_client);
+ V8ContextInfo context_info(v8_context(), 1, toStringView(""));
+ inspector->contextCreated(context_info);
+
+ TestChannel channel;
+ const unsigned char kCommand[] = {0xD8, 0x5A, 0x00, 0xBA, 0xDB, 0xEE, 0xF0};
+ std::unique_ptr<V8InspectorSession> trusted_session =
+ inspector->connect(1, &channel, toStringView("{}"),
+ v8_inspector::V8Inspector::kFullyTrusted);
+ channel.expected_response_matcher_ = R"("value":42)";
+ trusted_session->dispatchProtocolMessage(
+ StringView(kCommand, sizeof(kCommand)));
+}
+
TEST_F(InspectorTest, ApiCreatedTasksAreCleanedUp) {
v8::Isolate* isolate = v8_isolate();
v8::HandleScope handle_scope(isolate);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 2a0770a90c..ecd874ee5a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -151,7 +151,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.SetKeyedProperty(reg, reg, strict_keyed_store_slot.ToInt(),
LanguageMode::kStrict)
.DefineNamedOwnProperty(reg, name, define_named_own_slot.ToInt())
- .DefineKeyedOwnProperty(reg, reg, define_named_own_slot.ToInt())
+ .DefineKeyedOwnProperty(reg, reg, DefineKeyedOwnPropertyFlag::kNoFlags,
+ define_named_own_slot.ToInt())
.StoreInArrayLiteral(reg, reg, store_array_element_slot.ToInt());
// Emit Iterator-protocol operations
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden
index 606a6ad481..fe3dd9a4d6 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -233,7 +233,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 20
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star0),
@@ -242,7 +242,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star2),
B(Ldar), R(0),
- /* 57 E> */ B(DefineKeyedOwnProperty), R(1), R(2), U8(1),
+ /* 57 E> */ B(DefineKeyedOwnProperty), R(1), R(2), U8(0), U8(1),
B(Ldar), R(1),
/* 61 S> */ B(Return),
]
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index bc1c08c83e..3594623033 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -23,12 +23,12 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 82
+bytecode array length: 83
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 67 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 67 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 76 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
@@ -75,15 +75,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -107,15 +107,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -139,17 +139,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -173,15 +173,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden
index 195e64c76a..5f1727548b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -24,7 +24,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 114
+bytecode array length: 112
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -43,7 +43,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(5), R(0),
@@ -64,7 +63,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(5), R(1),
@@ -127,7 +125,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 232
+bytecode array length: 229
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -153,7 +151,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(6), R(0),
@@ -191,7 +188,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
B(CreateClosure), U8(16), U8(7), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(6), R(1),
@@ -211,7 +207,6 @@ bytecodes: [
B(Mov), R(1), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
B(CreateClosure), U8(20), U8(9), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(6), R(2),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index a7cde86a77..b7bb831e61 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -18,12 +18,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 24
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 61 E> */ B(GetKeyedProperty), R(this), U8(2),
B(LdaImmutableCurrentContextSlot), U8(2),
@@ -48,17 +48,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -83,15 +83,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -116,7 +116,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 48
+bytecode array length: 49
bytecodes: [
/* 44 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
@@ -125,7 +125,7 @@ bytecodes: [
B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
B(Star1),
B(Ldar), R(0),
- B(DefineKeyedOwnProperty), R(this), R(1), U8(0),
+ B(DefineKeyedOwnProperty), R(this), R(1), U8(0), U8(0),
/* 49 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(Star3),
/* 61 E> */ B(CallUndefinedReceiver0), R(3), U8(2),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden
index 43e40974f9..c814f8489d 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -23,7 +23,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 104
+bytecode array length: 102
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -41,7 +41,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(5), R(0),
@@ -61,7 +60,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(5), R(1),
@@ -119,7 +117,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 199
+bytecode array length: 196
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -144,7 +142,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(6), R(0),
@@ -173,7 +170,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
B(CreateClosure), U8(14), U8(6), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(6), R(1),
@@ -192,7 +188,6 @@ bytecodes: [
B(Mov), R(1), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(6), R(2),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden
index 559afa2fa0..439c0eb00b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -27,7 +27,7 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 166
+bytecode array length: 164
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -55,11 +55,10 @@ bytecodes: [
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
B(CreateClosure), U8(6), U8(1), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(7), U8(1),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star6),
- B(CallProperty0), R(6), R(3), U8(3),
+ B(Star5),
+ B(CallProperty0), R(5), R(3), U8(3),
B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(9),
@@ -88,11 +87,10 @@ bytecodes: [
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(7), U8(5),
B(CreateClosure), U8(13), U8(5), U8(2),
- B(Star6),
- B(CallProperty0), R(6), R(3), U8(7),
+ B(Star5),
+ B(CallProperty0), R(5), R(3), U8(7),
B(PopContext), R(2),
B(Mov), R(3), R(1),
/* 197 S> */ B(Ldar), R(0),
@@ -159,7 +157,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 298
+bytecode array length: 295
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -194,11 +192,10 @@ bytecodes: [
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(9), U8(1),
B(CreateClosure), U8(10), U8(3), U8(2),
- B(Star7),
- B(CallProperty0), R(7), R(4), U8(3),
+ B(Star6),
+ B(CallProperty0), R(6), R(4), U8(3),
B(PopContext), R(3),
B(Mov), R(4), R(0),
/* 38 E> */ B(CreateBlockContext), U8(11),
@@ -226,8 +223,8 @@ bytecodes: [
B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
- B(Mov), R(10), R(7),
B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
B(Ldar), R(9),
@@ -236,11 +233,10 @@ bytecodes: [
B(Star10),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(6),
B(CreateClosure), U8(17), U8(7), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(9), U8(5),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star7),
- B(CallProperty0), R(7), R(4), U8(7),
+ B(Star6),
+ B(CallProperty0), R(6), R(4), U8(7),
B(PopContext), R(3),
B(Mov), R(4), R(1),
/* 122 E> */ B(CreateBlockContext), U8(19),
@@ -260,8 +256,8 @@ bytecodes: [
B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
- B(Mov), R(1), R(7),
B(Mov), R(4), R(6),
+ B(Mov), R(1), R(7),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
B(Ldar), R(9),
@@ -271,11 +267,10 @@ bytecodes: [
B(Ldar), R(4),
B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(22), U8(10), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(9), U8(9),
B(CreateClosure), U8(23), U8(11), U8(2),
- B(Star7),
- B(CallProperty0), R(7), R(4), U8(11),
+ B(Star6),
+ B(CallProperty0), R(6), R(4), U8(11),
B(PopContext), R(3),
B(Mov), R(4), R(2),
/* 456 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 9ff074e5bb..e8350d6b7b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -24,7 +24,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -61,13 +61,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star2),
B(LdaConstant), U8(1),
B(Star3),
@@ -99,13 +99,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -145,7 +145,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -167,7 +167,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -182,7 +182,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -216,13 +216,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -253,13 +253,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 58 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -292,13 +292,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star2),
B(LdaConstant), U8(1),
B(Star3),
@@ -327,7 +327,7 @@ bytecode array length: 19
bytecodes: [
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(0),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index dcb6b72c35..51e61dae23 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -46,6 +46,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
if (Bytecodes::WritesAccumulator(bytecode())) {
SetAccumulator(NullConstant());
}
+ if (Bytecodes::ClobbersAccumulator(bytecode())) {
+ ClobberAccumulator(NullConstant());
+ }
if (Bytecodes::WritesImplicitRegister(bytecode())) {
StoreRegisterForShortStar(NullConstant(), IntPtrConstant(2));
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-unittest.cc
index 1052bccb54..dae9431252 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-unittest.cc
@@ -1841,8 +1841,10 @@ TEST_F(InterpreterTest, InterpreterBigIntComparisons) {
if (tester.HasFeedbackMetadata()) {
MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kBigInt,
- feedback->ToSmi().value());
+ // TODO(panq): Create a standalone unit test for kBigInt64.
+ CHECK(CompareOperationFeedback::kBigInt64 ==
+ feedback->ToSmi().value() ||
+ CompareOperationFeedback::kBigInt == feedback->ToSmi().value());
}
}
}
@@ -4758,11 +4760,11 @@ TEST_F(InterpreterTest, InterpreterWithNativeStack) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
CHECK(f->shared().HasBytecodeArray());
- i::CodeT code = f->shared().GetCode();
- i::Handle<i::CodeT> interpreter_entry_trampoline =
+ i::Code code = f->shared().GetCode(i_isolate());
+ i::Handle<i::Code> interpreter_entry_trampoline =
BUILTIN_CODE(i_isolate(), InterpreterEntryTrampoline);
- CHECK(code.IsCodeT());
+ CHECK(code.IsCode());
CHECK(code.is_interpreter_trampoline_builtin());
CHECK_NE(code.address(), interpreter_entry_trampoline->address());
}
@@ -4772,24 +4774,24 @@ TEST_F(InterpreterTest, InterpreterGetBytecodeHandler) {
Interpreter* interpreter = i_isolate()->interpreter();
// Test that single-width bytecode handlers deserializer correctly.
- CodeT wide_handler =
+ Code wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kSingle);
CHECK_EQ(wide_handler.builtin_id(), Builtin::kWideHandler);
- CodeT add_handler =
+ Code add_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kSingle);
CHECK_EQ(add_handler.builtin_id(), Builtin::kAddHandler);
// Test that double-width bytecode handlers deserializer correctly, including
// an illegal bytecode handler since there is no Wide.Wide handler.
- CodeT wide_wide_handler =
+ Code wide_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kDouble);
CHECK_EQ(wide_wide_handler.builtin_id(), Builtin::kIllegalHandler);
- CodeT add_wide_handler =
+ Code add_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kDouble);
CHECK_EQ(add_wide_handler.builtin_id(), Builtin::kAddWideHandler);
@@ -4982,15 +4984,15 @@ TEST_F(InterpreterTest, InterpreterCollectSourcePositions_GenerateStackTrace) {
TEST_F(InterpreterTest, InterpreterLookupNameOfBytecodeHandler) {
Interpreter* interpreter = i_isolate()->interpreter();
- CodeT ldaLookupSlot = interpreter->GetBytecodeHandler(
- Bytecode::kLdaLookupSlot, OperandScale::kSingle);
+ Code ldaLookupSlot = interpreter->GetBytecodeHandler(Bytecode::kLdaLookupSlot,
+ OperandScale::kSingle);
CheckStringEqual("LdaLookupSlotHandler",
Builtins::name(ldaLookupSlot.builtin_id()));
- CodeT wideLdaLookupSlot = interpreter->GetBytecodeHandler(
+ Code wideLdaLookupSlot = interpreter->GetBytecodeHandler(
Bytecode::kLdaLookupSlot, OperandScale::kDouble);
CheckStringEqual("LdaLookupSlotWideHandler",
Builtins::name(wideLdaLookupSlot.builtin_id()));
- CodeT extraWideLdaLookupSlot = interpreter->GetBytecodeHandler(
+ Code extraWideLdaLookupSlot = interpreter->GetBytecodeHandler(
Bytecode::kLdaLookupSlot, OperandScale::kQuadruple);
CheckStringEqual("LdaLookupSlotExtraWideHandler",
Builtins::name(extraWideLdaLookupSlot.builtin_id()));
diff --git a/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
index d3d9580a21..f6ef9834d2 100644
--- a/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "include/v8-platform.h"
+#include "src/init/v8.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -14,7 +15,7 @@ class WithSingleThreadedDefaultPlatformMixin : public TMixin {
WithSingleThreadedDefaultPlatformMixin() {
platform_ = v8::platform::NewSingleThreadedDefaultPlatform();
CHECK_NOT_NULL(platform_.get());
- v8::V8::InitializePlatform(platform_.get());
+ i::V8::InitializePlatformForTesting(platform_.get());
v8::V8::Initialize();
}
diff --git a/deps/v8/test/unittests/libplatform/tracing-unittest.cc b/deps/v8/test/unittests/libplatform/tracing-unittest.cc
index 76f78af14c..b92fc34bb7 100644
--- a/deps/v8/test/unittests/libplatform/tracing-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/tracing-unittest.cc
@@ -11,7 +11,8 @@
#include "testing/gtest/include/gtest/gtest.h"
#ifdef V8_USE_PERFETTO
-#include "perfetto/tracing.h"
+#include "perfetto/tracing/track_event.h" // nogncheck
+#include "perfetto/tracing/track_event_legacy.h" // nogncheck
#include "protos/perfetto/trace/trace.pb.h" // nogncheck
#include "src/libplatform/tracing/trace-event-listener.h"
#include "src/tracing/traced-value.h"
@@ -389,6 +390,10 @@ TEST_F(PlatformTracingTest, TestTracingControllerMultipleArgsAndCopy) {
}
#endif // !defined(V8_USE_PERFETTO)
+// In Perfetto build there are no TracingObservers. Instead the code relies on
+// TrackEventSessionObserver to track tracing sessions, which is tested
+// upstream.
+#if !defined(V8_USE_PERFETTO)
namespace {
class TraceStateObserverImpl : public TracingController::TraceStateObserver {
@@ -412,16 +417,11 @@ TEST_F(PlatformTracingTest, TracingObservers) {
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
-#ifdef V8_USE_PERFETTO
- std::ostringstream sstream;
- tracing_controller->InitializeForPerfetto(&sstream);
-#else
MockTraceWriter* writer = new MockTraceWriter();
v8::platform::tracing::TraceBuffer* ring_buffer =
v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
writer);
tracing_controller->Initialize(ring_buffer);
-#endif
v8::platform::tracing::TraceConfig* trace_config =
new v8::platform::tracing::TraceConfig();
trace_config->AddIncludedCategory("v8");
@@ -469,6 +469,7 @@ TEST_F(PlatformTracingTest, TracingObservers) {
i::V8::SetPlatformForTesting(old_platform);
}
+#endif // !defined(V8_USE_PERFETTO)
// With Perfetto the tracing controller doesn't observe events.
#if !defined(V8_USE_PERFETTO)
@@ -597,7 +598,12 @@ class TestListener : public TraceEventListener {
if (!first_annotation) {
slice += ",";
}
- slice += debug_annotation_names_[it.name_iid()] + "=";
+ if (!it.name().empty()) {
+ slice += it.name();
+ } else {
+ slice += debug_annotation_names_[it.name_iid()];
+ }
+ slice += "=";
std::stringstream value;
if (it.has_bool_value()) {
value << "(bool)" << it.bool_value();
diff --git a/deps/v8/test/unittests/logging/log-unittest.cc b/deps/v8/test/unittests/logging/log-unittest.cc
index 764064c40d..fdeb11dda7 100644
--- a/deps/v8/test/unittests/logging/log-unittest.cc
+++ b/deps/v8/test/unittests/logging/log-unittest.cc
@@ -444,13 +444,16 @@ TEST_F(LogTest, Issue539892) {
explicit FakeCodeEventLogger(i::Isolate* isolate)
: CodeEventLogger(isolate) {}
- void CodeMoveEvent(i::AbstractCode from, i::AbstractCode to) override {}
+ void CodeMoveEvent(i::InstructionStream from,
+ i::InstructionStream to) override {}
+ void BytecodeMoveEvent(i::BytecodeArray from,
+ i::BytecodeArray to) override {}
void CodeDisableOptEvent(i::Handle<i::AbstractCode> code,
i::Handle<i::SharedFunctionInfo> shared) override {
}
private:
- void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
+ void LogRecordedBuffer(i::AbstractCode code,
i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
const char* name, int length) override {}
#if V8_ENABLE_WEBASSEMBLY
@@ -1214,7 +1217,7 @@ TEST_F(LogTest, BuiltinsNotLoggedAsLazyCompile) {
logger.StopLogging();
i::Isolate* i_isolate = logger.i_isolate();
- i::Handle<i::CodeT> builtin = BUILTIN_CODE(i_isolate, BooleanConstructor);
+ i::Handle<i::Code> builtin = BUILTIN_CODE(i_isolate, BooleanConstructor);
v8::base::EmbeddedVector<char, 100> buffer;
// Should only be logged as "Builtin" with a name, never as "Function".
diff --git a/deps/v8/test/unittests/objects/concurrent-string-unittest.cc b/deps/v8/test/unittests/objects/concurrent-string-unittest.cc
index 569bceb01c..919b9097d2 100644
--- a/deps/v8/test/unittests/objects/concurrent-string-unittest.cc
+++ b/deps/v8/test/unittests/objects/concurrent-string-unittest.cc
@@ -245,7 +245,6 @@ TEST_F(ConcurrentStringTest, InspectOneByteExternalizing_ThinString) {
// Create a string.
const char* raw_string = STRING_VALUE;
Handle<String> thin_string = factory->NewStringFromAsciiChecked(raw_string);
- EXPECT_TRUE(thin_string->IsOneByteRepresentation());
EXPECT_TRUE(!thin_string->IsExternalString());
EXPECT_TRUE(!thin_string->IsInternalizedString());
@@ -347,9 +346,6 @@ TEST_F(ConcurrentStringTest, InspectOneIntoTwoByteExternalizing_ThinString) {
EXPECT_TRUE(!thin_string->IsExternalString());
EXPECT_TRUE(!thin_string->IsInternalizedString());
EXPECT_TRUE(thin_string->IsThinString());
- // Even its representation is still one byte, even when the internalized
- // string moved to two bytes.
- EXPECT_TRUE(thin_string->IsOneByteRepresentation());
thread->Join();
}
diff --git a/deps/v8/test/unittests/objects/roots-unittest.cc b/deps/v8/test/unittests/objects/roots-unittest.cc
index 6bb3bc16ee..17e20e3fbc 100644
--- a/deps/v8/test/unittests/objects/roots-unittest.cc
+++ b/deps/v8/test/unittests/objects/roots-unittest.cc
@@ -93,6 +93,17 @@ TEST_F(RootsTest, TestHeapRootsNotReadOnly) {
MUTABLE_ROOT_LIST(CHECK_NOT_IN_RO_SPACE)
}
+TEST_F(RootsTest, TestHeapNumberList) {
+ ReadOnlyRoots roots(isolate());
+ for (auto pos = RootIndex::kFirstReadOnlyRoot;
+ pos <= RootIndex::kLastReadOnlyRoot; ++pos) {
+ auto obj = roots.object_at(pos);
+ bool in_nr_range = pos >= RootIndex::kFirstHeapNumberRoot &&
+ pos <= RootIndex::kLastHeapNumberRoot;
+ CHECK_EQ(obj.IsHeapNumber(), in_nr_range);
+ }
+}
+
#undef CHECK_NOT_IN_RO_SPACE
} // namespace internal
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 8a36f6dbb3..a2f6461ab2 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -21,7 +21,9 @@
#include "src/base/build_config.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/objects-inl.h"
+#include "test/common/flag-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -1398,7 +1400,6 @@ TEST_F(ValueSerializerTest, RoundTripDate) {
}
TEST_F(ValueSerializerTest, DecodeDate) {
- Local<Value> value;
#if defined(V8_TARGET_LITTLE_ENDIAN)
DecodeTestFutureVersions(
{0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84, 0x2E,
@@ -1884,11 +1885,20 @@ TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
ASSERT_TRUE(value->IsArrayBuffer());
EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
ExpectScriptTrue("Object.getPrototypeOf(result) === ArrayBuffer.prototype");
+ // TODO(v8:11111): Use API functions for testing max_byte_length and resizable
+ // once they're exposed via the API.
+ i::Handle<i::JSArrayBuffer> array_buffer =
+ Utils::OpenHandle(ArrayBuffer::Cast(*value));
+ EXPECT_EQ(0u, array_buffer->max_byte_length());
+ EXPECT_EQ(false, array_buffer->is_resizable_by_js());
value = RoundTripTest("new Uint8Array([0, 128, 255]).buffer");
ASSERT_TRUE(value->IsArrayBuffer());
EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
ExpectScriptTrue("new Uint8Array(result).toString() === '0,128,255'");
+ array_buffer = Utils::OpenHandle(ArrayBuffer::Cast(*value));
+ EXPECT_EQ(3u, array_buffer->max_byte_length());
+ EXPECT_EQ(false, array_buffer->is_resizable_by_js());
value =
RoundTripTest("({ a: new ArrayBuffer(), get b() { return this.a; }})");
@@ -1896,6 +1906,21 @@ TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
ExpectScriptTrue("result.a === result.b");
}
+TEST_F(ValueSerializerTest, RoundTripResizableArrayBuffer) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ Local<Value> value =
+ RoundTripTest("new ArrayBuffer(100, {maxByteLength: 200})");
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(100u, ArrayBuffer::Cast(*value)->ByteLength());
+
+ // TODO(v8:11111): Use API functions for testing max_byte_length and resizable
+ // once they're exposed via the API.
+ i::Handle<i::JSArrayBuffer> array_buffer =
+ Utils::OpenHandle(ArrayBuffer::Cast(*value));
+ EXPECT_EQ(200u, array_buffer->max_byte_length());
+ EXPECT_EQ(true, array_buffer->is_resizable_by_js());
+}
+
TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
DecodeTestFutureVersions(
{0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00}, [this](Local<Value> value) {
@@ -1927,6 +1952,13 @@ TEST_F(ValueSerializerTest, DecodeInvalidArrayBuffer) {
InvalidDecodeTest({0xFF, 0x09, 0x42, 0xFF, 0xFF, 0x00});
}
+TEST_F(ValueSerializerTest, DecodeInvalidResizableArrayBuffer) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ // Enough bytes available after reading the length, but not anymore when
+ // reading the max byte length.
+ InvalidDecodeTest({0xFF, 0x09, 0x7E, 0x2, 0x10, 0x00});
+}
+
// An array buffer allocator that never has available memory.
class OOMArrayBufferAllocator : public ArrayBuffer::Allocator {
public:
@@ -2026,14 +2058,20 @@ TEST_F(ValueSerializerTestWithArrayBufferTransfer,
TEST_F(ValueSerializerTest, RoundTripTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array.
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
Local<Value> value;
+ i::Handle<i::JSTypedArray> i_ta;
#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
value = RoundTripTest("new " #Type "Array(2)"); \
ASSERT_TRUE(value->Is##Type##Array()); \
EXPECT_EQ(2u * sizeof(ctype), TypedArray::Cast(*value)->ByteLength()); \
EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
- "Array.prototype");
+ "Array.prototype"); \
+ i_ta = v8::Utils::OpenHandle(TypedArray::Cast(*value)); \
+ EXPECT_EQ(false, i_ta->is_length_tracking()); \
+ EXPECT_EQ(false, i_ta->is_backed_by_rab());
TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
#undef TYPED_ARRAY_ROUND_TRIP_TEST
@@ -2066,6 +2104,56 @@ TEST_F(ValueSerializerTest, RoundTripTypedArray) {
ExpectScriptTrue("result.f32.length === 5");
}
+TEST_F(ValueSerializerTest, RoundTripRabBackedLengthTrackingTypedArray) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ // Check that the right type comes out the other side for every kind of typed
+ // array.
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ Local<Value> value;
+ i::Handle<i::JSTypedArray> i_ta;
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
+ value = RoundTripTest("new " #Type \
+ "Array(new ArrayBuffer(80, " \
+ "{maxByteLength: 160}))"); \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(80u, TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(80u / sizeof(ctype), TypedArray::Cast(*value)->Length()); \
+ ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
+ "Array.prototype"); \
+ i_ta = v8::Utils::OpenHandle(TypedArray::Cast(*value)); \
+ EXPECT_EQ(true, i_ta->is_length_tracking()); \
+ EXPECT_EQ(true, i_ta->is_backed_by_rab());
+
+ TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
+#undef TYPED_ARRAY_ROUND_TRIP_TEST
+}
+
+TEST_F(ValueSerializerTest, RoundTripRabBackedNonLengthTrackingTypedArray) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ // Check that the right type comes out the other side for every kind of typed
+ // array.
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ Local<Value> value;
+ i::Handle<i::JSTypedArray> i_ta;
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
+ value = RoundTripTest("new " #Type \
+ "Array(new ArrayBuffer(80, " \
+ "{maxByteLength: 160}), 8, 4)"); \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(4u * sizeof(ctype), TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->Length()); \
+ ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
+ "Array.prototype"); \
+ i_ta = v8::Utils::OpenHandle(TypedArray::Cast(*value)); \
+ EXPECT_EQ(false, i_ta->is_length_tracking()); \
+ EXPECT_EQ(true, i_ta->is_backed_by_rab());
+
+ TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
+#undef TYPED_ARRAY_ROUND_TRIP_TEST
+}
+
TEST_F(ValueSerializerTest, DecodeTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array (version 14 and above).
@@ -2397,7 +2485,8 @@ TEST_F(ValueSerializerTest, RoundTripDataView) {
// TODO(v8:11111): Use API functions for testing is_length_tracking and
// is_backed_by_rab, once they're exposed
// via the API.
- i::Handle<i::JSDataView> i_dv = v8::Utils::OpenHandle(DataView::Cast(*value));
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv =
+ v8::Utils::OpenHandle(DataView::Cast(*value));
EXPECT_EQ(false, i_dv->is_length_tracking());
EXPECT_EQ(false, i_dv->is_backed_by_rab());
}
@@ -2416,6 +2505,42 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
});
}
+TEST_F(ValueSerializerTest, RoundTripRabBackedDataView) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ Local<Value> value = RoundTripTest(
+ "new DataView(new ArrayBuffer(4, {maxByteLength: 8}), 1, 2)");
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv =
+ v8::Utils::OpenHandle(DataView::Cast(*value));
+ EXPECT_EQ(false, i_dv->is_length_tracking());
+ EXPECT_EQ(true, i_dv->is_backed_by_rab());
+}
+
+TEST_F(ValueSerializerTest, RoundTripRabBackedLengthTrackingDataView) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ Local<Value> value =
+ RoundTripTest("new DataView(new ArrayBuffer(4, {maxByteLength: 8}), 1)");
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(3u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv =
+ v8::Utils::OpenHandle(DataView::Cast(*value));
+ EXPECT_EQ(true, i_dv->is_length_tracking());
+ EXPECT_EQ(true, i_dv->is_backed_by_rab());
+}
+
TEST_F(ValueSerializerTest, DecodeDataViewBackwardsCompatibility) {
DecodeTestUpToVersion(
13,
@@ -2633,6 +2758,47 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
ExpectScriptTrue(
"new Uint8Array(result.buffer, 0, 4).toString() === '0,1,128,255'");
}
+
+TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
+ RoundTripWebAssemblyMemory_WithPreviousReference) {
+ // This is a regression test for crbug.com/1421524.
+ // It ensures that WasmMemoryObject can deserialize even if its underlying
+ // buffer was already encountered, and so will be encoded with an object
+ // backreference.
+ std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xFF};
+ data.resize(65536);
+ InitializeData(data, true);
+
+ EXPECT_CALL(serializer_delegate_,
+ GetSharedArrayBufferId(isolate(), input_buffer()))
+ .WillRepeatedly(Return(Just(0U)));
+ EXPECT_CALL(deserializer_delegate_, GetSharedArrayBufferFromId(isolate(), 0U))
+ .WillRepeatedly(Return(output_buffer()));
+
+ Local<Value> input;
+ {
+ Context::Scope scope(serialization_context());
+ const int32_t kMaxPages = 1;
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*input_buffer());
+ i::Handle<i::WasmMemoryObject> wasm_memory =
+ i::WasmMemoryObject::New(i_isolate, buffer, kMaxPages)
+ .ToHandleChecked();
+ i::Handle<i::FixedArray> fixed_array =
+ i_isolate->factory()->NewFixedArray(2);
+ fixed_array->set(0, *buffer);
+ fixed_array->set(1, *wasm_memory);
+ input = Utils::ToLocal(i_isolate->factory()->NewJSArrayWithElements(
+ fixed_array, i::PACKED_ELEMENTS, 2));
+ }
+ RoundTripTest(input);
+ ExpectScriptTrue("result[0] instanceof SharedArrayBuffer");
+ ExpectScriptTrue("result[1] instanceof WebAssembly.Memory");
+ ExpectScriptTrue("result[0] === result[1].buffer");
+ ExpectScriptTrue("result[0].byteLength === 65536");
+ ExpectScriptTrue(
+ "new Uint8Array(result[0], 0, 4).toString() === '0,1,128,255'");
+}
#endif // V8_ENABLE_WEBASSEMBLY
TEST_F(ValueSerializerTest, UnsupportedHostObject) {
diff --git a/deps/v8/test/unittests/objects/weakmaps-unittest.cc b/deps/v8/test/unittests/objects/weakmaps-unittest.cc
index 9e839463dd..79f07aa938 100644
--- a/deps/v8/test/unittests/objects/weakmaps-unittest.cc
+++ b/deps/v8/test/unittests/objects/weakmaps-unittest.cc
@@ -58,6 +58,8 @@ TEST_F(WeakMapsTest, Weakness) {
v8_flags.incremental_marking = false;
Isolate* isolate = i_isolate();
Factory* factory = isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ isolate->heap());
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
GlobalHandles* global_handles = isolate->global_handles();
@@ -117,6 +119,8 @@ TEST_F(WeakMapsTest, Weakness) {
TEST_F(WeakMapsTest, Shrinking) {
Isolate* isolate = i_isolate();
Factory* factory = isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ isolate->heap());
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
@@ -329,6 +333,8 @@ TEST_F(WeakMapsTest, Regress399527) {
TEST_F(WeakMapsTest, WeakMapsWithChainedEntries) {
ManualGCScope manual_gc_scope(i_isolate());
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
const int initial_gc_count = i_isolate()->heap()->gc_count();
diff --git a/deps/v8/test/unittests/parser/decls-unittest.cc b/deps/v8/test/unittests/parser/decls-unittest.cc
index 5a0f0c1b89..8b12db12e3 100644
--- a/deps/v8/test/unittests/parser/decls-unittest.cc
+++ b/deps/v8/test/unittests/parser/decls-unittest.cc
@@ -145,8 +145,6 @@ void DeclarationContext::Check(const char* source, int get, int set, int query,
InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now
// to avoid that.
- i::ScanStackModeScopeForTesting no_stack_scanning(
- i_isolate()->heap(), i::Heap::ScanStackMode::kNone);
i_isolate()->heap()->CollectGarbage(i::NEW_SPACE,
i::GarbageCollectionReason::kTesting);
HandleScope scope(isolate_);
diff --git a/deps/v8/test/unittests/parser/parsing-unittest.cc b/deps/v8/test/unittests/parser/parsing-unittest.cc
index ac53e538fe..9048d7f1fa 100644
--- a/deps/v8/test/unittests/parser/parsing-unittest.cc
+++ b/deps/v8/test/unittests/parser/parsing-unittest.cc
@@ -2980,13 +2980,13 @@ TEST_F(ParsingTest, NoErrorsObjectLiteralChecking) {
TEST_F(ParsingTest, TooManyArguments) {
const char* context_data[][2] = {{"foo(", "0)"}, {nullptr, nullptr}};
- using v8::internal::Code;
- char statement[Code::kMaxArguments * 2 + 1];
- for (int i = 0; i < Code::kMaxArguments; ++i) {
+ using v8::internal::InstructionStream;
+ char statement[InstructionStream::kMaxArguments * 2 + 1];
+ for (int i = 0; i < InstructionStream::kMaxArguments; ++i) {
statement[2 * i] = '0';
statement[2 * i + 1] = ',';
}
- statement[Code::kMaxArguments * 2] = 0;
+ statement[InstructionStream::kMaxArguments * 2] = 0;
const char* statement_data[] = {statement, nullptr};
@@ -9059,9 +9059,9 @@ TEST_F(ParsingTest, ObjectRestNegativeTestSlow) {
{ nullptr, nullptr }
};
- using v8::internal::Code;
+ using v8::internal::InstructionStream;
std::string statement;
- for (int i = 0; i < Code::kMaxArguments; ++i) {
+ for (int i = 0; i < InstructionStream::kMaxArguments; ++i) {
statement += std::to_string(i) + " : " + "x, ";
}
statement += "...y";
diff --git a/deps/v8/test/unittests/regexp/regexp-unittest.cc b/deps/v8/test/unittests/regexp/regexp-unittest.cc
index 065eea336f..2b84c701ee 100644
--- a/deps/v8/test/unittests/regexp/regexp-unittest.cc
+++ b/deps/v8/test/unittests/regexp/regexp-unittest.cc
@@ -1653,6 +1653,7 @@ void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
++global_use_counts[feature];
}
+
} // namespace
using RegExpTestWithContext = TestWithContext;
@@ -2314,10 +2315,10 @@ TEST_F(RegExpTestWithContext, UnicodePropertyEscapeCodeSize) {
if (maybe_bytecode.IsByteArray()) {
// On x64, excessive inlining produced >250KB.
CHECK_LT(ByteArray::cast(maybe_bytecode).Size(), kMaxSize);
- } else if (maybe_code.IsCodeT()) {
+ } else if (maybe_code.IsCode()) {
// On x64, excessive inlining produced >360KB.
- CHECK_LT(FromCodeT(CodeT::cast(maybe_code)).Size(), kMaxSize);
- CHECK_EQ(FromCodeT(CodeT::cast(maybe_code)).kind(), CodeKind::REGEXP);
+ CHECK_LT(Code::cast(maybe_code).Size(), kMaxSize);
+ CHECK_EQ(Code::cast(maybe_code).kind(), CodeKind::REGEXP);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 6d95a2b7ac..053c81bcde 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -10,8 +10,10 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
+#include "test/unittests/heap/heap-utils.h"
namespace v8 {
@@ -53,7 +55,9 @@ IsolateWrapper::IsolateWrapper(CountersMode counters_mode)
IsolateWrapper::~IsolateWrapper() {
v8::Platform* platform = internal::V8::GetCurrentPlatform();
CHECK_NOT_NULL(platform);
+ isolate_->Enter();
while (platform::PumpMessageLoop(platform, isolate())) continue;
+ isolate_->Exit();
isolate_->Dispose();
if (counter_map_) {
CHECK_EQ(kCurrentCounterMap, counter_map_.get());
@@ -88,14 +92,7 @@ ManualGCScope::ManualGCScope(i::Isolate* isolate) {
// Some tests run threaded (back-to-back) and thus the GC may already be
// running by the time a ManualGCScope is created. Finalizing existing marking
// prevents any undefined/unexpected behavior.
- if (isolate && isolate->heap()->incremental_marking()->IsMarking()) {
- ScanStackModeScopeForTesting no_stack_scanning(isolate->heap(),
- Heap::ScanStackMode::kNone);
- isolate->heap()->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kTesting);
- // Make sure there is no concurrent sweeping running in the background.
- isolate->heap()->CompleteSweepingFull();
- }
+ FinalizeGCIfRunning(isolate);
i::v8_flags.concurrent_marking = false;
i::v8_flags.concurrent_sweeping = false;
@@ -105,6 +102,13 @@ ManualGCScope::ManualGCScope(i::Isolate* isolate) {
// Parallel marking has a dependency on concurrent marking.
i::v8_flags.parallel_marking = false;
i::v8_flags.detect_ineffective_gcs_near_heap_limit = false;
+ // CppHeap concurrent marking has a dependency on concurrent marking.
+ i::v8_flags.cppheap_concurrent_marking = false;
+
+ if (isolate && isolate->heap()->cpp_heap()) {
+ CppHeap::From(isolate->heap()->cpp_heap())
+ ->ReduceGCCapabilitiesFromFlagsForTesting();
+ }
}
} // namespace internal
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 7860ed5815..523cddfe02 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -38,7 +38,7 @@ class WithDefaultPlatformMixin : public TMixin {
platform_ = v8::platform::NewDefaultPlatform(
0, v8::platform::IdleTaskSupport::kEnabled);
CHECK_NOT_NULL(platform_.get());
- v8::V8::InitializePlatform(platform_.get());
+ i::V8::InitializePlatformForTesting(platform_.get());
// Allow changing flags in unit tests.
// TODO(12887): Fix tests to avoid changing flag values after
// initialization.
@@ -165,7 +165,6 @@ class WithIsolateScopeMixin : public TMixin {
static MaybeLocal<Value> TryRunJS(Local<Context> context,
Local<String> source) {
- v8::Local<v8::Value> result;
Local<Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
return script->Run(context);
@@ -187,37 +186,25 @@ class WithIsolateScopeMixin : public TMixin {
}
// By default, the GC methods do not scan the stack conservatively.
- void CollectGarbage(
- i::AllocationSpace space, i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void CollectGarbage(i::AllocationSpace space, i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
- void CollectAllGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void CollectAllGarbage(i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
}
- void CollectAllAvailableGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void CollectAllAvailableGarbage(i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kTesting);
}
- void PreciseCollectAllGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void PreciseCollectAllGarbage(i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
}
@@ -586,18 +573,7 @@ class FeedbackVectorHelper {
template <typename Spec>
Handle<FeedbackVector> NewFeedbackVector(Isolate* isolate, Spec* spec) {
- Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate, spec);
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfoForBuiltin(
- isolate->factory()->empty_string(), Builtin::kIllegal);
- // Set the raw feedback metadata to circumvent checks that we are not
- // overwriting existing metadata.
- shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
- ClosureFeedbackCellArray::New(isolate, shared);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
- return FeedbackVector::New(isolate, shared, closure_feedback_cell_array,
- &is_compiled_scope);
+ return FeedbackVector::NewForTesting(isolate, spec);
}
class ParkingThread : public v8::base::Thread {
diff --git a/deps/v8/test/unittests/testcfg.py b/deps/v8/test/unittests/testcfg.py
index 43ec0c2b11..6863c3e216 100644
--- a/deps/v8/test/unittests/testcfg.py
+++ b/deps/v8/test/unittests/testcfg.py
@@ -10,15 +10,27 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
+ADDITIONAL_VARIANTS = set(["minor_mc"])
+
+
class VariantsGenerator(testsuite.VariantsGenerator):
+
+ def __init__(self, variants):
+ super().__init__(variants)
+ self._supported_variants = self._standard_variant + [
+ v for v in variants if v in ADDITIONAL_VARIANTS
+ ]
+
def _get_variants(self, test):
- return self._standard_variant
+ if test.only_standard_variant:
+ return self._standard_variant
+ return self._supported_variants
class TestLoader(testsuite.TestLoader):
def _list_test_filenames(self):
shell = os.path.abspath(
- os.path.join(self.test_config.shell_dir, "unittests"))
+ os.path.join(self.test_config.shell_dir, "v8_unittests"))
if utils.IsWindows():
shell += ".exe"
@@ -76,9 +88,9 @@ class TestCase(testcase.TestCase):
)
def get_shell(self):
- return self.suite.name
+ return 'v8_' + self.suite.name
- def _get_resources(self):
+ def get_android_resources(self):
# Bytecode-generator tests are the only ones requiring extra files on
# Android.
parts = self.name.split('.')
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 5299766823..446d2345a8 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -84,7 +84,7 @@ type string constexpr 'const char*';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
-type Code extends HeapObject generates 'TNode<Code>';
+type InstructionStream extends HeapObject generates 'TNode<InstructionStream>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
type NativeContext extends Context;
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index ba61394ed1..ffe19b464c 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -32,6 +32,9 @@
# Slow tests.
'ParsingTest.ObjectRestNegativeTestSlow': [PASS, ['mode == debug', SKIP]],
+
+ # BUG(13660): Flaky test.
+ 'OS.RemapPages': [SKIP],
}], # ALWAYS
##############################################################################
@@ -192,6 +195,7 @@
'LogInterpretedFramesNativeStackWithSerializationTest.LogInterpretedFramesNativeStackWithSerialization': [SKIP],
}], # lite_mode
+##############################################################################
['variant == jitless', {
# --interpreted-frames-native-stack tests
'LogExternalInterpretedFramesNativeStackTest.ExternalLogEventListenerWithInterpretedFramesNativeStack': [SKIP],
@@ -200,6 +204,25 @@
'InterpreterTest.InterpreterWithNativeStack': [SKIP],
}], # jitless
+##############################################################################
+['jitless_build_mode', {
+ # Feedback collection maintenance is (mostly) disabled.
+ 'FeedbackVectorTest.Vector*': [SKIP],
+ 'InterpreterTest.InterpreterBigIntComparisons': [SKIP],
+ 'InterpreterTest.InterpreterBinaryOpSmiTypeFeedback': [SKIP],
+ 'InterpreterTest.InterpreterUnaryOpFeedback': [SKIP],
+ 'InterpreterTest.InterpreterStringComparisons': [SKIP],
+ 'InterpreterTest.InterpreterSmiComparisons': [SKIP],
+ 'InterpreterTest.InterpreterStringAdd': [SKIP],
+ 'InterpreterTest.InterpreterMixedComparisons': [SKIP],
+ 'InterpreterTest.InterpreterHeapNumberComparisons': [SKIP],
+ 'InterpreterTest.InterpreterBinaryOpsBigInt': [SKIP],
+ 'InterpreterTest.InterpreterBinaryOpTypeFeedback': [SKIP],
+ 'InterpreterTest.InterpreterBitwiseTypeFeedback': [SKIP],
+ # These require executable code space.
+ 'AssemblerX64Test.*': [SKIP],
+}], # jitless_build_mode
+
################################################################################
['third_party_heap', {
# Tests on OptimizingCompileDispatcher
@@ -243,7 +266,6 @@
'WeakMapsTest.WeakMapsWithChainedEntries': [SKIP],
'WeakMapsTest.Weakness': [SKIP],
'WeakSetsTest.WeakSet_Weakness': [SKIP],
- 'WebSnapshotTest.SFIDeduplicationAfterBytecodeFlushing': [SKIP],
# CodeRange tests
'CodePagesTest.LargeCodeObjectWithSignalHandler': [SKIP],
@@ -261,13 +283,6 @@
'FactoryCodeBuilderOOMTest.Factory_CodeBuilder_TryBuildOOM': [SKIP],
}], # third_party_heap
-################################################################################
-['variant == always_sparkplug', {
- # SFI deduplication tests check compilation state, which always_sparkplug
- # can break.
- 'WebSnapshotTest.SFIDeduplication*': [SKIP],
-}],
-
##############################################################################
['byteorder == big', {
# Peephole optimization not supported on big-endian machines.
@@ -337,4 +352,8 @@
'WasmDisassemblerTest.Simd': [SKIP],
}], # no_simd_hardware == True
+['tsan and mode == debug', {
+ 'LazyCompileDispatcherTest.CompileLazy2FinishesDispatcherJob': [SKIP],
+}]
+
]
diff --git a/deps/v8/test/unittests/utils/identity-map-unittest.cc b/deps/v8/test/unittests/utils/identity-map-unittest.cc
index f61559f750..281f2d9ecc 100644
--- a/deps/v8/test/unittests/utils/identity-map-unittest.cc
+++ b/deps/v8/test/unittests/utils/identity-map-unittest.cc
@@ -788,6 +788,7 @@ TEST_F(IdentityMapTest, GCShortCutting) {
if (v8_flags.single_generation) return;
// We don't create ThinStrings immediately when using the forwarding table.
if (v8_flags.always_use_string_forwarding_table) return;
+ v8_flags.shortcut_strings_with_stack = true;
ManualGCScope manual_gc_scope(isolate());
IdentityMapTester t(isolate()->heap(), zone());
Factory* factory = isolate()->factory();
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 26d37fe7d6..3e43729e0e 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -20,54 +20,52 @@ class DecoderTest : public TestWithZone {
Decoder decoder;
};
-#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(static_cast<uint32_t>(expected), \
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), \
- &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i32v<Decoder::FullValidationTag>( \
- decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(expected, decoder.consume_i32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_i32v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(expected, value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(expected, decoder.consume_i32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(static_cast<uint64_t>(expected), \
- decoder.read_u64v<Decoder::FullValidationTag>(decoder.start(), \
- &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_u64v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(static_cast<uint64_t>(expected), value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
-#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i64v<Decoder::FullValidationTag>( \
- decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_i64v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(expected, value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
TEST_F(DecoderTest, ReadU32v_OneByte) {
@@ -377,18 +375,16 @@ TEST_F(DecoderTest, ReadI32v_FiveByte) {
TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
- unsigned length = 0;
decoder.Reset(data, data);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
TEST_F(DecoderTest, ReadU32v_off_end2) {
static const byte data[] = {U32V_2(1111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -396,9 +392,8 @@ TEST_F(DecoderTest, ReadU32v_off_end2) {
TEST_F(DecoderTest, ReadU32v_off_end3) {
static const byte data[] = {U32V_3(111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -406,9 +401,8 @@ TEST_F(DecoderTest, ReadU32v_off_end3) {
TEST_F(DecoderTest, ReadU32v_off_end4) {
static const byte data[] = {U32V_4(11111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -416,9 +410,8 @@ TEST_F(DecoderTest, ReadU32v_off_end4) {
TEST_F(DecoderTest, ReadU32v_off_end5) {
static const byte data[] = {U32V_5(111111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -427,29 +420,27 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 16; i++) {
data[4] = static_cast<byte>(i << 4);
- unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ auto [result, length] =
+ decoder.read_i32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_EQ(5u, length);
EXPECT_TRUE(decoder.ok());
}
TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_i32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
@@ -483,9 +474,8 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
// foreach buffer size 0...5
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned rlen;
- uint32_t result =
- decoder.read_u32v<Decoder::FullValidationTag>(data, &rlen);
+ auto [result, rlen] =
+ decoder.read_u32v<Decoder::FullValidationTag>(data);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -540,9 +530,8 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned length;
- uint64_t result =
- decoder.read_u64v<Decoder::FullValidationTag>(data, &length);
+ auto [result, length] =
+ decoder.read_u64v<Decoder::FullValidationTag>(data);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -582,9 +571,8 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
// foreach buffer size 0...10
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned rlen;
- uint64_t result =
- decoder.read_u64v<Decoder::FullValidationTag>(data, &rlen);
+ auto [result, rlen] =
+ decoder.read_u64v<Decoder::FullValidationTag>(data);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -626,9 +614,8 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
// foreach buffer size 0...10
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned rlen;
- int64_t result =
- decoder.read_i64v<Decoder::FullValidationTag>(data, &rlen);
+ auto [result, rlen] =
+ decoder.read_i64v<Decoder::FullValidationTag>(data);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -645,29 +632,27 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 128; i++) {
data[9] = static_cast<byte>(i << 1);
- unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u64v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u64v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<Decoder::FullValidationTag>(decoder.start(), &length);
+ auto [result, length] =
+ decoder.read_i64v<Decoder::FullValidationTag>(decoder.start());
EXPECT_EQ(10u, length);
EXPECT_TRUE(decoder.ok());
}
TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_i64v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index c55f3cb660..b91df08e9a 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -75,9 +75,11 @@ enum MemoryType { kMemory32, kMemory64 };
// globals, or memories.
class TestModuleBuilder {
public:
- explicit TestModuleBuilder(ModuleOrigin origin = kWasmOrigin)
- : allocator(), mod(std::make_unique<Zone>(&allocator, ZONE_NAME)) {
- mod.origin = origin;
+ explicit TestModuleBuilder(ModuleOrigin origin = kWasmOrigin) : mod(origin) {
+ mod.num_declared_functions = 1;
+ mod.validated_functions = std::make_unique<std::atomic<uint8_t>[]>(1);
+ // Asm.js functions are valid by design.
+ if (is_asmjs_module(&mod)) mod.validated_functions[0] = 0xff;
}
byte AddGlobal(ValueType type, bool mutability = true) {
mod.globals.push_back({type, mutability, {}, {0}, false, false});
@@ -85,7 +87,7 @@ class TestModuleBuilder {
return static_cast<byte>(mod.globals.size() - 1);
}
byte AddSignature(const FunctionSig* sig, uint32_t supertype = kNoSuperType) {
- mod.add_signature(sig, supertype);
+ mod.add_signature(sig, supertype, v8_flags.wasm_final_types);
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
@@ -105,7 +107,7 @@ class TestModuleBuilder {
return result;
}
byte AddException(WasmTagSig* sig) {
- mod.tags.emplace_back(sig);
+ mod.tags.emplace_back(sig, AddSignature(sig));
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.tags.size() - 1);
}
@@ -124,19 +126,20 @@ class TestModuleBuilder {
byte AddStruct(std::initializer_list<F> fields,
uint32_t supertype = kNoSuperType) {
- StructType::Builder type_builder(mod.signature_zone.get(),
+ StructType::Builder type_builder(&mod.signature_zone,
static_cast<uint32_t>(fields.size()));
for (F field : fields) {
type_builder.AddField(field.first, field.second);
}
- mod.add_struct_type(type_builder.Build(), supertype);
+ mod.add_struct_type(type_builder.Build(), supertype,
+ v8_flags.wasm_final_types);
GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
byte AddArray(ValueType type, bool mutability) {
- ArrayType* array = mod.signature_zone->New<ArrayType>(type, mutability);
- mod.add_array_type(array, kNoSuperType);
+ ArrayType* array = mod.signature_zone.New<ArrayType>(type, mutability);
+ mod.add_array_type(array, kNoSuperType, v8_flags.wasm_final_types);
GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
@@ -156,14 +159,14 @@ class TestModuleBuilder {
byte AddPassiveElementSegment(wasm::ValueType type) {
mod.elem_segments.emplace_back(type, WasmElemSegment::kStatusPassive,
- WasmElemSegment::kExpressionElements);
+ WasmElemSegment::kExpressionElements, 0, 0);
return static_cast<byte>(mod.elem_segments.size() - 1);
}
byte AddDeclarativeElementSegment() {
mod.elem_segments.emplace_back(kWasmFuncRef,
WasmElemSegment::kStatusDeclarative,
- WasmElemSegment::kExpressionElements);
+ WasmElemSegment::kExpressionElements, 0, 0);
return static_cast<byte>(mod.elem_segments.size() - 1);
}
@@ -191,7 +194,6 @@ class TestModuleBuilder {
return static_cast<byte>(mod.functions.size() - 1);
}
- AccountingAllocator allocator;
WasmModule mod;
};
@@ -260,9 +262,8 @@ class FunctionBodyDecoderTestBase : public WithZoneMixin<BaseTest> {
// Validate the code.
FunctionBody body(sig, 0, code.begin(), code.end());
WasmFeatures unused_detected_features = WasmFeatures::None();
- DecodeResult result =
- ValidateFunctionBody(this->zone()->allocator(), enabled_features_,
- module, &unused_detected_features, body);
+ DecodeResult result = ValidateFunctionBody(enabled_features_, module,
+ &unused_detected_features, body);
std::ostringstream str;
if (result.failed()) {
@@ -1718,7 +1719,7 @@ TEST_F(FunctionBodyDecoderTest, ReturnCallWithSubtype) {
WASM_FEATURE_SCOPE(return_call);
auto sig = MakeSig::Returns(kWasmAnyRef);
- auto callee_sig = MakeSig::Returns(kWasmAnyNonNullableRef);
+ auto callee_sig = MakeSig::Returns(kWasmAnyRef.AsNonNull());
builder.AddFunction(&callee_sig);
ExpectValidates(&sig, {WASM_RETURN_CALL_FUNCTION0(0)});
@@ -2354,97 +2355,6 @@ TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
ExpectFailure(sigs.i_d(), code);
}
-TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
- ValueType float32int32float32[] = {kWasmF32, kWasmI32, kWasmF32};
- FunctionSig sig_f_if(1, 2, float32int32float32);
- ValueType float64int32float64[] = {kWasmF64, kWasmI32, kWasmF64};
- FunctionSig sig_d_id(1, 2, float64int32float64);
- struct {
- WasmOpcode op;
- const FunctionSig* sig;
- } AsmJsBinOps[] = {
- {kExprF64Atan2, sigs.d_dd()},
- {kExprF64Pow, sigs.d_dd()},
- {kExprF64Mod, sigs.d_dd()},
- {kExprI32AsmjsDivS, sigs.i_ii()},
- {kExprI32AsmjsDivU, sigs.i_ii()},
- {kExprI32AsmjsRemS, sigs.i_ii()},
- {kExprI32AsmjsRemU, sigs.i_ii()},
- {kExprI32AsmjsStoreMem8, sigs.i_ii()},
- {kExprI32AsmjsStoreMem16, sigs.i_ii()},
- {kExprI32AsmjsStoreMem, sigs.i_ii()},
- {kExprF32AsmjsStoreMem, &sig_f_if},
- {kExprF64AsmjsStoreMem, &sig_d_id},
- };
-
- {
- TestModuleBuilder builder(kAsmJsSloppyOrigin);
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
- TestBinop(AsmJsBinOps[i].op, AsmJsBinOps[i].sig);
- }
- }
-
- {
- TestModuleBuilder builder;
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
- ExpectFailure(AsmJsBinOps[i].sig,
- {WASM_BINOP(AsmJsBinOps[i].op, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1))});
- }
- }
-}
-
-TEST_F(FunctionBodyDecoderTest, AsmJsUnOpsCheckOrigin) {
- ValueType float32int32[] = {kWasmF32, kWasmI32};
- FunctionSig sig_f_i(1, 1, float32int32);
- ValueType float64int32[] = {kWasmF64, kWasmI32};
- FunctionSig sig_d_i(1, 1, float64int32);
- struct {
- WasmOpcode op;
- const FunctionSig* sig;
- } AsmJsUnOps[] = {{kExprF64Acos, sigs.d_d()},
- {kExprF64Asin, sigs.d_d()},
- {kExprF64Atan, sigs.d_d()},
- {kExprF64Cos, sigs.d_d()},
- {kExprF64Sin, sigs.d_d()},
- {kExprF64Tan, sigs.d_d()},
- {kExprF64Exp, sigs.d_d()},
- {kExprF64Log, sigs.d_d()},
- {kExprI32AsmjsLoadMem8S, sigs.i_i()},
- {kExprI32AsmjsLoadMem8U, sigs.i_i()},
- {kExprI32AsmjsLoadMem16S, sigs.i_i()},
- {kExprI32AsmjsLoadMem16U, sigs.i_i()},
- {kExprI32AsmjsLoadMem, sigs.i_i()},
- {kExprF32AsmjsLoadMem, &sig_f_i},
- {kExprF64AsmjsLoadMem, &sig_d_i},
- {kExprI32AsmjsSConvertF32, sigs.i_f()},
- {kExprI32AsmjsUConvertF32, sigs.i_f()},
- {kExprI32AsmjsSConvertF64, sigs.i_d()},
- {kExprI32AsmjsUConvertF64, sigs.i_d()}};
- {
- TestModuleBuilder builder(kAsmJsSloppyOrigin);
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
- TestUnop(AsmJsUnOps[i].op, AsmJsUnOps[i].sig);
- }
- }
-
- {
- TestModuleBuilder builder;
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
- ExpectFailure(AsmJsUnOps[i].sig,
- {WASM_UNOP(AsmJsUnOps[i].op, WASM_LOCAL_GET(0))});
- }
- }
-}
-
TEST_F(FunctionBodyDecoderTest, BreakEnd) {
ExpectValidates(
sigs.i_i(),
@@ -3289,9 +3199,8 @@ TEST_F(FunctionBodyDecoderTest, Regression709741) {
for (size_t i = 0; i < arraysize(code); ++i) {
FunctionBody body(sigs.v_v(), 0, code, code + i);
WasmFeatures unused_detected_features;
- DecodeResult result =
- ValidateFunctionBody(this->zone()->allocator(), WasmFeatures::All(),
- nullptr, &unused_detected_features, body);
+ DecodeResult result = ValidateFunctionBody(WasmFeatures::All(), module,
+ &unused_detected_features, body);
if (result.ok()) {
std::ostringstream str;
str << "Expected verification to fail";
@@ -4361,6 +4270,10 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
WASM_HEAP_TYPE(to_heap))});
ExpectValidates(&cast_sig, {WASM_REF_CAST(WASM_LOCAL_GET(0),
WASM_HEAP_TYPE(to_heap))});
+ ExpectValidates(&test_sig, {WASM_REF_TEST_NULL(WASM_LOCAL_GET(0),
+ WASM_HEAP_TYPE(to_heap))});
+ ExpectValidates(&cast_sig, {WASM_REF_CAST_NULL(WASM_LOCAL_GET(0),
+ WASM_HEAP_TYPE(to_heap))});
} else {
std::string error_message =
"local.get of type " + cast_reps[1].name() +
@@ -4374,6 +4287,16 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
{WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_HEAP_TYPE(to_heap))},
kAppendEnd,
("Invalid types for ref.cast: " + error_message).c_str());
+ ExpectFailure(
+ &test_sig,
+ {WASM_REF_TEST_NULL(WASM_LOCAL_GET(0), WASM_HEAP_TYPE(to_heap))},
+ kAppendEnd,
+ ("Invalid types for ref.test null: " + error_message).c_str());
+ ExpectFailure(
+ &cast_sig,
+ {WASM_REF_CAST_NULL(WASM_LOCAL_GET(0), WASM_HEAP_TYPE(to_heap))},
+ kAppendEnd,
+ ("Invalid types for ref.cast null: " + error_message).c_str());
}
}
@@ -4407,6 +4330,7 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
byte super_struct = builder.AddStruct({F(kWasmI16, true)});
byte sub_struct =
builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)}, super_struct);
+ byte fct_type = builder.AddSignature(sigs.i_i(), kNoSuperType);
ValueType supertype = ValueType::RefNull(super_struct);
ValueType subtype = ValueType::RefNull(sub_struct);
@@ -4422,6 +4346,10 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
ExpectValidates(
FunctionSig::Build(this->zone(), {kWasmI32, supertype}, {supertype}),
{WASM_I32V(42), WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct)});
+ ExpectValidates(
+ FunctionSig::Build(this->zone(), {kWasmI32, supertype}, {supertype}),
+ {WASM_I32V(42), WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct)});
// Wrong branch type.
ExpectFailure(
@@ -4433,6 +4361,11 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
{WASM_I32V(42), WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct)},
kAppendEnd,
"type error in branch[0] (expected (ref null 1), got (ref null 0))");
+ ExpectFailure(FunctionSig::Build(this->zone(), {subtype}, {supertype}),
+ {WASM_I32V(42), WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct)},
+ kAppendEnd,
+ "type error in branch[0] (expected (ref null 1), got (ref 0))");
// Wrong fallthrough type.
ExpectFailure(
@@ -4443,20 +4376,44 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
FunctionSig::Build(this->zone(), {supertype}, {supertype}),
{WASM_BLOCK_I(WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct))},
kAppendEnd, "type error in branch[0] (expected i32, got (ref null 0))");
+ ExpectFailure(FunctionSig::Build(this->zone(), {supertype}, {supertype}),
+ {WASM_BLOCK_I(WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct))},
+ kAppendEnd,
+ "type error in branch[0] (expected i32, got (ref 0))");
// Argument type error.
+ ExpectFailure(FunctionSig::Build(this->zone(), {subtype}, {kWasmExternRef}),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST(0, sub_struct),
+ WASM_GC_OP(kExprRefCast), sub_struct},
+ kAppendEnd,
+ "Invalid types for br_on_cast: local.get of type externref has "
+ "to be in the same reference type hierarchy as (ref 1)");
ExpectFailure(
FunctionSig::Build(this->zone(), {subtype}, {kWasmExternRef}),
- {WASM_LOCAL_GET(0), WASM_BR_ON_CAST(0, sub_struct),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST_NULL(0, sub_struct),
WASM_GC_OP(kExprRefCast), sub_struct},
kAppendEnd,
- "br_on_cast[0] expected subtype of (ref null func), (ref null struct) or "
- "(ref null array), found local.get of type externref");
+ "Invalid types for br_on_cast null: local.get of type externref has "
+ "to be in the same reference type hierarchy as (ref 1)");
ExpectFailure(
FunctionSig::Build(this->zone(), {supertype}, {kWasmExternRef}),
{WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct)}, kAppendEnd,
- "br_on_cast_fail[0] expected subtype of (ref null func), (ref null "
- "struct) or (ref null array), found local.get of type externref");
+ "Invalid types for br_on_cast_fail: local.get of type externref has to "
+ "be in the same reference type hierarchy as (ref 1)");
+ ExpectFailure(
+ FunctionSig::Build(this->zone(), {supertype}, {kWasmExternRef}),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct)}, kAppendEnd,
+ "Invalid types for br_on_cast_fail null: local.get of type "
+ "externref has to be in the same reference type hierarchy as (ref 1)");
+
+ // Cast between types of different type hierarchies is invalid.
+ ExpectFailure(
+ FunctionSig::Build(this->zone(), {subtype}, {supertype}),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST(0, fct_type), WASM_UNREACHABLE},
+ kAppendEnd,
+ "Invalid types for br_on_cast: local.get of type (ref null 0) has "
+ "to be in the same reference type hierarchy as (ref 2)");
}
TEST_F(FunctionBodyDecoderTest, BrOnAbstractType) {
@@ -4817,7 +4774,7 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
ExpectLength(2, kExprGlobalSet);
ExpectLength(2, kExprCallFunction);
ExpectLength(3, kExprCallIndirect);
- ExpectLength(3, kExprSelectWithType, 1);
+ ExpectLength(3, kExprSelectWithType, 1, kI32Code);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
@@ -4933,7 +4890,7 @@ TEST_F(WasmOpcodeLengthTest, IllegalRefIndices) {
TEST_F(WasmOpcodeLengthTest, GCOpcodes) {
// br_on_cast{,_fail}: prefix + opcode + br_depth + type_index
- ExpectLength(4, 0xfb, kExprBrOnCast & 0xFF);
+ ExpectLength(4, 0xfb, kExprBrOnCastDeprecated & 0xFF);
ExpectLength(4, 0xfb, kExprBrOnCastFail & 0xFF);
// struct.new, with leb immediate operand.
@@ -4963,9 +4920,10 @@ class TypeReaderTest : public TestWithZone {
public:
HeapType DecodeHeapType(const byte* start, const byte* end) {
Decoder decoder(start, end);
- uint32_t length;
- return value_type_reader::read_heap_type<Decoder::FullValidationTag>(
- &decoder, start, &length, enabled_features_);
+ auto [heap_type, length] =
+ value_type_reader::read_heap_type<Decoder::FullValidationTag>(
+ &decoder, start, enabled_features_);
+ return heap_type;
}
// This variable is modified by WASM_FEATURE_SCOPE.
@@ -5025,7 +4983,6 @@ TEST_F(TypeReaderTest, HeapTypeDecodingTest) {
class LocalDeclDecoderTest : public TestWithZone {
public:
- v8::internal::AccountingAllocator allocator;
WasmFeatures enabled_features_;
size_t ExpectRun(ValueType* local_types, size_t pos, ValueType expected,
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index 362db55f14..71faa6f3d5 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -97,9 +97,8 @@ TEST_F(LEBHelperTest, sizeof_i32v) {
EXPECT_EQ(LEBHelper::sizeof_##name(val), \
static_cast<size_t>(ptr - buffer)); \
Decoder decoder(buffer, buffer + kSize); \
- unsigned length = 0; \
- ctype result = \
- decoder.read_##name<Decoder::NoValidationTag>(buffer, &length); \
+ auto [result, length] = \
+ decoder.read_##name<Decoder::NoValidationTag>(buffer); \
EXPECT_EQ(val, result); \
EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
}
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 65945932ae..2e7c75c1c9 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -26,8 +26,10 @@ class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
TestSignatures sigs;
uint32_t num_locals;
- BitVector* Analyze(const byte* start, const byte* end) {
- return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end);
+ BitVector* Analyze(const byte* start, const byte* end,
+ bool* loop_is_innermost = nullptr) {
+ return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end,
+ loop_is_innermost);
}
};
@@ -175,6 +177,29 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
}
}
+TEST_F(WasmLoopAssignmentAnalyzerTest, NestedLoop) {
+ num_locals = 5;
+ byte code[] = {WASM_LOOP(WASM_LOOP(WASM_LOCAL_SET(0, 1)))};
+
+ bool outer_is_innermost = false;
+ BitVector* outer_assigned =
+ Analyze(code, code + arraysize(code), &outer_is_innermost);
+ for (int j = 0; j < outer_assigned->length(); j++) {
+ bool expected = j == 0;
+ EXPECT_EQ(expected, outer_assigned->Contains(j));
+ }
+ EXPECT_FALSE(outer_is_innermost);
+
+ bool inner_is_innermost = false;
+ BitVector* inner_assigned =
+ Analyze(code + 2, code + arraysize(code), &inner_is_innermost);
+ for (int j = 0; j < inner_assigned->length(); j++) {
+ bool expected = j == 0;
+ EXPECT_EQ(expected, inner_assigned->Contains(j));
+ }
+ EXPECT_TRUE(inner_is_innermost);
+}
+
TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
byte code[] = {kExprLoop, kVoidCode, kExprF32Neg, kExprBrTable, 0x0E, 'h',
'e', 'l', 'l', 'o', ',', ' ',
diff --git a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
index 7ae062709d..120197bba0 100644
--- a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
+++ b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
@@ -21,15 +21,11 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock-matchers.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
+namespace v8::internal::wasm {
enum MemoryProtectionMode {
kNoProtection,
kPku,
- kMprotect,
- kPkuWithMprotectFallback
};
const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
@@ -38,10 +34,6 @@ const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
return "NoProtection";
case kPku:
return "Pku";
- case kMprotect:
- return "Mprotect";
- case kPkuWithMprotectFallback:
- return "PkuWithMprotectFallback";
}
}
@@ -50,15 +42,10 @@ class MemoryProtectionTest : public TestWithNativeContext {
void Initialize(MemoryProtectionMode mode) {
v8_flags.wasm_lazy_compilation = false;
mode_ = mode;
- bool enable_pku = mode == kPku || mode == kPkuWithMprotectFallback;
- v8_flags.wasm_memory_protection_keys = enable_pku;
+ v8_flags.wasm_memory_protection_keys = (mode == kPku);
// The key is initially write-protected.
CHECK_IMPLIES(WasmCodeManager::HasMemoryProtectionKeySupport(),
!WasmCodeManager::MemoryProtectionKeyWritable());
-
- bool enable_mprotect =
- mode == kMprotect || mode == kPkuWithMprotectFallback;
- v8_flags.wasm_write_protect_code_memory = enable_mprotect;
}
void CompileModule() {
@@ -72,11 +59,7 @@ class MemoryProtectionTest : public TestWithNativeContext {
WasmCode* code() const { return code_; }
bool code_is_protected() {
- return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku() || uses_mprotect();
- }
-
- void MakeCodeWritable() {
- native_module_->MakeWritable(base::AddressRegionOf(code_->instructions()));
+ return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku();
}
void WriteToCode() { code_->instructions()[0] = 0; }
@@ -87,28 +70,18 @@ class MemoryProtectionTest : public TestWithNativeContext {
WriteToCode();
return;
}
- // Tier-up might be running and unprotecting the code region temporarily (if
- // using mprotect). In that case, repeatedly write to the code region to
- // make us eventually crash.
ASSERT_DEATH_IF_SUPPORTED(
- do {
+ {
WriteToCode();
base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
- } while (uses_mprotect()),
+ },
"");
}
- bool uses_mprotect() {
- // M1 always uses MAP_JIT.
- if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
- return mode_ == kMprotect ||
- (mode_ == kPkuWithMprotectFallback && !uses_pku());
- }
-
bool uses_pku() {
// M1 always uses MAP_JIT.
if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
- bool param_has_pku = mode_ == kPku || mode_ == kPkuWithMprotectFallback;
+ bool param_has_pku = mode_ == kPku;
return param_has_pku && WasmCodeManager::HasMemoryProtectionKeySupport();
}
@@ -121,11 +94,8 @@ class MemoryProtectionTest : public TestWithNativeContext {
SECTION(Code, ENTRY_COUNT(1), ADD_COUNT(0 /* locals */, kExprEnd))};
ModuleResult result =
- DecodeWasmModule(WasmFeatures::All(), std::begin(module_bytes),
- std::end(module_bytes), false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(),
- DecodingMethod::kSync, GetWasmEngine()->allocator());
+ DecodeWasmModule(WasmFeatures::All(), base::ArrayVector(module_bytes),
+ false, kWasmOrigin);
CHECK(result.ok());
ErrorThrower thrower(isolate(), "");
@@ -160,8 +130,7 @@ std::string PrintMemoryProtectionTestParam(
}
INSTANTIATE_TEST_SUITE_P(MemoryProtection, ParameterizedMemoryProtectionTest,
- ::testing::Values(kNoProtection, kPku, kMprotect,
- kPkuWithMprotectFallback),
+ ::testing::Values(kNoProtection, kPku),
PrintMemoryProtectionTestParam);
TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
@@ -172,7 +141,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
TEST_P(ParameterizedMemoryProtectionTest, CodeWritableWithinScope) {
CompileModule();
CodeSpaceWriteScope write_scope(native_module());
- MakeCodeWritable();
WriteToCode();
}
@@ -180,7 +148,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterScope) {
CompileModule();
{
CodeSpaceWriteScope write_scope(native_module());
- MakeCodeWritable();
WriteToCode();
}
AssertCodeEventuallyProtected();
@@ -267,8 +234,7 @@ std::string PrintMemoryProtectionAndSignalHandlingTestParam(
INSTANTIATE_TEST_SUITE_P(
MemoryProtection, ParameterizedMemoryProtectionTestWithSignalHandling,
- ::testing::Combine(::testing::Values(kNoProtection, kPku, kMprotect,
- kPkuWithMprotectFallback),
+ ::testing::Combine(::testing::Values(kNoProtection, kPku),
::testing::Bool(), ::testing::Bool()),
PrintMemoryProtectionAndSignalHandlingTestParam);
@@ -306,16 +272,12 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
// second parameter, and not a matcher as {ASSERT_DEATH}.
#if GTEST_HAS_DEATH_TEST
ASSERT_DEATH(
- // The signal handler should crash, but it might "accidentally"
- // succeed if tier-up is running in the background and using mprotect
- // to unprotect the code for the whole process. In that case we
- // repeatedly send the signal until we crash.
- do {
+ {
base::Optional<CodeSpaceWriteScope> write_scope;
if (open_write_scope) write_scope.emplace(native_module());
pthread_kill(pthread_self(), SIGPROF);
base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
- } while (uses_mprotect()), // Only loop for mprotect.
+ },
// Check that the subprocess tried to write, but did not succeed.
::testing::AnyOf(
// non-sanitizer builds:
@@ -343,6 +305,4 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
}
#endif // V8_OS_POSIX && !V8_OS_FUCHSIA
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm
diff --git a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
index 1510e6d4ec..53bb240d64 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
@@ -31,12 +31,8 @@ class Memory64DecodingTest : public TestWithIsolateAndZone {
module_bytes.insert(module_bytes.end(), module_body_bytes);
static constexpr WasmFeatures kEnabledFeatures{
WasmFeature::kFeature_memory64};
- return DecodeWasmModule(
- kEnabledFeatures, module_bytes.data(),
- module_bytes.data() + module_bytes.size(), false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(), DecodingMethod::kSync,
- wasm::GetWasmEngine()->allocator());
+ return DecodeWasmModule(kEnabledFeatures, base::VectorOf(module_bytes),
+ false, kWasmOrigin);
}
};
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 1a1aa8fc42..9b1d676121 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -116,27 +116,27 @@ namespace module_decoder_unittest {
kWasmArrayTypeCode, type, (mutability ? 1 : 0)
#define WASM_FUNCTION_DEF(...) kWasmFunctionTypeCode, __VA_ARGS__
-#define EXPECT_VERIFIES(data) \
- do { \
- ModuleResult _result = DecodeModule(data, data + sizeof(data)); \
- EXPECT_OK(_result); \
+#define EXPECT_VERIFIES(data) \
+ do { \
+ ModuleResult _result = DecodeModule(base::ArrayVector(data)); \
+ EXPECT_OK(_result); \
} while (false)
-#define EXPECT_FAILURE_LEN(data, length) \
- do { \
- ModuleResult _result = DecodeModule(data, data + length); \
- EXPECT_FALSE(_result.ok()); \
+#define EXPECT_FAILURE_LEN(data, length) \
+ do { \
+ ModuleResult _result = DecodeModule(base::VectorOf(data, length)); \
+ EXPECT_FALSE(_result.ok()); \
} while (false)
#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
-#define EXPECT_FAILURE_WITH_MSG(data, msg) \
- do { \
- ModuleResult _result = DecodeModule(data, data + sizeof(data)); \
- EXPECT_FALSE(_result.ok()); \
- if (!_result.ok()) { \
- EXPECT_THAT(_result.error().message(), HasSubstr(msg)); \
- } \
+#define EXPECT_FAILURE_WITH_MSG(data, msg) \
+ do { \
+ ModuleResult _result = DecodeModule(base::ArrayVector(data)); \
+ EXPECT_FALSE(_result.ok()); \
+ if (!_result.ok()) { \
+ EXPECT_THAT(_result.error().message(), HasSubstr(msg)); \
+ } \
} while (false)
#define EXPECT_OFF_END_FAILURE(data, min) \
@@ -200,38 +200,30 @@ class WasmModuleVerifyTest : public TestWithIsolateAndZone {
public:
WasmFeatures enabled_features_ = WasmFeatures::None();
- ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
+ ModuleResult DecodeModule(base::Vector<const uint8_t> module_bytes) {
// Add the wasm magic and version number automatically.
- size_t size = static_cast<size_t>(module_end - module_start);
+ size_t size = module_bytes.size();
byte header[] = {WASM_MODULE_HEADER};
size_t total = sizeof(header) + size;
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
if (size > 0) {
- memcpy(temp + sizeof(header), module_start, size);
+ memcpy(temp + sizeof(header), module_bytes.begin(), size);
}
ModuleResult result = DecodeWasmModule(
- enabled_features_, temp, temp + total, false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(), DecodingMethod::kSync,
- GetWasmEngine()->allocator());
+ enabled_features_, base::VectorOf(temp, total), false, kWasmOrigin);
delete[] temp;
return result;
}
- ModuleResult DecodeModuleNoHeader(const byte* module_start,
- const byte* module_end) {
- return DecodeWasmModule(
- enabled_features_, module_start, module_end, false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(), DecodingMethod::kSync,
- GetWasmEngine()->allocator());
+ ModuleResult DecodeModuleNoHeader(base::Vector<const uint8_t> bytes) {
+ return DecodeWasmModule(enabled_features_, bytes, false, kWasmOrigin);
}
};
TEST_F(WasmModuleVerifyTest, WrongMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_FALSE(result.ok());
}
}
@@ -239,7 +231,7 @@ TEST_F(WasmModuleVerifyTest, WrongMagic) {
TEST_F(WasmModuleVerifyTest, WrongVersion) {
for (uint32_t x = 1; x; x <<= 1) {
const byte data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_FALSE(result.ok());
}
}
@@ -247,12 +239,12 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
TEST_F(WasmModuleVerifyTest, WrongSection) {
constexpr byte kInvalidSection = 0x1c;
const byte data[] = {kInvalidSection, 0};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_FALSE(result.ok());
}
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
- ModuleResult result = DecodeModule(nullptr, nullptr);
+ ModuleResult result = DecodeModule(base::VectorOf<uint8_t>(nullptr, 0));
EXPECT_TRUE(result.ok());
}
@@ -267,7 +259,7 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -292,7 +284,7 @@ TEST_F(WasmModuleVerifyTest, S128Global) {
kS128Code, // memory type
0, // immutable
WASM_SIMD_CONSTANT(v.data()), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
if (!CheckHardwareSupportsSimd()) {
EXPECT_NOT_OK(result, "Wasm SIMD unsupported");
} else {
@@ -332,7 +324,7 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
{
// Should decode to two globals.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(2u, result.value()->functions.size());
@@ -375,7 +367,7 @@ TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
TWO_EMPTY_BODIES};
{
// Should decode to two globals.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(2u, result.value()->functions.size());
@@ -424,7 +416,7 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobalWithGlobalInit) {
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -455,7 +447,7 @@ TEST_F(WasmModuleVerifyTest, NullGlobalWithGlobalInit) {
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
std::cout << result.error().message() << std::endl;
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
@@ -654,7 +646,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -724,7 +716,7 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
buffer.insert(buffer.end(), data, data + sizeof(data));
}
- ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
+ ModuleResult result = DecodeModule(base::VectorOf(buffer));
EXPECT_OK(result);
}
}
@@ -741,7 +733,7 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
{
// Should decode to exactly two globals.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -766,7 +758,7 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
TEST_F(WasmModuleVerifyTest, RefNullGlobal) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kFuncRefCode, 1,
WASM_REF_NULL(kFuncRefCode), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -774,7 +766,7 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid1) {
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kRefNullCode, 0,
1, WASM_REF_NULL(0), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "Type index 0 is out of bounds");
}
@@ -782,7 +774,7 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid2) {
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kFuncRefCode, 1,
kExprRefNull, U32V_5(1000001), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result,
"Type index 1000001 is greater than the maximum number 1000000 "
"of type definitions supported by V8");
@@ -1066,17 +1058,68 @@ TEST_F(WasmModuleVerifyTest, InvalidSupertypeInRecGroup) {
static const byte invalid_supertype[] = {
SECTION(Type, ENTRY_COUNT(1), // --
kWasmRecursiveTypeGroupCode, ENTRY_COUNT(2), // --
- kWasmArrayTypeCode, kI32Code, 0, // --
- kWasmSubtypeCode, 1, 0, // supertype count, supertype
+ kWasmSubtypeCode, 0, // 0 supertypes, non-final
+ kWasmArrayTypeCode, kI32Code, 0, // --
+ kWasmSubtypeCode, 1, 0, // supertype count, supertype
kWasmArrayTypeCode, kI64Code, 0)};
EXPECT_FAILURE_WITH_MSG(invalid_supertype,
"type 1 has invalid explicit supertype 0");
}
+// Tests supertype declaration with 0 supertypes.
+TEST_F(WasmModuleVerifyTest, SuperTypeDeclarationWith0Supertypes) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte zero_supertypes[] = {
+ SECTION(Type, ENTRY_COUNT(1), // --
+ kWasmSubtypeCode, 0, // supertype count
+ kWasmArrayTypeCode, kI32Code, 0)};
+
+ EXPECT_VERIFIES(zero_supertypes);
+}
+
+TEST_F(WasmModuleVerifyTest, NoSupertypeSupertype) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte no_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(1), // --
+ kWasmSubtypeCode, 1, // supertype count
+ 0xff, 0xff, 0xff, 0xff, 0x0f, // supertype = "kNoSuperType"
+ kWasmArrayTypeCode, kI32Code, 0)};
+
+ EXPECT_FAILURE_WITH_MSG(
+ no_supertype, "is greater than the maximum number of type definitions");
+}
+
+TEST_F(WasmModuleVerifyTest, NonSpecifiedFinalType) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(wasm_final_types);
+ static const byte final_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmStructTypeCode, 1, kI32Code, 1, // --
+ kWasmSubtypeCode, 1, 0, // --
+ kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)};
+ EXPECT_FAILURE_WITH_MSG(final_supertype, "type 1 extends final type 0");
+}
+
+TEST_F(WasmModuleVerifyTest, SpecifiedFinalType) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(wasm_final_types);
+ static const byte final_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmSubtypeFinalCode, 0, // --
+ kWasmStructTypeCode, 1, kI32Code, 1, // --
+ kWasmSubtypeCode, 1, 0, // --
+ kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)};
+ EXPECT_FAILURE_WITH_MSG(final_supertype, "type 1 extends final type 0");
+}
+
TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.value()->tags.size());
}
@@ -1086,7 +1129,7 @@ TEST_F(WasmModuleVerifyTest, OneI32Exception) {
SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_x(kI32Code)), // sig#0 (i32)
SECTION(Tag, ENTRY_COUNT(1),
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->tags.size());
@@ -1103,7 +1146,7 @@ TEST_F(WasmModuleVerifyTest, TwoExceptions) {
SECTION(Tag, ENTRY_COUNT(2),
EXCEPTION_ENTRY(SIG_INDEX(1)), // except[0] (sig#1)
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[1] (sig#0)
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->tags.size());
const WasmTag& e0 = result.value()->tags.front();
@@ -1121,8 +1164,8 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_index) {
EXCEPTION_ENTRY(
SIG_INDEX(23)))}; // except[0] (sig#23 [out-of-bounds])
// Should fail decoding exception section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "signature index 23 out of bounds");
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "no signature at index 23 (1 signatures)");
}
TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
@@ -1132,7 +1175,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
EXCEPTION_ENTRY(
SIG_INDEX(0)))}; // except[0] (sig#0 [invalid-return-type])
// Should fail decoding exception section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "tag signature 0 has non-void return");
}
@@ -1142,7 +1185,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_attribute) {
SECTION(Tag, ENTRY_COUNT(1), 23,
SIG_INDEX(0))}; // except[0] (sig#0) [invalid-attribute]
// Should fail decoding exception section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "exception attribute 23 not supported");
}
@@ -1150,14 +1193,14 @@ TEST_F(WasmModuleVerifyTest, TagSectionCorrectPlacement) {
static const byte data[] = {SECTION(Memory, ENTRY_COUNT(0)),
SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Global, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
TEST_F(WasmModuleVerifyTest, TagSectionAfterGlobal) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(0)),
SECTION(Tag, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result,
"The Tag section must appear before the Global section");
}
@@ -1165,7 +1208,7 @@ TEST_F(WasmModuleVerifyTest, TagSectionAfterGlobal) {
TEST_F(WasmModuleVerifyTest, TagSectionBeforeMemory) {
static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Memory, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
@@ -1174,7 +1217,7 @@ TEST_F(WasmModuleVerifyTest, TagSectionAfterTableBeforeMemory) {
static const byte data[] = {SECTION(Table, ENTRY_COUNT(0)),
SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Memory, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
@@ -1187,7 +1230,7 @@ TEST_F(WasmModuleVerifyTest, TagImport) {
ADD_COUNT('e', 'x'), // tag name
kExternalTag, // import kind
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->tags.size());
EXPECT_EQ(1u, result.value()->import_table.size());
@@ -1202,7 +1245,7 @@ TEST_F(WasmModuleVerifyTest, ExceptionExport) {
NO_NAME, // --
kExternalTag, // --
EXCEPTION_INDEX(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->tags.size());
EXPECT_EQ(1u, result.value()->export_table.size());
@@ -1229,7 +1272,7 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
SIG_ENTRY_x_xx(kI32Code, kF64Code, kF64Code)), // f64,f64 -> i32
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(3u, result.value()->types.size());
if (result.value()->types.size() == 3) {
@@ -1261,7 +1304,7 @@ TEST_F(WasmModuleVerifyTest, CanonicalTypeIds) {
WASM_ARRAY_DEF(kI32Code, true)) // Array definition
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
const WasmModule* module = result.value().get();
@@ -1296,7 +1339,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
U32V_1(3), // source size
'a', 'b', 'c') // data bytes
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -1346,7 +1389,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
{
EXPECT_VERIFIES(data);
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -1380,7 +1423,7 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
};
{
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -1468,7 +1511,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
// code ----------------------------------------------------------------
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(1u, result.value()->types.size());
@@ -1555,7 +1598,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
// code ----------------------------------------------------------------
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->types.size());
EXPECT_EQ(1u, result.value()->functions.size());
@@ -1583,7 +1626,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
FUNC_INDEX(2), FUNC_INDEX(3))),
FOUR_EMPTY_BODIES};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->types.size());
EXPECT_EQ(4u, result.value()->functions.size());
@@ -1987,7 +2030,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTables) {
11), // table 2: minimum size
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->tables.size());
@@ -2009,7 +2052,7 @@ TEST_F(WasmModuleVerifyTest, TypedFunctionTable) {
kRefNullCode, 0, // table 0: type
0, 10)}; // table 0: limits
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(ValueType::RefNull(0), result.value()->tables[0].type);
}
@@ -2052,7 +2095,7 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
// Add table limits
data.insert(data.end(), {byte{0}, byte{10}});
- auto result = DecodeModule(data.data(), data.data() + data.size());
+ auto result = DecodeModule(base::VectorOf(data));
EXPECT_NOT_OK(result, "Only reference types can be used as table types");
}
}
@@ -2070,7 +2113,7 @@ TEST_F(WasmModuleVerifyTest, TableWithInitializer) {
0, 10, // table 0: limits
kExprRefFunc, 0, kExprEnd), // table 0: initial value
SECTION(Code, ENTRY_COUNT(1), NOP_BODY)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(ValueType::RefNull(0), result.value()->tables[0].type);
}
@@ -2088,7 +2131,7 @@ TEST_F(WasmModuleVerifyTest, NonNullableTable) {
0, 10, // table 0: limits
kExprRefFunc, 0, kExprEnd), // table 0: initial value
SECTION(Code, ENTRY_COUNT(1), NOP_BODY)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(ValueType::Ref(0), result.value()->tables[0].type);
}
@@ -2120,7 +2163,7 @@ TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
SECTION(Code, ENTRY_COUNT(3), NOP_BODY, NOP_BODY, NOP_BODY),
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(3u, result.value()->compilation_hints.size());
@@ -2158,7 +2201,7 @@ TEST_F(WasmModuleVerifyTest, BranchHinting) {
ADD_COUNT(0, /*no locals*/
WASM_BLOCK(WASM_BR_IF(0, WASM_I32V_1(1))), WASM_END))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->branch_hints.size());
@@ -2172,18 +2215,18 @@ class WasmSignatureDecodeTest : public TestWithZone {
public:
WasmFeatures enabled_features_ = WasmFeatures::None();
- const FunctionSig* DecodeSig(const byte* start, const byte* end) {
+ const FunctionSig* DecodeSig(base::Vector<const uint8_t> bytes) {
Result<const FunctionSig*> res =
- DecodeWasmSignatureForTesting(enabled_features_, zone(), start, end);
+ DecodeWasmSignatureForTesting(enabled_features_, zone(), bytes);
EXPECT_TRUE(res.ok()) << res.error().message() << " at offset "
<< res.error().offset();
return res.ok() ? res.value() : nullptr;
}
- V8_NODISCARD testing::AssertionResult DecodeSigError(const byte* start,
- const byte* end) {
+ V8_NODISCARD testing::AssertionResult DecodeSigError(
+ base::Vector<const uint8_t> bytes) {
Result<const FunctionSig*> res =
- DecodeWasmSignatureForTesting(enabled_features_, zone(), start, end);
+ DecodeWasmSignatureForTesting(enabled_features_, zone(), bytes);
if (res.ok()) {
return testing::AssertionFailure() << "unexpected valid signature";
}
@@ -2195,7 +2238,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {SIG_ENTRY_v_v};
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
ASSERT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
@@ -2209,7 +2252,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_x(ret_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Return type " + ret_type.type.name());
ASSERT_TRUE(sig != nullptr);
@@ -2226,7 +2269,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair param_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Param type " + param_type.type.name());
ASSERT_TRUE(sig != nullptr);
@@ -2245,7 +2288,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueTypePair param_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Param type " + param_type.type.name());
ASSERT_TRUE(sig != nullptr);
@@ -2267,7 +2310,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {
SIG_ENTRY_x_xx(kI32Code, p0_type.code, p1_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Signature i32(" + p0_type.type.name() + ", " +
p1_type.type.name() + ")");
@@ -2290,7 +2333,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_tt_tt) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_xx_xx(p0_type.code, p1_type.code,
p0_type.code, p1_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("p0 = " + p0_type.type.name() +
", p1 = " + p1_type.type.name());
@@ -2309,10 +2352,10 @@ TEST_F(WasmSignatureDecodeTest, Simd) {
WASM_FEATURE_SCOPE(simd);
const byte data[] = {SIG_ENTRY_x(kS128Code)};
if (!CheckHardwareSupportsSimd()) {
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)))
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)))
<< "Type S128 should not be allowed on this hardware";
} else {
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
ASSERT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
EXPECT_EQ(1u, sig->return_count());
@@ -2324,14 +2367,14 @@ TEST_F(WasmSignatureDecodeTest, TooManyParams) {
static const byte data[] = {kWasmFunctionTypeCode,
WASM_I32V_3(kV8MaxWasmFunctionParams + 1),
kI32Code, 0};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
TEST_F(WasmSignatureDecodeTest, TooManyReturns) {
for (int i = 0; i < 2; i++) {
byte data[] = {kWasmFunctionTypeCode, 0,
WASM_I32V_3(kV8MaxWasmFunctionReturns + 1), kI32Code};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
}
@@ -2343,7 +2386,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
for (int i = 0; i < p + 1; i++) {
// Should fall off the end for all signatures.
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
}
}
@@ -2354,35 +2397,32 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte data[] = {SIG_ENTRY_x_xx(kI32Code, kI32Code, kI32Code)};
if (i >= arraysize(data)) break;
data[i] = kInvalidType;
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kVoidCode, kI32Code)};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kI32Code, kVoidCode)};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
static const byte data[] = {SIG_ENTRY_x_xx(kI32Code, kI32Code, kVoidCode)};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
class WasmFunctionVerifyTest : public TestWithIsolateAndZone {
public:
- FunctionResult DecodeWasmFunction(const ModuleWireBytes& wire_bytes,
- const WasmModule* module,
- const byte* function_start,
- const byte* function_end) {
- WasmFeatures enabled_features;
- return DecodeWasmFunctionForTesting(enabled_features, zone(), wire_bytes,
- module, function_start, function_end,
- isolate()->counters());
+ FunctionResult DecodeWasmFunction(
+ ModuleWireBytes wire_bytes, const WasmModule* module,
+ base::Vector<const uint8_t> function_bytes) {
+ return DecodeWasmFunctionForTesting(WasmFeatures::All(), zone(), wire_bytes,
+ module, function_bytes);
}
};
@@ -2402,8 +2442,8 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
};
WasmModule module;
- FunctionResult result = DecodeWasmFunction(ModuleWireBytes({}), &module, data,
- data + sizeof(data));
+ FunctionResult result =
+ DecodeWasmFunction(ModuleWireBytes({}), &module, base::ArrayVector(data));
EXPECT_OK(result);
if (result.value() && result.ok()) {
@@ -2521,7 +2561,7 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
0, // exported
WASM_INIT_EXPR_I32V_1(33)), // init
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->globals.size());
@@ -2658,7 +2698,7 @@ TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
SECTION(Export, ENTRY_COUNT(0)), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->functions.size());
@@ -2686,7 +2726,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne) {
kExternalFunction, // --
FUNC_INDEX(0)), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->functions.size());
@@ -2723,7 +2763,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
FUNC_INDEX(0)), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->functions.size());
@@ -2746,7 +2786,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableThree) {
kExternalFunction,
FUNC_INDEX(2)), // --
THREE_EMPTY_BODIES};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(3u, result.value()->functions.size());
@@ -2824,7 +2864,7 @@ TEST_F(WasmModuleVerifyTest, FunctionBodySizeLimit) {
size_t total = sizeof(data) + body_size;
byte* buffer = reinterpret_cast<byte*>(calloc(1, total));
memcpy(buffer, data, sizeof(data));
- ModuleResult result = DecodeModule(buffer, buffer + total);
+ ModuleResult result = DecodeModule(base::VectorOf(buffer, total));
if (body_size <= kV8MaxWasmFunctionSize) {
EXPECT_TRUE(result.ok());
} else {
@@ -2940,13 +2980,13 @@ TEST_F(WasmModuleVerifyTest, FunctionSectionWithoutCodeSection) {
TYPE_SECTION(1, SIG_ENTRY_v_v), // Type section.
FUNCTION_SECTION(1, 0), // Function section.
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "function count is 1, but code section is absent");
}
TEST_F(WasmModuleVerifyTest, CodeSectionWithoutFunctionSection) {
static const byte data[] = {ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "function body count 1 mismatch (0 expected)");
}
@@ -2988,10 +3028,10 @@ TEST_F(WasmModuleVerifyTest, Section_Name_No_UTF8) {
class WasmModuleCustomSectionTest : public TestWithIsolateAndZone {
public:
- void CheckSections(const byte* module_start, const byte* module_end,
+ void CheckSections(base::Vector<const uint8_t> wire_bytes,
const CustomSectionOffset* expected, size_t num_expected) {
std::vector<CustomSectionOffset> custom_sections =
- DecodeCustomSections(module_start, module_end);
+ DecodeCustomSections(wire_bytes);
CHECK_EQ(num_expected, custom_sections.size());
@@ -3026,7 +3066,7 @@ TEST_F(WasmModuleCustomSectionTest, ThreeUnknownSections) {
{{27, 8}, {28, 5}, {33, 2}}, // --
};
- CheckSections(data, data + sizeof(data), expected, arraysize(expected));
+ CheckSections(base::ArrayVector(data), expected, arraysize(expected));
}
TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
@@ -3045,18 +3085,18 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
{{29, 8}, {30, 5}, {35, 2}}, // --
};
- CheckSections(data, data + sizeof(data), expected, arraysize(expected));
+ CheckSections(base::ArrayVector(data), expected, arraysize(expected));
}
TEST_F(WasmModuleVerifyTest, SourceMappingURLSection) {
static const byte data[] = {
WASM_MODULE_HEADER,
SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c')};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(WasmDebugSymbols::Type::SourceMap,
result.value()->debug_symbols.type);
- ModuleWireBytes wire_bytes(data, data + sizeof(data));
+ ModuleWireBytes wire_bytes(base::ArrayVector(data));
WasmName external_url =
wire_bytes.GetNameOrNull(result.value()->debug_symbols.external_url);
EXPECT_EQ("src/xyz.c", std::string(external_url.data(), external_url.size()));
@@ -3066,7 +3106,7 @@ TEST_F(WasmModuleVerifyTest, BadSourceMappingURLSection) {
static const byte data[] = {
WASM_MODULE_HEADER,
SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c')};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(WasmDebugSymbols::Type::None, result.value()->debug_symbols.type);
EXPECT_EQ(0u, result.value()->debug_symbols.external_url.length());
@@ -3076,11 +3116,11 @@ TEST_F(WasmModuleVerifyTest, MultipleSourceMappingURLSections) {
static const byte data[] = {WASM_MODULE_HEADER,
SECTION_SRC_MAP('a', 'b', 'c'),
SECTION_SRC_MAP('p', 'q', 'r')};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(WasmDebugSymbols::Type::SourceMap,
result.value()->debug_symbols.type);
- ModuleWireBytes wire_bytes(data, data + sizeof(data));
+ ModuleWireBytes wire_bytes(base::ArrayVector(data));
WasmName external_url =
wire_bytes.GetNameOrNull(result.value()->debug_symbols.external_url);
EXPECT_EQ("abc", std::string(external_url.data(), external_url.size()));
@@ -3090,7 +3130,7 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
static const byte data[] = {
SECTION_NAMES(0, ADD_COUNT(ADD_COUNT('a', 'b', 'c'))),
SECTION_NAMES(0, ADD_COUNT(ADD_COUNT('p', 'q', 'r', 's')))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(3u, result.value()->name.length());
}
@@ -3098,7 +3138,7 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
TEST_F(WasmModuleVerifyTest, BadNameSection) {
static const byte data[] = {SECTION_NAMES(
0, ADD_COUNT(ADD_COUNT('s', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c')))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(0u, result.value()->name.length());
}
@@ -3229,7 +3269,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
static const byte data[] = {SECTION(Code, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result,
"The DataCount section must appear before the Code section");
}
@@ -3237,7 +3277,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
TEST_F(WasmModuleVerifyTest, DataCountSectionBeforeElement) {
static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(Element, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Element>");
}
@@ -3253,14 +3293,14 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
SECTION(Element, ENTRY_COUNT(0)) // Element section.
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Element>");
}
TEST_F(WasmModuleVerifyTest, MultipleDataCountSections) {
static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "Multiple DataCount sections not allowed");
}
@@ -3279,7 +3319,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_greater) {
SECTION(Memory, ENTRY_COUNT(1), 0, 1), // Memory section.
SECTION(DataCount, ENTRY_COUNT(3)), // DataCount section.
SECTION(Data, ENTRY_COUNT(0))}; // Data section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "data segments count 0 mismatch (3 expected)");
}
@@ -3289,14 +3329,14 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_less) {
SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0, // Data section.
WASM_INIT_EXPR_I32V_1(12), ADD_COUNT('a', 'b', 'c'))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "data segments count 1 mismatch (0 expected)");
}
TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
static const byte data[] = {SECTION(Memory, ENTRY_COUNT(1), 0, 1),
SECTION(DataCount, ENTRY_COUNT(1))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "data segments count 0 mismatch (1 expected)");
}
@@ -3313,7 +3353,7 @@ TEST_F(WasmModuleVerifyTest, GcStructIdsPass) {
WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(WASM_OPT_REF(0), true),
STRUCT_FIELD(WASM_OPT_REF(2), true)),
WASM_ARRAY_DEF(WASM_OPT_REF(0), true))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -3321,28 +3361,64 @@ TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInGlobal) {
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
SECTION(Global, ENTRY_COUNT(1), kRefCode, 0, WASM_REF_NULL(0), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "Type index 0 is out of bounds");
}
TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInType) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {SECTION(
+ Type, ENTRY_COUNT(1),
+ WASM_STRUCT_DEF(FIELD_COUNT(1),
+ STRUCT_FIELD(WASM_REF_TYPE(ValueType::Ref(1)), true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "Type index 1 is out of bounds");
+}
+
+TEST_F(WasmModuleVerifyTest, RecursiveTypeOutsideRecursiveGroup) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kRefCode, true)))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "Type index 1 is out of bounds");
+ WASM_STRUCT_DEF(
+ FIELD_COUNT(1),
+ STRUCT_FIELD(WASM_REF_TYPE(ValueType::RefNull(0)), true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_OK(result);
+}
+
+TEST_F(WasmModuleVerifyTest, OutOfBoundsSupertype) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1),
+ kWasmSubtypeCode, ENTRY_COUNT(1), 1,
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "type 0: supertype 1 out of bounds");
}
-TEST_F(WasmModuleVerifyTest, ForwardSupertype) {
+TEST_F(WasmModuleVerifyTest, ForwardSupertypeSameType) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1),
kWasmSubtypeCode, ENTRY_COUNT(1), 0,
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kRefCode, true)))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "type 0: forward-declared supertype 0");
+}
+
+TEST_F(WasmModuleVerifyTest, ForwardSupertypeSameRecGroup) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(2),
+ kWasmSubtypeCode, ENTRY_COUNT(1), 0,
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "type 0: forward-declared supertype 0");
}
@@ -3353,7 +3429,7 @@ TEST_F(WasmModuleVerifyTest, IllegalPackedFields) {
static const byte data[] = {
SECTION(Global, ENTRY_COUNT(1), kI16Code, 0, WASM_INIT_EXPR_I32V_1(13))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "invalid value type");
}
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index dfeb4739e2..857520f364 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -20,10 +20,10 @@ namespace wasm {
struct MockStreamingResult {
size_t num_sections = 0;
size_t num_functions = 0;
- WasmError error;
- base::OwnedVector<uint8_t> received_bytes;
+ bool error;
+ base::OwnedVector<const uint8_t> received_bytes;
- bool ok() const { return !error.has_error(); }
+ bool ok() const { return !error; }
MockStreamingResult() = default;
};
@@ -42,13 +42,13 @@ class MockStreamingProcessor : public StreamingProcessor {
bool ProcessModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) override {
Decoder decoder(bytes.begin(), bytes.end());
- NoTracer no_tracer;
- uint32_t magic_word = decoder.consume_u32("wasm magic", no_tracer);
+ uint32_t magic_word = decoder.consume_u32("wasm magic", ITracer::NoTrace);
if (decoder.failed() || magic_word != kWasmMagic) {
result_->error = WasmError(0, "expected wasm magic");
return false;
}
- uint32_t magic_version = decoder.consume_u32("wasm version", no_tracer);
+ uint32_t magic_version =
+ decoder.consume_u32("wasm version", ITracer::NoTrace);
if (decoder.failed() || magic_version != kWasmVersion) {
result_->error = WasmError(4, "expected wasm version");
return false;
@@ -72,22 +72,19 @@ class MockStreamingProcessor : public StreamingProcessor {
}
// Process a function body.
- void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
+ bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t offset) override {
++result_->num_functions;
+ return true;
}
void OnFinishedChunk() override {}
// Finish the processing of the stream.
- void OnFinishedStream(base::OwnedVector<uint8_t> bytes) override {
+ void OnFinishedStream(base::OwnedVector<const uint8_t> bytes,
+ bool after_error) override {
result_->received_bytes = std::move(bytes);
- }
-
- // Report an error detected in the StreamingDecoder.
- void OnError(const WasmError& error) override {
- result_->error = error;
- CHECK(!result_->ok());
+ result_->error = after_error;
}
void OnAbort() override {}
@@ -119,8 +116,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
}
}
- void ExpectFailure(base::Vector<const uint8_t> data, uint32_t error_offset,
- const char* message) {
+ void ExpectFailure(base::Vector<const uint8_t> data) {
for (int split = 0; split <= data.length(); ++split) {
MockStreamingResult result;
auto stream = StreamingDecoder::CreateAsyncStreamingDecoder(
@@ -129,8 +125,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
stream->OnBytesReceived(data.SubVector(split, data.length()));
stream->Finish();
EXPECT_FALSE(result.ok());
- EXPECT_EQ(error_offset, result.error.offset());
- EXPECT_EQ(message, result.error.message());
+ EXPECT_TRUE(result.error);
}
}
};
@@ -154,8 +149,7 @@ TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
EXPECT_FALSE(result.ok());
}
for (uint32_t length = 1; length < sizeof(data); ++length) {
- ExpectFailure(base::VectorOf(data, length), length - 1,
- "unexpected end of stream");
+ ExpectFailure(base::VectorOf(data, length));
}
}
@@ -167,14 +161,14 @@ TEST_F(WasmStreamingDecoderTest, MagicAndVersion) {
TEST_F(WasmStreamingDecoderTest, BadMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
- ExpectFailure(base::ArrayVector(data), 0, "expected wasm magic");
+ ExpectFailure(base::ArrayVector(data));
}
}
TEST_F(WasmStreamingDecoderTest, BadVersion) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
- ExpectFailure(base::ArrayVector(data), 4, "expected wasm version");
+ ExpectFailure(base::ArrayVector(data));
}
}
@@ -261,8 +255,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
0x0, // 4
0x0 // 5
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "unexpected end of stream");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
@@ -273,8 +266,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
0x6, // Section Length
0x0 // Payload
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "unexpected end of stream");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
@@ -288,8 +280,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
0x80, // --
0x80, // --
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "expected section length");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
@@ -404,8 +395,7 @@ TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
0x1, // Number of Functions
0x0, // Function Length -- ERROR
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "invalid function length (0)");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
@@ -462,8 +452,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthZero) {
kCodeSectionCode, // Section ID
0x0, // Section Length
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "code section cannot have size 0");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
@@ -484,8 +473,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "not all code section bytes were used");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
@@ -496,8 +484,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
0xD, // Section Length
0x0, // Number of Functions
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "not all code section bytes were used");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
@@ -518,8 +505,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
0x1, // Function Length <8> -- ERROR
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 2,
- "read past code section end");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
@@ -542,7 +528,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), 12, "invalid code section length");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
@@ -567,7 +553,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), 15, "read past code section end");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
@@ -588,8 +574,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "unexpected end of stream");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
@@ -607,8 +592,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
0x1, // Function Length
0x0 // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 3,
- "not all code section bytes were used");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
@@ -626,8 +610,7 @@ TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 5,
- "code section can only appear once");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, UnknownSection) {
@@ -668,14 +651,13 @@ TEST_F(WasmStreamingDecoderTest, UnknownSectionSandwich) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 5,
- "code section can only appear once");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, InvalidSectionCode) {
uint8_t kInvalidSectionCode = 61;
const uint8_t data[] = {WASM_MODULE_HEADER, SECTION(Invalid)};
- ExpectFailure(base::ArrayVector(data), 8, "invalid section code");
+ ExpectFailure(base::ArrayVector(data));
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/struct-types-unittest.cc b/deps/v8/test/unittests/wasm/struct-types-unittest.cc
new file mode 100644
index 0000000000..3ddf8b84f7
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/struct-types-unittest.cc
@@ -0,0 +1,70 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/struct-types.h"
+
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8::internal::wasm {
+namespace struct_types_unittest {
+
+class StructTypesTest : public TestWithZone {};
+
+TEST_F(StructTypesTest, Empty) {
+ StructType::Builder builder(this->zone(), 0);
+ StructType* type = builder.Build();
+ EXPECT_EQ(0u, type->total_fields_size());
+}
+
+TEST_F(StructTypesTest, OneField) {
+ StructType::Builder builder(this->zone(), 1);
+ builder.AddField(kWasmI32, true);
+ StructType* type = builder.Build();
+ uint32_t expected = std::max(kUInt32Size, kTaggedSize);
+ EXPECT_EQ(expected, type->total_fields_size());
+ EXPECT_EQ(0u, type->field_offset(0));
+}
+
+TEST_F(StructTypesTest, Packing) {
+ StructType::Builder builder(this->zone(), 5);
+ builder.AddField(kWasmI64, true);
+ builder.AddField(kWasmI8, true);
+ builder.AddField(kWasmI32, true);
+ builder.AddField(kWasmI16, true);
+ builder.AddField(kWasmI8, true);
+ StructType* type = builder.Build();
+ EXPECT_EQ(16u, type->total_fields_size());
+ EXPECT_EQ(0u, type->field_offset(0));
+ EXPECT_EQ(8u, type->field_offset(1));
+ EXPECT_EQ(12u, type->field_offset(2));
+ EXPECT_EQ(10u, type->field_offset(3));
+ EXPECT_EQ(9u, type->field_offset(4));
+}
+
+TEST_F(StructTypesTest, CopyingOffsets) {
+ StructType::Builder builder(this->zone(), 5);
+ builder.AddField(kWasmI64, true);
+ builder.AddField(kWasmI8, true);
+ builder.AddField(kWasmI32, true);
+ builder.AddField(kWasmI16, true);
+ builder.AddField(kWasmI8, true);
+ StructType* type = builder.Build();
+
+ StructType::Builder copy_builder(this->zone(), type->field_count());
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ copy_builder.AddField(type->field(i), type->mutability(i),
+ type->field_offset(i));
+ }
+ copy_builder.set_total_fields_size(type->total_fields_size());
+
+ StructType* copy = copy_builder.Build();
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ EXPECT_EQ(type->field_offset(i), copy->field_offset(i));
+ }
+ EXPECT_EQ(type->total_fields_size(), copy->total_fields_size());
+}
+
+} // namespace struct_types_unittest
+} // namespace v8::internal::wasm
diff --git a/deps/v8/test/unittests/wasm/subtyping-unittest.cc b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
index 2602be49ba..fd085d0e75 100644
--- a/deps/v8/test/unittests/wasm/subtyping-unittest.cc
+++ b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
@@ -25,25 +25,25 @@ FieldInit mut(ValueType type) { return FieldInit(type, true); }
FieldInit immut(ValueType type) { return FieldInit(type, false); }
void DefineStruct(WasmModule* module, std::initializer_list<FieldInit> fields,
- uint32_t supertype = kNoSuperType,
+ uint32_t supertype = kNoSuperType, bool is_final = false,
bool in_singleton_rec_group = true) {
- StructType::Builder builder(module->signature_zone.get(),
+ StructType::Builder builder(&module->signature_zone,
static_cast<uint32_t>(fields.size()));
for (FieldInit field : fields) {
builder.AddField(field.first, field.second);
}
- module->add_struct_type(builder.Build(), supertype);
+ module->add_struct_type(builder.Build(), supertype, is_final);
if (in_singleton_rec_group) {
GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
}
}
void DefineArray(WasmModule* module, FieldInit element_type,
- uint32_t supertype = kNoSuperType,
+ uint32_t supertype = kNoSuperType, bool is_final = false,
bool in_singleton_rec_group = true) {
- module->add_array_type(module->signature_zone->New<ArrayType>(
+ module->add_array_type(module->signature_zone.New<ArrayType>(
element_type.first, element_type.second),
- supertype);
+ supertype, is_final);
if (in_singleton_rec_group) {
GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
}
@@ -52,11 +52,11 @@ void DefineArray(WasmModule* module, FieldInit element_type,
void DefineSignature(WasmModule* module,
std::initializer_list<ValueType> params,
std::initializer_list<ValueType> returns,
- uint32_t supertype = kNoSuperType,
+ uint32_t supertype = kNoSuperType, bool is_final = false,
bool in_singleton_rec_group = true) {
module->add_signature(
- FunctionSig::Build(module->signature_zone.get(), returns, params),
- supertype);
+ FunctionSig::Build(&module->signature_zone, returns, params), supertype,
+ is_final);
if (in_singleton_rec_group) {
GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
}
@@ -64,10 +64,9 @@ void DefineSignature(WasmModule* module,
TEST_F(WasmSubtypingTest, Subtyping) {
FLAG_SCOPE(experimental_wasm_gc);
- FLAG_VALUE_SCOPE(wasm_gc_structref_as_dataref, false);
v8::internal::AccountingAllocator allocator;
- WasmModule module1_(std::make_unique<Zone>(&allocator, ZONE_NAME));
- WasmModule module2_(std::make_unique<Zone>(&allocator, ZONE_NAME));
+ WasmModule module1_;
+ WasmModule module2_;
WasmModule* module1 = &module1_;
WasmModule* module2 = &module2_;
@@ -97,34 +96,43 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Rec. group.
/* 18 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17,
false);
- /* 19 */ DefineArray(module, {mut(refNull(21))}, kNoSuperType, false);
+ /* 19 */ DefineArray(module, {mut(refNull(21))}, kNoSuperType, false,
+ false);
/* 20 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
- false);
- /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false);
+ false, false);
+ /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false, false);
GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
// Identical rec. group.
/* 22 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17,
- false);
- /* 23 */ DefineArray(module, {mut(refNull(25))}, kNoSuperType, false);
+ false, false);
+ /* 23 */ DefineArray(module, {mut(refNull(25))}, kNoSuperType, false,
+ false);
/* 24 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
- false);
- /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false);
+ false, false);
+ /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false, false);
GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
// Nonidentical rec. group: the last function extends a type outside the
// recursive group.
/* 26 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17,
- false);
- /* 27 */ DefineArray(module, {mut(refNull(29))}, kNoSuperType, false);
+ false, false);
+ /* 27 */ DefineArray(module, {mut(refNull(29))}, kNoSuperType, false,
+ false);
/* 28 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
- false);
- /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false);
+ false, false);
+ /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false, false);
GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
/* 30 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(18))}, 18);
/* 31 */ DefineStruct(
module, {mut(ref(2)), immut(refNull(2)), immut(kWasmS128)}, 1);
+
+ // Final types
+ /* 32 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, true);
+ /* 33 */ DefineStruct(module, {mut(kWasmI32), mut(kWasmI64)}, 32, true);
+ /* 34 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, true);
+ /* 35 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, false);
}
constexpr ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
@@ -162,12 +170,13 @@ TEST_F(WasmSubtypingTest, Subtyping) {
#define DISTINCT(index1, index2) \
EXPECT_FALSE(EquivalentTypes(ValueType::RefNull(index1), \
ValueType::RefNull(index2), module1, module));
-// Union always expresses the result in terms of module1.
-#define UNION(type1, type2, type_result) \
- EXPECT_EQ(Union(type1, type2, module1, module), \
- TypeInModule(type_result, module1))
-// Intersection might return either module, so we have a version which checks
-// the module and one which deos not.
+// For union and intersection, we have a version that also checks the module,
+// and one that does not.
+#define UNION(type1, type2, type_result) \
+ EXPECT_EQ(Union(type1, type2, module1, module).type, type_result)
+#define UNION_M(type1, type2, type_result, module_result) \
+ EXPECT_EQ(Union(type1, type2, module1, module), \
+ TypeInModule(type_result, module_result))
#define INTERSECTION(type1, type2, type_result) \
EXPECT_EQ(Intersection(type1, type2, module1, module).type, type_result)
#define INTERSECTION_M(type1, type2, type_result, module_result) \
@@ -306,32 +315,38 @@ TEST_F(WasmSubtypingTest, Subtyping) {
VALID_SUBTYPE(ref(10), ref(10));
VALID_SUBTYPE(ref(11), ref(11));
- {
- // Canonicalization tests.
+ // Canonicalization tests.
- // Groups should only be canonicalized to identical groups.
- IDENTICAL(18, 22);
- IDENTICAL(19, 23);
- IDENTICAL(20, 24);
- IDENTICAL(21, 25);
+ // Groups should only be canonicalized to identical groups.
+ IDENTICAL(18, 22);
+ IDENTICAL(19, 23);
+ IDENTICAL(20, 24);
+ IDENTICAL(21, 25);
- DISTINCT(18, 26);
- DISTINCT(19, 27);
- DISTINCT(20, 28);
- DISTINCT(21, 29);
+ DISTINCT(18, 26);
+ DISTINCT(19, 27);
+ DISTINCT(20, 28);
+ DISTINCT(21, 29);
- // A type should not be canonicalized to an identical one with a different
- // group structure.
- DISTINCT(18, 17);
+ // A type should not be canonicalized to an identical one with a different
+ // group structure.
+ DISTINCT(18, 17);
- // A subtype should also be subtype of an equivalent type.
- VALID_SUBTYPE(ref(30), ref(18));
- VALID_SUBTYPE(ref(30), ref(22));
- NOT_SUBTYPE(ref(30), ref(26));
+ // A subtype should also be subtype of an equivalent type.
+ VALID_SUBTYPE(ref(30), ref(18));
+ VALID_SUBTYPE(ref(30), ref(22));
+ NOT_SUBTYPE(ref(30), ref(26));
- // Rtts of identical types are subtype-related.
- SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17));
- }
+ // Final types
+
+ // A type is not a valid subtype of a final type.
+ NOT_VALID_SUBTYPE(ref(33), ref(32));
+ IDENTICAL(32, 34);
+ // A final and a non-final
+ DISTINCT(32, 35);
+
+ // Rtts of identical types are subtype-related.
+ SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17));
// Unions and intersections.
@@ -395,6 +410,21 @@ TEST_F(WasmSubtypingTest, Subtyping) {
UNION(kWasmAnyRef, kWasmNullRef, kWasmAnyRef);
UNION(kWasmExternRef, kWasmNullExternRef, kWasmExternRef);
UNION(kWasmFuncRef, kWasmNullFuncRef, kWasmFuncRef);
+ UNION(kWasmFuncRef, kWasmStructRef, kWasmBottom);
+ UNION(kWasmFuncRef, kWasmArrayRef, kWasmBottom);
+ UNION(kWasmFuncRef, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmFuncRef, kWasmEqRef, kWasmBottom);
+ UNION(kWasmStringRef, kWasmAnyRef, kWasmAnyRef);
+ UNION(kWasmStringRef, kWasmStructRef, kWasmAnyRef);
+ UNION(kWasmStringRef, kWasmArrayRef, kWasmAnyRef);
+ UNION(kWasmStringRef, kWasmFuncRef, kWasmBottom);
+ UNION(kWasmStringViewIter, kWasmStringRef, kWasmBottom);
+ UNION(kWasmStringViewWtf8, kWasmStringRef, kWasmBottom);
+ UNION(kWasmStringViewWtf16, kWasmStringRef, kWasmBottom);
+ UNION(kWasmStringViewIter, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmStringViewWtf8, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmStringViewWtf16, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmNullFuncRef, kWasmEqRef, kWasmBottom);
INTERSECTION(kWasmExternRef, kWasmEqRef, kWasmBottom);
INTERSECTION(kWasmExternRef, kWasmStructRef, kWasmBottom);
@@ -443,11 +473,15 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Abstract vs indexed types.
UNION(kWasmFuncRef, function_type, kWasmFuncRef);
+ UNION(kWasmFuncRef, struct_type, kWasmBottom);
+ UNION(kWasmFuncRef, array_type, kWasmBottom);
INTERSECTION(kWasmFuncRef, struct_type, kWasmBottom);
INTERSECTION(kWasmFuncRef, array_type, kWasmBottom);
- INTERSECTION(kWasmFuncRef, function_type, function_type);
+ INTERSECTION_M(kWasmFuncRef, function_type, function_type, module);
UNION(kWasmNullFuncRef, function_type, function_type.AsNullable());
+ UNION(kWasmNullFuncRef, struct_type, kWasmBottom);
+ UNION(kWasmNullFuncRef, array_type, kWasmBottom);
INTERSECTION(kWasmNullFuncRef, struct_type, kWasmBottom);
INTERSECTION(kWasmNullFuncRef, struct_type.AsNullable(), kWasmBottom);
INTERSECTION(kWasmNullFuncRef, array_type, kWasmBottom);
@@ -464,7 +498,8 @@ TEST_F(WasmSubtypingTest, Subtyping) {
UNION(kWasmStructRef, struct_type, kWasmStructRef);
UNION(kWasmStructRef, array_type, kWasmEqRef);
- INTERSECTION(kWasmStructRef, struct_type, struct_type);
+ UNION(kWasmStructRef, function_type, kWasmBottom);
+ INTERSECTION_M(kWasmStructRef, struct_type, struct_type, module);
INTERSECTION(kWasmStructRef, array_type, kWasmBottom);
INTERSECTION(kWasmStructRef, function_type, kWasmBottom);
@@ -476,17 +511,22 @@ TEST_F(WasmSubtypingTest, Subtyping) {
UNION(kWasmArrayRef, struct_type, kWasmEqRef);
UNION(kWasmArrayRef, array_type, kWasmArrayRef);
+ UNION(kWasmArrayRef, function_type, kWasmBottom);
INTERSECTION(kWasmArrayRef, struct_type, kWasmBottom);
- INTERSECTION(kWasmArrayRef, array_type, array_type);
+ INTERSECTION_M(kWasmArrayRef, array_type, array_type, module);
INTERSECTION(kWasmArrayRef, function_type, kWasmBottom);
- UNION(kWasmNullRef, struct_type, struct_type.AsNullable());
- UNION(kWasmNullRef, array_type, array_type.AsNullable());
- UNION(kWasmNullRef, function_type, function_type.AsNullable());
+ UNION_M(kWasmNullRef, struct_type, struct_type.AsNullable(), module);
+ UNION_M(kWasmNullRef, array_type, array_type.AsNullable(), module);
+ UNION(kWasmNullRef, function_type, kWasmBottom);
INTERSECTION(kWasmNullRef, struct_type, kWasmBottom);
INTERSECTION(kWasmNullRef, array_type, kWasmBottom);
INTERSECTION(kWasmNullRef, function_type, kWasmBottom);
+ UNION(struct_type, kWasmStringRef, kWasmAnyRef);
+ UNION(array_type, kWasmStringRef, kWasmAnyRef);
+ UNION(function_type, kWasmStringRef, kWasmBottom);
+
// Indexed types of different kinds.
UNION(struct_type, array_type, kWasmEqRef.AsNonNull());
INTERSECTION(struct_type, array_type, kWasmBottom);
@@ -502,11 +542,11 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Concrete types of the same kind.
// Subtyping relation.
- UNION(refNull(4), ref(1), refNull(1));
+ UNION_M(refNull(4), ref(1), refNull(1), module1);
INTERSECTION_M(refNull(4), ref(1), ref(4), module1);
INTERSECTION_M(refNull(1), refNull(4), refNull(4), module);
// Common ancestor.
- UNION(ref(4), ref(31), ref(1));
+ UNION_M(ref(4), ref(31), ref(1), module1);
INTERSECTION(ref(4), ref(31), kWasmBottom);
// No common ancestor.
UNION(ref(6), refNull(2), kWasmArrayRef.AsNullable());
@@ -524,6 +564,7 @@ TEST_F(WasmSubtypingTest, Subtyping) {
#undef IDENTICAL
#undef DISTINCT
#undef UNION
+#undef UNION_M
#undef INTERSECTION
#undef INTERSECTION_M
}
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-arm64-unittest.cc
index 8225944e2a..d06c780c35 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-arm64-unittest.cc
@@ -33,17 +33,25 @@
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
+#if V8_TRAP_HANDLER_SUPPORTED
+
+#if V8_HOST_ARCH_ARM64 && !V8_OS_DARWIN
+#error Unsupported platform
+#endif
+
namespace v8 {
namespace internal {
namespace wasm {
namespace {
+#if V8_HOST_ARCH_X64
constexpr Register scratch = r10;
+#endif
bool g_test_handler_executed = false;
#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
struct sigaction g_old_segv_action;
-struct sigaction g_old_fpe_action;
-struct sigaction g_old_bus_action; // We get SIGBUS on Mac sometimes.
+struct sigaction g_old_other_action;  // SIGFPE on x64, SIGTRAP on arm64.
+struct sigaction g_old_bus_action; // We get SIGBUS on Mac sometimes.
#elif V8_OS_WIN
void* g_registered_handler = nullptr;
#endif
@@ -107,8 +115,15 @@ class TrapHandlerTest : public TestWithIsolate,
EXPECT_EQ(0, sigaction(SIGSEGV, &action, &g_old_segv_action));
// SIGBUS happens for wasm oob memory accesses on macOS.
EXPECT_EQ(0, sigaction(SIGBUS, &action, &g_old_bus_action));
+#if V8_HOST_ARCH_X64
// SIGFPE to simulate crashes which are not handled by the trap handler.
- EXPECT_EQ(0, sigaction(SIGFPE, &action, &g_old_fpe_action));
+ EXPECT_EQ(0, sigaction(SIGFPE, &action, &g_old_other_action));
+#elif V8_HOST_ARCH_ARM64
+ // SIGTRAP to simulate crashes which are not handled by the trap handler.
+ EXPECT_EQ(0, sigaction(SIGTRAP, &action, &g_old_other_action));
+#else
+#error Unsupported platform
+#endif
#elif V8_OS_WIN
g_registered_handler =
AddVectoredExceptionHandler(/*first=*/0, TestHandler);
@@ -129,8 +144,14 @@ class TrapHandlerTest : public TestWithIsolate,
// The test handler cleans up the signal handler setup in the test. If the
// test handler was not called, we have to do the cleanup ourselves.
EXPECT_EQ(0, sigaction(SIGSEGV, &g_old_segv_action, nullptr));
- EXPECT_EQ(0, sigaction(SIGFPE, &g_old_fpe_action, nullptr));
EXPECT_EQ(0, sigaction(SIGBUS, &g_old_bus_action, nullptr));
+#if V8_HOST_ARCH_X64
+ EXPECT_EQ(0, sigaction(SIGFPE, &g_old_other_action, nullptr));
+#elif V8_HOST_ARCH_ARM64
+ EXPECT_EQ(0, sigaction(SIGTRAP, &g_old_other_action, nullptr));
+#else
+#error Unsupported platform
+#endif
#elif V8_OS_WIN
RemoveVectoredExceptionHandler(g_registered_handler);
g_registered_handler = nullptr;
@@ -147,7 +168,9 @@ class TrapHandlerTest : public TestWithIsolate,
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
recovery_buffer_->CreateView());
int recovery_offset = __ pc_offset();
+#if V8_HOST_ARCH_X64
__ Pop(scratch);
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -167,16 +190,24 @@ class TrapHandlerTest : public TestWithIsolate,
// Reset the signal handler, to avoid that this signal handler is called
// repeatedly.
sigaction(SIGSEGV, &g_old_segv_action, nullptr);
- sigaction(SIGFPE, &g_old_fpe_action, nullptr);
+#if V8_HOST_ARCH_X64
+ sigaction(SIGFPE, &g_old_other_action, nullptr);
+#elif V8_HOST_ARCH_ARM64
+ sigaction(SIGTRAP, &g_old_other_action, nullptr);
+#else
+#error Unsupported platform
+#endif
sigaction(SIGBUS, &g_old_bus_action, nullptr);
g_test_handler_executed = true;
     // Set the instruction pointer ($rip on x64, $pc on arm64) to the recovery
     // code.
ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
-#if V8_OS_LINUX
- uc->uc_mcontext.gregs[REG_RIP] = g_recovery_address;
-#elif V8_OS_DARWIN
+#if V8_OS_DARWIN && V8_HOST_ARCH_ARM64
+ uc->uc_mcontext->__ss.__pc = g_recovery_address;
+#elif V8_OS_DARWIN && V8_HOST_ARCH_X64
uc->uc_mcontext->__ss.__rip = g_recovery_address;
+#elif V8_OS_LINUX && V8_HOST_ARCH_X64
+ uc->uc_mcontext.gregs[REG_RIP] = g_recovery_address;
#elif V8_OS_FREEBSD
uc->uc_mcontext.mc_rip = g_recovery_address;
#else
@@ -208,17 +239,39 @@ class TrapHandlerTest : public TestWithIsolate,
public:
void GenerateSetThreadInWasmFlagCode(MacroAssembler* masm) {
+#if V8_HOST_ARCH_X64
masm->Move(scratch,
i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
RelocInfo::NO_INFO);
masm->movl(MemOperand(scratch, 0), Immediate(1));
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(masm);
+ Register addr = temps.AcquireX();
+ masm->Mov(addr,
+ i_isolate()->thread_local_top()->thread_in_wasm_flag_address_);
+ Register one = temps.AcquireX();
+ masm->Mov(one, 1);
+ masm->Str(one, MemOperand(addr));
+#else
+#error Unsupported platform
+#endif
}
void GenerateResetThreadInWasmFlagCode(MacroAssembler* masm) {
+#if V8_HOST_ARCH_X64
masm->Move(scratch,
i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
RelocInfo::NO_INFO);
masm->movl(MemOperand(scratch, 0), Immediate(0));
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(masm);
+ Register addr = temps.AcquireX();
+ masm->Mov(addr,
+ i_isolate()->thread_local_top()->thread_in_wasm_flag_address_);
+ masm->Str(xzr, MemOperand(addr));
+#else
+#error Unsupported platform
+#endif
}
bool GetThreadInWasmFlag() {
@@ -275,6 +328,7 @@ TEST_P(TrapHandlerTest, TestTrapHandlerRecovery) {
// wasm code (we fake the wasm code and the access violation).
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
@@ -283,6 +337,18 @@ TEST_P(TrapHandlerTest, TestTrapHandlerRecovery) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -300,6 +366,7 @@ TEST_P(TrapHandlerTest, TestReleaseHandlerData) {
// recover from the specific memory access violation anymore.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
@@ -308,6 +375,18 @@ TEST_P(TrapHandlerTest, TestReleaseHandlerData) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -332,12 +411,23 @@ TEST_P(TrapHandlerTest, TestNoThreadInWasmFlag) {
// get active.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
uint32_t crash_offset = __ pc_offset();
__ testl(MemOperand(scratch, 0), Immediate(1));
uint32_t recovery_offset = __ pc_offset();
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -355,6 +445,7 @@ TEST_P(TrapHandlerTest, TestCrashInWasmNoProtectedInstruction) {
// protected, then the trap handler does not handle it.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
uint32_t no_crash_offset = __ pc_offset();
@@ -364,6 +455,19 @@ TEST_P(TrapHandlerTest, TestCrashInWasmNoProtectedInstruction) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ uint32_t no_crash_offset = __ pc_offset();
+ __ Mov(scratch, crash_address_);
+ __ Ldr(scratch, MemOperand(scratch));
+  // Offset past the crashing instruction; no crash occurs from here on.
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -381,6 +485,7 @@ TEST_P(TrapHandlerTest, TestCrashInWasmWrongCrashType) {
// wasm trap handler does not handle it.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
__ xorq(scratch, scratch);
@@ -390,6 +495,17 @@ TEST_P(TrapHandlerTest, TestCrashInWasmWrongCrashType) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ uint32_t crash_offset = __ pc_offset();
+ __ Trap();
+  // Offset past the crashing instruction; no crash occurs from here on.
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -443,12 +559,23 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
// set.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
uint32_t crash_offset = __ pc_offset();
__ testl(MemOperand(scratch, 0), Immediate(1));
uint32_t recovery_offset = __ pc_offset();
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -480,3 +607,5 @@ INSTANTIATE_TEST_SUITE_P(Traps, TrapHandlerTest,
} // namespace wasm
} // namespace internal
} // namespace v8
+
+#endif
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc
new file mode 100644
index 0000000000..fa559146e5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc
@@ -0,0 +1,145 @@
+// This Wasm module has a name section which is invalid in that it
+// contains each sub-section twice.
+
+ 0x00, 0x61, 0x73, 0x6d, // wasm magic
+ 0x01, 0x00, 0x00, 0x00, // wasm version
+
+ // The only purpose of this table section is to trigger lazy decoding
+ // of the name section.
+ 0x04, // section kind: Table
+ 0x04, // section length 4
+ 0x01, 0x70, 0x00, // table count 1: funcref no maximum
+ 0x00, // initial size 0
+
+ 0x00, // section kind: Unknown
+ 0xb3, 0x01, // section length 179
+ 0x04, // section name length: 4
+ 0x6e, 0x61, 0x6d, 0x65, // section name: name
+
+ 0x01, // name type: function
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x02, // name type: local
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x03, // name type: label
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x04, // name type: type
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x05, // name type: table
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x06, // name type: memory
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x07, // name type: global
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x08, // name type: element segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x09, // name type: data segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x0a, // name type: field
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x0b, // name type: tag
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x01, // name type: function
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x02, // name type: local
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x03, // name type: label
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x04, // name type: type
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x05, // name type: table
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x06, // name type: memory
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x07, // name type: global
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x08, // name type: element segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x09, // name type: data segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x0a, // name type: field
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x0b, // name type: tag
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc
new file mode 100644
index 0000000000..01fdedfd1b
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc
@@ -0,0 +1,135 @@
+ 0x00, 0x61, 0x73, 0x6d, // wasm magic
+ 0x01, 0x00, 0x00, 0x00, // wasm version
+
+ 0x01, // section kind: Type
+ 0x3c, // section length 60
+ 0x0b, // types count 11
+ 0x50, 0x00, 0x5f, 0x00, // type #0 $type0 subtype, supertype count 0, kind: struct, field count 0
+ 0x5f, 0x01, 0x7f, 0x00, // type #1 $type1 kind: struct, field count 1: i32 immutable
+ 0x5f, 0x02, // type #2 $type2 kind: struct, field count 2
+ 0x7f, 0x01, // i32 mutable
+ 0x7e, 0x01, // i64 mutable
+ 0x5f, 0x02, // type #3 $type3 kind: struct, field count 2
+ 0x7a, 0x00, // i8 immutable
+ 0x79, 0x01, // i16 mutable
+ 0x5e, 0x7e, 0x00, // type #4 $type4 kind: array i64 immutable
+ 0x5e, 0x7e, 0x01, // type #5 $type5 kind: array i64 mutable
+ 0x5e, 0x7a, 0x00, // type #6 $type6 kind: array i8 immutable
+ 0x5f, 0x01, 0x6b, 0x00, 0x00, // type #7 $type7 kind: struct, field count 1: (ref $type0) immutable
+ 0x4f, // rec. group definition
+ 0x02, // recursive group size 2
+ 0x5f, 0x01, 0x6b, 0x09, 0x00, // type #8 $type8 kind: struct, field count 1: (ref $type9) immutable
+ 0x5f, 0x01, 0x6b, 0x08, 0x00, // type #9 $type9 kind: struct, field count 1: (ref $type8) immutable
+ 0x50, 0x01, 0x00, // type #10 $type10 subtype, supertype count 1: supertype 0
+ 0x5f, 0x01, 0x7f, 0x00, // kind: struct, field count 1: i32 immutable
+ 0x60, // type #11 $type11 kind: func
+ 0x02, // param count 2
+ 0x6b, 0x01, 0x6d, // (ref $type1) eqref
+ 0x00, // return count 0
+
+ 0x02, // section kind: Import
+ 0x30, // section length 48
+ 0x02, // imports count 2
+ // import #0
+ 0x03, // module name length: 3
+ 0x65, 0x6e, 0x76, // module name: env
+ 0x0f, // field name length: 15
+ 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64,
+ 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c,
+ // field name: imported_global
+ 0x03, 0x6b, 0x07, 0x00, // kind: global (ref $type7) immutable
+ // import #1
+ 0x03, // module name length: 3
+ 0x65, 0x6e, 0x76, // module name: env
+ 0x0e, // field name length: 14
+ 0x61, 0x6e, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f,
+ 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c,
+ // field name: another_global
+ 0x03, 0x6b, 0x08, 0x00, // kind: global (ref $type8) immutable
+
+ 0x03, // section kind: Function
+ 0x02, // section length 2
+ 0x01, 0x0b, // functions count 1: 0 $func0 (param (ref $type1) eqref)
+
+ 0x06, // section kind: Global
+ 0x0b, // section length 11
+ 0x02, // globals count 2
+ 0x6e, 0x00, // global #2: anyref immutable
+ 0xd0, 0x65, 0x0b, // ref.null none
+ 0x6d, 0x01, // global #3: eqref mutable
+ 0xd0, 0x65, 0x0b, // ref.null none
+
+ 0x0a, // section kind: Code
+ 0x99, 0x01, // section length 153
+ 0x01, // functions count 1
+ // function #0 $func0
+ 0x96, 0x01, // body size 150
+ 0x00, // 0 entries in locals list
+ 0xfb, 0x08, 0x01, // struct.new_default $type1
+ 0xfb, 0x03, 0x01, 0x00, // struct.get $type1 $field0
+ 0x1a, // drop
+ 0xfb, 0x08, 0x02, // struct.new_default $type2
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x06, 0x02, 0x00, // struct.set $type2 $field0
+ 0xfb, 0x08, 0x03, // struct.new_default $type3
+ 0xfb, 0x04, 0x03, 0x00, // struct.get_s $type3 $field0
+ 0x1a, // drop
+ 0xfb, 0x08, 0x03, // struct.new_default $type3
+ 0xfb, 0x05, 0x03, 0x01, // struct.get_u $type3 $field1
+ 0x1a, // drop
+ 0xfb, 0x1a, 0x04, 0x00, // array.new_fixed $type4 0
+ 0x1a, // drop
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x04, // array.new_default $type4
+ 0xfb, 0x19, // array.len
+ 0x1a, // drop
+ 0x42, 0x00, // i64.const 0
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1b, 0x04, // array.new $type4
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x13, 0x04, // array.get $type4
+ 0x1a, // drop
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x05, // array.new_default $type5
+ 0x41, 0x00, // i32.const 0
+ 0x42, 0x00, // i64.const 0
+ 0xfb, 0x16, 0x05, // array.set $type5
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x06, // array.new_default $type6
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x14, 0x06, // array.get_s $type6
+ 0x1a, // drop
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x06, // array.new_default $type6
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x15, 0x06, // array.get_u $type6
+ 0x1a, // drop
+ 0x20, 0x01, // local.get $var1
+ 0x20, 0x01, // local.get $var1
+ 0xd5, // ref.eq
+ 0x1a, // drop
+ 0x20, 0x01, // local.get $var1
+ 0xfb, 0x44, 0x00, // ref.test $type0
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x45, 0x00, // ref.cast $type0
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x48, 0x00, // ref.test null $type0
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x49, 0x00, // ref.cast null $type0
+ 0x1a, // drop
+ 0x02, 0x6b, 0x01, // block (result (ref $type1)) $label0
+ 0x20, 0x00, // local.get $var0
+ 0xd6, 0x00, // br_on_non_null $label0
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x46, 0x00, 0x01, // br_on_cast $label0 $type1
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x47, 0x00, 0x01, // br_on_cast_fail $label0 $type1
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0x0b, // end $label0
+ 0x1a, // drop
+ 0x0b, // end
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc
new file mode 100644
index 0000000000..1189fffa14
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc
@@ -0,0 +1,107 @@
+;; expected = R"---(;; This is a polyglot C++/WAT file.
+;; Comment lines are ignored and not expected in the disassembler output.
+(module
+ ;; Structs.
+ (type $type0 (struct))
+ (type $type1 (struct (field $field0 i32)))
+ (type $type2 (struct (field $field0 (mut i32)) (field $field1 (mut i64))))
+ (type $type3 (struct (field $field0 i8) (field $field1 (mut i16))))
+ ;; Arrays.
+ (type $type4 (array (field i64)))
+ (type $type5 (array (field (mut i64))))
+ (type $type6 (array (field i8)))
+ ;; References to other types, mutual recursion.
+ (type $type7 (struct (field $field0 (ref $type0))))
+ ;; TODO: rec-groups are supported in the binary format, but they are not
+ ;; printed yet. Once that is implemented in the disassembler, uncomment:
+ ;; (rec
+ (type $type8 (struct (field $field0 (ref $type9))))
+ (type $type9 (struct (field $field0 (ref $type8))))
+ ;; )
+ ;; Subtyping constraints.
+ ;; TODO: Change to `sub` keyword, once that is standardized.
+ (type $type10 (struct_subtype (field $field0 i32) $type0))
+ ;; Globals using reference types.
+ (global $env.imported_global (;0;) (import "env" "imported_global") (ref $type7))
+ (global $env.another_global (;1;) (import "env" "another_global") (ref $type8))
+ (global $global2 anyref (ref.null none))
+ (global $global3 (mut eqref) (ref.null none))
+ ;; Function with GC instructions and taking GC types as parameters.
+ (func $func0 (param $var0 (ref $type1)) (param $var1 eqref)
+ ;; Structs.
+ struct.new_default $type1
+ struct.get $type1 $field0
+ drop
+ struct.new_default $type2
+ i32.const 0
+ struct.set $type2 $field0
+ struct.new_default $type3
+ struct.get_s $type3 $field0
+ drop
+ struct.new_default $type3
+ struct.get_u $type3 $field1
+ drop
+ ;; Arrays.
+ array.new_fixed $type4 0
+ drop
+ i32.const 0
+ array.new_default $type4
+ array.len
+ drop
+ i64.const 0
+ i32.const 0
+ array.new $type4
+ i32.const 0
+ array.get $type4
+ drop
+ i32.const 0
+ array.new_default $type5
+ i32.const 0
+ i64.const 0
+ array.set $type5
+ i32.const 0
+ array.new_default $type6
+ i32.const 0
+ array.get_s $type6
+ drop
+ i32.const 0
+ array.new_default $type6
+ i32.const 0
+ array.get_u $type6
+ drop
+ ;; References.
+ local.get $var1
+ local.get $var1
+ ref.eq
+ drop
+ local.get $var1
+ ref.test $type0
+ drop
+ local.get $var0
+ ref.cast $type0
+ drop
+ local.get $var0
+ ref.test null $type0
+ drop
+ local.get $var0
+ ref.cast null $type0
+ drop
+ ;; Branches.
+ block $label0 (result (ref $type1))
+ local.get $var0
+ br_on_non_null $label0
+ local.get $var0
+ br_on_cast $label0 $type1
+ drop
+ local.get $var0
+ br_on_cast_fail $label0 $type1
+ drop
+ ;; TODO: Once `br_on_cast null` is implemented, uncomment:
+ ;; local.get $var0
+ ;; br_on_cast $label0 null $type1
+ local.get $var0
+ end $label0
+ drop
+ )
+)
+;;)---";
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc
index 30328bab67..2a438a9ab4 100644
--- a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc
@@ -484,6 +484,5 @@
)
;; Data and element sections.
(data (global.get $env.imported_global) "foo\0a\00")
- ;; TODO(dlehmann): Wasm extensions, name and extended name section.
)
;;)---";
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc
new file mode 100644
index 0000000000..41a43397fe
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc
@@ -0,0 +1,31 @@
+0x00, 0x61, 0x73, 0x6d, // wasm magic
+0x01, 0x00, 0x00, 0x00, // wasm version
+
+0x01, // section kind: Type
+0x04, // section length 4
+0x01, 0x60, // types count 1: kind: func
+0x00, // param count 0
+0x00, // return count 0
+
+0x03, // section kind: Function
+0x02, // section length 2
+0x01, 0x00, // functions count 1: 0 $doubleEnd
+
+0x07, // section kind: Export
+0x0d, // section length 13
+0x01, // exports count 1: export # 0
+0x09, // field name length: 9
+0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x45, 0x6e,
+0x64, // field name: doubleEnd
+0x00, 0x00, // kind: function index: 0
+
+0x0a, // section kind: Code
+0x07, // section length 7
+0x01, // functions count 1
+ // function #0 $doubleEnd
+0x05, // body size 5
+0x00, // 0 entries in locals list
+0x01, // nop
+0x0b, // end
+0x0b, // end
+0x0b, // end
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc
new file mode 100644
index 0000000000..593ed16e2d
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc
@@ -0,0 +1,9 @@
+;; expected = R"---(;; This is a polyglot C++/WAT file.
+(module
+ (func $doubleEnd (;0;) (export "doubleEnd")
+ nop
+ )
+ ;; Unexpected end byte
+ ;; Unexpected end byte
+)
+;;)---";
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc
index 68d505db0c..39b14b46ce 100644
--- a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc
@@ -24,8 +24,7 @@ void CheckDisassemblerOutput(base::Vector<const byte> module_bytes,
std::string expected_output) {
AccountingAllocator allocator;
- ModuleResult module_result = DecodeWasmModuleForDisassembler(
- module_bytes.begin(), module_bytes.end(), &allocator);
+ ModuleResult module_result = DecodeWasmModuleForDisassembler(module_bytes);
DCHECK(module_result.ok());
WasmModule* module = module_result.value().get();
@@ -44,10 +43,11 @@ void CheckDisassemblerOutput(base::Vector<const byte> module_bytes,
// Remove comment lines from expected output since they cannot be recovered
// by a disassembler.
// They were also used as part of the C++/WAT polyglot trick described below.
- expected_output =
- std::regex_replace(expected_output, std::regex(" *;;[^\\n]*\\n?"), "");
+ std::regex comment_regex(" *;;[^\\n]*\\n?");
+ expected_output = std::regex_replace(expected_output, comment_regex, "");
+ std::string output_str = std::regex_replace(output.str(), comment_regex, "");
- EXPECT_EQ(output.str(), expected_output);
+ EXPECT_EQ(expected_output, output_str);
}
TEST_F(WasmDisassemblerTest, Mvp) {
@@ -90,6 +90,17 @@ TEST_F(WasmDisassemblerTest, Names) {
CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
}
+TEST_F(WasmDisassemblerTest, InvalidNameSection) {
+ constexpr byte module_bytes[] = {
+#include "wasm-disassembler-unittest-bad-name-section.wasm.inc"
+ };
+ std::string expected(
+ "(module\n"
+ " (table $x (;0;) 0 funcref)\n"
+ ")\n");
+ CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
+}
+
TEST_F(WasmDisassemblerTest, Simd) {
constexpr byte module_bytes[] = {
#include "wasm-disassembler-unittest-simd.wasm.inc"
@@ -99,6 +110,34 @@ TEST_F(WasmDisassemblerTest, Simd) {
CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
}
+TEST_F(WasmDisassemblerTest, Gc) {
+  // WABT's `wat2wasm` does not yet support some GC features, so this binary
+  // was produced with Binaryen's `wasm-as --enable-gc --hybrid`.
+ constexpr byte module_bytes[] = {
+#include "wasm-disassembler-unittest-gc.wasm.inc"
+ };
+ std::string expected;
+#include "wasm-disassembler-unittest-gc.wat.inc"
+ CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
+}
+
+TEST_F(WasmDisassemblerTest, TooManyends) {
+ constexpr byte module_bytes[] = {
+#include "wasm-disassembler-unittest-too-many-ends.wasm.inc"
+ };
+ std::string expected;
+#include "wasm-disassembler-unittest-too-many-ends.wat.inc"
+ CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
+}
+
+// TODO(dlehmann): Add tests for the following Wasm features and extensions:
+// - custom name section for Wasm GC constructs (struct and array type names,
+// struct fields).
+// - exception-related instructions (try, catch, catch_all, delegate) and named
+// exception tags.
+// - atomic instructions (threads proposal, 0xfe prefix).
+// - some "numeric" instructions (0xfc prefix).
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc b/deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc
deleted file mode 100644
index 1f1081805c..0000000000
--- a/deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc
+++ /dev/null
@@ -1,1135 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/web-snapshot/web-snapshot.h"
-
-#include "include/v8-function.h"
-#include "src/api/api-inl.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-class WebSnapshotTest : public TestWithContext {
- protected:
- void TestWebSnapshotExtensive(
- const char* snapshot_source, const char* test_source,
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester,
- uint32_t string_count, uint32_t symbol_count,
- uint32_t builtin_object_count, uint32_t map_count, uint32_t context_count,
- uint32_t function_count, uint32_t object_count, uint32_t array_count) {
- v8::Isolate* isolate = v8_isolate();
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports =
- v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- CHECK_EQ(string_count, serializer.string_count());
- CHECK_EQ(symbol_count, serializer.symbol_count());
- CHECK_EQ(map_count, serializer.map_count());
- CHECK_EQ(builtin_object_count, serializer.builtin_object_count());
- CHECK_EQ(context_count, serializer.context_count());
- CHECK_EQ(function_count, serializer.function_count());
- CHECK_EQ(object_count, serializer.object_count());
- CHECK_EQ(array_count, serializer.array_count());
- }
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
- tester(isolate, new_context);
- CHECK_EQ(string_count, deserializer.string_count());
- CHECK_EQ(symbol_count, deserializer.symbol_count());
- CHECK_EQ(map_count, deserializer.map_count());
- CHECK_EQ(builtin_object_count, deserializer.builtin_object_count());
- CHECK_EQ(context_count, deserializer.context_count());
- CHECK_EQ(function_count, deserializer.function_count());
- CHECK_EQ(object_count, deserializer.object_count());
- CHECK_EQ(array_count, deserializer.array_count());
- }
- }
-
- void TestWebSnapshot(const char* snapshot_source, const char* test_source,
- const char* expected_result, uint32_t string_count,
- uint32_t symbol_count, uint32_t map_count,
- uint32_t builtin_object_count, uint32_t context_count,
- uint32_t function_count, uint32_t object_count,
- uint32_t array_count) {
- TestWebSnapshotExtensive(
- snapshot_source, test_source,
- [this, test_source, expected_result](
- v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
- v8::Local<v8::String> result = RunJS(test_source).As<v8::String>();
- CHECK(result->Equals(new_context, NewString(expected_result))
- .FromJust());
- },
- string_count, symbol_count, map_count, builtin_object_count,
- context_count, function_count, object_count, array_count);
- }
-
- void VerifyFunctionKind(const v8::Local<v8::Object>& result,
- const v8::Local<v8::Context>& context,
- const char* property_name,
- FunctionKind expected_kind) {
- v8::Local<v8::Function> v8_function =
- result->Get(context, NewString(property_name))
- .ToLocalChecked()
- .As<v8::Function>();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_function));
- CHECK_EQ(function->shared().kind(), expected_kind);
- }
-};
-
-} // namespace
-
-TEST_F(WebSnapshotTest, Minimal) {
- const char* snapshot_source = "var foo = {'key': 'lol'};";
- const char* test_source = "foo.key";
- const char* expected_result = "lol";
- uint32_t kStringCount = 2; // 'foo', 'Object.prototype'; 'key' is in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, EmptyObject) {
- const char* snapshot_source = "var foo = {}";
- const char* test_source = "foo";
- uint32_t kStringCount = 2; // 'foo', 'Object.prototype'
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Handle<JSReceiver> foo(v8::Utils::OpenHandle(*result));
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- CHECK_EQ(foo->map(),
- i_isolate->native_context()->object_function().initial_map());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, Numbers) {
- const char* snapshot_source =
- "var foo = {'a': 6,\n"
- " 'b': -11,\n"
- " 'c': 11.6,\n"
- " 'd': NaN,\n"
- " 'e': Number.POSITIVE_INFINITY,\n"
- " 'f': Number.NEGATIVE_INFINITY,\n"
- "}";
- const char* test_source = "foo";
- uint32_t kStringCount =
- 2; // 'foo', 'Object.prototype'; 'a'...'f' are in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
-
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- int32_t a = result->Get(new_context, NewString("a"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(a, 6);
- int32_t b = result->Get(new_context, NewString("b"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(b, -11);
- double c = result->Get(new_context, NewString("c"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(c, 11.6);
- double d = result->Get(new_context, NewString("d"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK(std::isnan(d));
- double e = result->Get(new_context, NewString("e"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(e, std::numeric_limits<double>::infinity());
- double f = result->Get(new_context, NewString("f"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(f, -std::numeric_limits<double>::infinity());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, Oddballs) {
- const char* snapshot_source =
- "var foo = {'a': false,\n"
- " 'b': true,\n"
- " 'c': null,\n"
- " 'd': undefined,\n"
- "}";
- const char* test_source = "foo";
- // 'foo', 'Object.prototype'; 'a'...'d' are in-place.
- uint32_t kStringCount = 2;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Local<Value> a =
- result->Get(new_context, NewString("a")).ToLocalChecked();
- CHECK(a->IsFalse());
- Local<Value> b =
- result->Get(new_context, NewString("b")).ToLocalChecked();
- CHECK(b->IsTrue());
- Local<Value> c =
- result->Get(new_context, NewString("c")).ToLocalChecked();
- CHECK(c->IsNull());
- Local<Value> d =
- result->Get(new_context, NewString("d")).ToLocalChecked();
- CHECK(d->IsUndefined());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, Function) {
- const char* snapshot_source =
- "var foo = {'key': function() { return '11525'; }};";
- const char* test_source = "foo.key()";
- const char* expected_result = "11525";
- // 'foo', 'Object.prototype', 'Function.prototype', function source code.
- // 'key' is in-place.
- uint32_t kStringCount = 4;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 1;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, InnerFunctionWithContext) {
- const char* snapshot_source =
- "var foo = {'key': (function() {\n"
- " let result = '11525';\n"
- " function inner() { return result; }\n"
- " return inner;\n"
- " })()};";
- const char* test_source = "foo.key()";
- const char* expected_result = "11525";
- // Strings: 'foo', 'result', 'Object.prototype', 'Function.prototype'.
- // function source code (inner). 'key' is in-place.
- uint32_t kStringCount = 5;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 1;
- uint32_t kFunctionCount = 1;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, InnerFunctionWithContextAndParentContext) {
- const char* snapshot_source =
- "var foo = {'key': (function() {\n"
- " let part1 = '11';\n"
- " function inner() {\n"
- " let part2 = '525';\n"
- " function innerinner() {\n"
- " return part1 + part2;\n"
- " }\n"
- " return innerinner;\n"
- " }\n"
- " return inner();\n"
- " })()};";
- const char* test_source = "foo.key()";
- const char* expected_result = "11525";
- // Strings: 'foo', 'Object.prototype', 'Function.prototype', function source
- // code (innerinner), 'part1', 'part2'.
- uint32_t kStringCount = 6;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 2;
- uint32_t kFunctionCount = 1;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RegExp) {
- const char* snapshot_source = "var foo = {'re': /ab+c/gi}";
- const char* test_source = "foo";
- // 'foo', 'Object.prototype', RegExp pattern, RegExp flags
- uint32_t kStringCount = 4;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Local<v8::RegExp> re = result->Get(new_context, NewString("re"))
- .ToLocalChecked()
- .As<v8::RegExp>();
- CHECK(re->IsRegExp());
- CHECK(
- re->GetSource()->Equals(new_context, NewString("ab+c")).FromJust());
- CHECK_EQ(v8::RegExp::kGlobal | v8::RegExp::kIgnoreCase, re->GetFlags());
- v8::Local<v8::Object> match =
- re->Exec(new_context, NewString("aBc")).ToLocalChecked();
- CHECK(match->IsArray());
- v8::Local<v8::Object> no_match =
- re->Exec(new_context, NewString("ac")).ToLocalChecked();
- CHECK(no_match->IsNull());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RegExpNoFlags) {
- const char* snapshot_source = "var foo = {'re': /ab+c/}";
- const char* test_source = "foo";
- // 'foo', , 'Object.prototype RegExp pattern, RegExp flags
- uint32_t kStringCount = 4;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Local<v8::RegExp> re = result->Get(new_context, NewString("re"))
- .ToLocalChecked()
- .As<v8::RegExp>();
- CHECK(re->IsRegExp());
- CHECK(
- re->GetSource()->Equals(new_context, NewString("ab+c")).FromJust());
- CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
- v8::Local<v8::Object> match =
- re->Exec(new_context, NewString("abc")).ToLocalChecked();
- CHECK(match->IsArray());
- v8::Local<v8::Object> no_match =
- re->Exec(new_context, NewString("ac")).ToLocalChecked();
- CHECK(no_match->IsNull());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplication) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.outer = function(a) {\n"
- " return function() {\n"
- " return a;\n"
- " }\n"
- "}\n"
- "foo.inner = foo.outer('hi');";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_inner = "foo.inner";
- const char* create_new_inner = "foo.outer()";
-
- // Verify that foo.inner and the JSFunction which is the result of calling
- // foo.outer() after deserialization share the SFI.
- v8::Local<v8::Function> v8_inner1 = RunJS(get_inner).As<v8::Function>();
- v8::Local<v8::Function> v8_inner2 =
- RunJS(create_new_inner).As<v8::Function>();
-
- Handle<JSFunction> inner1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner1));
- Handle<JSFunction> inner2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner2));
-
- CHECK_EQ(inner1->shared(), inner2->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationClasses) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.create = function(a) {\n"
- " return class {\n"
- " constructor(x) {this.x = x;};\n"
- " }\n"
- "}\n"
- "foo.class = foo.create('hi');";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_class = "foo.class";
- const char* create_new_class = "foo.create()";
-
- // Verify that foo.inner and the JSFunction which is the result of calling
- // foo.outer() after deserialization share the SFI.
- v8::Local<v8::Function> v8_class1 = RunJS(get_class).As<v8::Function>();
- v8::Local<v8::Function> v8_class2 =
- RunJS(create_new_class).As<v8::Function>();
-
- Handle<JSFunction> class1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class1));
- Handle<JSFunction> class2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class2));
-
- CHECK_EQ(class1->shared(), class2->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationAfterBytecodeFlushing) {
- v8_flags.stress_flush_code = true;
- v8_flags.flush_bytecode = true;
- v8::Isolate* isolate = v8_isolate();
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.outer = function() {\n"
- " let a = 'hello';\n"
- " return function() {\n"
- " return a;\n"
- " }\n"
- "}\n"
- "foo.inner = foo.outer();";
-
- TryRunJS(snapshot_source);
-
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- CollectAllGarbage();
- CollectAllGarbage();
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_outer = "foo.outer";
- const char* get_inner = "foo.inner";
- const char* create_new_inner = "foo.outer()";
-
- v8::Local<v8::Function> v8_outer = RunJS(get_outer).As<v8::Function>();
- Handle<JSFunction> outer =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_outer));
- CHECK(!outer->shared().is_compiled());
-
- v8::Local<v8::Function> v8_inner1 = RunJS(get_inner).As<v8::Function>();
- v8::Local<v8::Function> v8_inner2 =
- RunJS(create_new_inner).As<v8::Function>();
-
- Handle<JSFunction> inner1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner1));
- Handle<JSFunction> inner2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner2));
-
- CHECK(outer->shared().is_compiled());
- CHECK_EQ(inner1->shared(), inner2->shared());
-
- // Force bytecode flushing of "foo.outer".
- CollectAllGarbage();
- CollectAllGarbage();
-
- CHECK(!outer->shared().is_compiled());
-
- // Create another inner function.
- v8::Local<v8::Function> v8_inner3 =
- RunJS(create_new_inner).As<v8::Function>();
- Handle<JSFunction> inner3 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner3));
-
- // Check that it shares the SFI with the original inner function which is in
- // the snapshot.
- CHECK_EQ(inner1->shared(), inner3->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationAfterBytecodeFlushingClasses) {
- v8_flags.stress_flush_code = true;
- v8_flags.flush_bytecode = true;
- v8::Isolate* isolate = v8_isolate();
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.create = function(a) {\n"
- " return class {\n"
- " constructor(x) {this.x = x;};\n"
- " }\n"
- "}\n"
- "foo.class = foo.create('hi');";
-
- TryRunJS(snapshot_source);
-
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- CollectAllGarbage();
- CollectAllGarbage();
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_create = "foo.create";
- const char* get_class = "foo.class";
- const char* create_new_class = "foo.create()";
-
- v8::Local<v8::Function> v8_create = RunJS(get_create).As<v8::Function>();
- Handle<JSFunction> create =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_create));
- CHECK(!create->shared().is_compiled());
-
- v8::Local<v8::Function> v8_class1 = RunJS(get_class).As<v8::Function>();
- v8::Local<v8::Function> v8_class2 =
- RunJS(create_new_class).As<v8::Function>();
-
- Handle<JSFunction> class1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class1));
- Handle<JSFunction> class2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class2));
-
- CHECK(create->shared().is_compiled());
- CHECK_EQ(class1->shared(), class2->shared());
-
- // Force bytecode flushing of "foo.outer".
- CollectAllGarbage();
- CollectAllGarbage();
-
- CHECK(!create->shared().is_compiled());
-
- // Create another inner function.
- v8::Local<v8::Function> v8_class3 =
- RunJS(create_new_class).As<v8::Function>();
- Handle<JSFunction> class3 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class3));
-
- // Check that it shares the SFI with the original inner function which is in
- // the snapshot.
- CHECK_EQ(class1->shared(), class3->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationOfFunctionsNotInSnapshot) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.outer = function(a) {\n"
- " return function() {\n"
- " return a;\n"
- " }\n"
- "}\n";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* create_new_inner = "foo.outer()";
-
- // Verify that repeated invocations of foo.outer() return functions which
- // share the SFI.
- v8::Local<v8::Function> v8_inner1 =
- RunJS(create_new_inner).As<v8::Function>();
- v8::Local<v8::Function> v8_inner2 =
- RunJS(create_new_inner).As<v8::Function>();
-
- Handle<JSFunction> inner1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner1));
- Handle<JSFunction> inner2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner2));
-
- CHECK_EQ(inner1->shared(), inner2->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, FunctionKinds) {
- const char* snapshot_source =
- "var foo = {a: function() {},\n"
- " b: () => {},\n"
- " c: async function() {},\n"
- " d: async () => {},\n"
- " e: function*() {},\n"
- " f: async function*() {}\n"
- "}";
- const char* test_source = "foo";
- // 'foo', 'Object.prototype', 'Function.prototype', 'AsyncFunction.prototype',
- // 'AsyncGeneratorFunction.prototype", "GeneratorFunction.prototype", source
- // code. 'a'...'f' in-place.
- uint32_t kStringCount = 7;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 5;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 6;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- // Verify all FunctionKinds.
- VerifyFunctionKind(result, new_context, "a",
- FunctionKind::kNormalFunction);
- VerifyFunctionKind(result, new_context, "b",
- FunctionKind::kArrowFunction);
- VerifyFunctionKind(result, new_context, "c",
- FunctionKind::kAsyncFunction);
- VerifyFunctionKind(result, new_context, "d",
- FunctionKind::kAsyncArrowFunction);
- VerifyFunctionKind(result, new_context, "e",
- FunctionKind::kGeneratorFunction);
- VerifyFunctionKind(result, new_context, "f",
- FunctionKind::kAsyncGeneratorFunction);
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-// Test that concatenating JS code to the snapshot works.
-TEST_F(WebSnapshotTest, Concatenation) {
- v8::Isolate* isolate = v8_isolate();
-
- const char* snapshot_source = "var foo = {a: 1};\n";
- const char* source_to_append = "var bar = {a: 10};";
- const char* test_source = "foo.a + bar.a";
- uint32_t kObjectCount = 1;
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- CHECK_EQ(kObjectCount, serializer.object_count());
- }
-
- auto buffer_size = snapshot_data.buffer_size + strlen(source_to_append);
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- memcpy(buffer.get(), snapshot_data.buffer, snapshot_data.buffer_size);
- memcpy(buffer.get() + snapshot_data.buffer_size, source_to_append,
- strlen(source_to_append));
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, buffer.get(), buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
- CHECK_EQ(kObjectCount, deserializer.object_count());
-
- v8::Local<v8::Number> result = RunJS(test_source).As<v8::Number>();
- CHECK_EQ(11, result->Value());
- }
-}
-
-// Test that errors from invalid concatenated code are handled correctly.
-TEST_F(WebSnapshotTest, ConcatenationErrors) {
- v8::Isolate* isolate = v8_isolate();
-
- const char* snapshot_source = "var foo = {a: 1};\n";
- const char* source_to_append = "wontparse+[)";
- uint32_t kObjectCount = 1;
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- CHECK_EQ(kObjectCount, serializer.object_count());
- }
-
- auto buffer_size = snapshot_data.buffer_size + strlen(source_to_append);
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- memcpy(buffer.get(), snapshot_data.buffer, snapshot_data.buffer_size);
- memcpy(buffer.get() + snapshot_data.buffer_size, source_to_append,
- strlen(source_to_append));
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, buffer.get(), buffer_size);
- CHECK(!deserializer.Deserialize());
- }
-}
-
-TEST_F(WebSnapshotTest, CompactedSourceCode) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "function foo() { 'foo' }\n"
- "function bar() { 'bar' }\n"
- "function baz() { 'baz' }\n"
- "let e = [foo, bar, baz]";
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "e").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_function = "e[0]";
-
- // Verify that the source code got compacted.
- v8::Local<v8::Function> v8_function =
- RunJS(get_function).As<v8::Function>();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_function));
- Handle<String> function_script_source =
- handle(String::cast(Script::cast(function->shared().script()).source()),
- i_isolate());
- const char* raw_expected_source = "() { 'foo' }() { 'bar' }() { 'baz' }";
-
- Handle<String> expected_source = Utils::OpenHandle(
- *v8::String::NewFromUtf8(isolate, raw_expected_source).ToLocalChecked(),
- i_isolate());
- CHECK(function_script_source->Equals(*expected_source));
- }
-}
-
-TEST_F(WebSnapshotTest, InPlaceStringsInArrays) {
- const char* snapshot_source = "var foo = ['one', 'two', 'three'];";
- const char* test_source = "foo.join('');";
- const char* expected_result = "onetwothree";
- uint32_t kStringCount = 1; // 'foo'; Other strings are in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 0;
- uint32_t kMapCount = 0;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 0;
- uint32_t kArrayCount = 1;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RepeatedInPlaceStringsInArrays) {
- const char* snapshot_source = "var foo = ['one', 'two', 'one'];";
- const char* test_source = "foo.join('');";
- const char* expected_result = "onetwoone";
- uint32_t kStringCount = 2; // 'foo', 'one'; Other strings are in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 0;
- uint32_t kMapCount = 0;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 0;
- uint32_t kArrayCount = 1;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, InPlaceStringsInObjects) {
- const char* snapshot_source = "var foo = {a: 'one', b: 'two', c: 'three'};";
- const char* test_source = "foo.a + foo.b + foo.c;";
- const char* expected_result = "onetwothree";
- // 'foo', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 2;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RepeatedInPlaceStringsInObjects) {
- const char* snapshot_source = "var foo = {a: 'one', b: 'two', c: 'one'};";
- const char* test_source = "foo.a + foo.b + foo.c;";
- const char* expected_result = "onetwoone";
- // 'foo', 'one', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 3;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, BuiltinObjects) {
- const char* snapshot_source = "var foo = {a: Error.prototype};";
- const char* test_source = "foo.a == Error.prototype ? \"pass\" : \"fail\"";
- const char* expected_result = "pass";
- // 'foo', 'Error.prototype', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 3;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, BuiltinObjectsDeduplicated) {
- const char* snapshot_source =
- "var foo = {a: Error.prototype, b: Error.prototype}";
- const char* test_source = "foo.a === Error.prototype ? \"pass\" : \"fail\"";
- const char* expected_result = "pass";
- // 'foo', 'Error.prototype', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 3;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, ConstructorFunctionKinds) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "class Base { constructor() {} };\n"
- "class Derived extends Base { constructor() {} };\n"
- "class BaseDefault {};\n"
- "class DerivedDefault extends BaseDefault {};\n";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 4);
- exports->Set(isolate, 0,
- v8::String::NewFromUtf8(isolate, "Base").ToLocalChecked());
- exports->Set(isolate, 1,
- v8::String::NewFromUtf8(isolate, "Derived").ToLocalChecked());
- exports->Set(
- isolate, 2,
- v8::String::NewFromUtf8(isolate, "BaseDefault").ToLocalChecked());
- exports->Set(
- isolate, 3,
- v8::String::NewFromUtf8(isolate, "DerivedDefault").ToLocalChecked());
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- v8::Local<v8::Function> v8_base = RunJS("Base").As<v8::Function>();
- Handle<JSFunction> base =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_base));
- CHECK_EQ(FunctionKind::kBaseConstructor, base->shared().kind());
-
- v8::Local<v8::Function> v8_derived = RunJS("Derived").As<v8::Function>();
- Handle<JSFunction> derived =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_derived));
- CHECK_EQ(FunctionKind::kDerivedConstructor, derived->shared().kind());
-
- v8::Local<v8::Function> v8_base_default =
- RunJS("BaseDefault").As<v8::Function>();
- Handle<JSFunction> base_default =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_base_default));
- CHECK_EQ(FunctionKind::kDefaultBaseConstructor,
- base_default->shared().kind());
-
- v8::Local<v8::Function> v8_derived_default =
- RunJS("DerivedDefault").As<v8::Function>();
- Handle<JSFunction> derived_default =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_derived_default));
- CHECK_EQ(FunctionKind::kDefaultDerivedConstructor,
- derived_default->shared().kind());
- }
-}
-
-TEST_F(WebSnapshotTest, SlackElementsInObjects) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "var foo = {};"
- "for (let i = 0; i < 100; ++i) {"
- " foo[i] = i;"
- "}"
- "var bar = {};"
- "for (let i = 0; i < 100; ++i) {"
- " bar[i] = {};"
- "}";
-
- RunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 2);
- exports->Set(isolate, 0,
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked());
- exports->Set(isolate, 1,
- v8::String::NewFromUtf8(isolate, "bar").ToLocalChecked());
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- Handle<JSObject> foo =
- Handle<JSObject>::cast(Utils::OpenHandle<v8::Object, JSReceiver>(
- RunJS("foo").As<v8::Object>()));
- CHECK_EQ(100, foo->elements().length());
- CHECK_EQ(HOLEY_ELEMENTS, foo->GetElementsKind());
-
- Handle<JSObject> bar =
- Handle<JSObject>::cast(Utils::OpenHandle<v8::Object, JSReceiver>(
- RunJS("bar").As<v8::Object>()));
- CHECK_EQ(100, bar->elements().length());
- CHECK_EQ(HOLEY_ELEMENTS, bar->GetElementsKind());
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/zone/zone-vector-unittest.cc b/deps/v8/test/unittests/zone/zone-vector-unittest.cc
new file mode 100644
index 0000000000..d2406f8b45
--- /dev/null
+++ b/deps/v8/test/unittests/zone/zone-vector-unittest.cc
@@ -0,0 +1,373 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <optional>
+
+#include "src/zone/zone-containers.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8::internal {
+
+template <class T>
+class LiveSet {
+ public:
+ void Add(const T* new_entry) {
+ CHECK(!Contains(new_entry));
+ set_.insert(new_entry);
+ }
+
+ void Remove(const T* old_entry) {
+ CHECK(Contains(old_entry));
+ set_.erase(old_entry);
+ }
+
+ void CheckContainsAll(ZoneVector<T>& vector) {
+ CHECK_EQ(vector.size(), set_.size());
+ for (const T* m = vector.begin(); m != vector.end(); m++) {
+ CHECK(Contains(m));
+ }
+ }
+
+ void CheckEmpty() { CHECK_EQ(0, set_.size()); }
+
+ private:
+ bool Contains(const T* entry) {
+ // std::set::contains is a C++20 extension.
+ return set_.find(entry) != set_.end();
+ }
+
+ std::set<const T*> set_;
+};
+
+template <typename T>
+LiveSet<T>& live_set() {
+ static LiveSet<T> static_live_set;
+ return static_live_set;
+}
+
+class Trivial {
+ public:
+ Trivial() : id_(0) {}
+ explicit Trivial(int id) : id_(id) {}
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(std::is_trivially_copyable_v<Trivial>);
+
+template <>
+class LiveSet<Trivial> {
+ public:
+ void Add(const Trivial* new_entry) { UNREACHABLE(); }
+ void Remove(const Trivial* old_entry) { UNREACHABLE(); }
+ void CheckContainsAll(ZoneVector<Trivial>&) {}
+ void CheckEmpty() {}
+};
+
+class CopyAssignable {
+ public:
+ CopyAssignable() : id_(0) { live_set<CopyAssignable>().Add(this); }
+ explicit CopyAssignable(int id) : id_(id) {
+ live_set<CopyAssignable>().Add(this);
+ }
+ CopyAssignable(const CopyAssignable& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<CopyAssignable>().Add(this);
+ }
+ ~CopyAssignable() { live_set<CopyAssignable>().Remove(this); }
+ CopyAssignable& operator=(const CopyAssignable& other) V8_NOEXCEPT = default;
+
+ CopyAssignable(CopyAssignable&& other) = delete;
+ CopyAssignable& operator=(CopyAssignable&& other) = delete;
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(!std::is_trivially_copyable_v<CopyAssignable>);
+static_assert(std::is_copy_assignable_v<CopyAssignable>);
+static_assert(!std::is_move_assignable_v<CopyAssignable>);
+
+class MoveAssignable {
+ public:
+ MoveAssignable() : id_(0) { live_set<MoveAssignable>().Add(this); }
+ explicit MoveAssignable(int id) : id_(id) {
+ live_set<MoveAssignable>().Add(this);
+ }
+ MoveAssignable(const MoveAssignable& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<MoveAssignable>().Add(this);
+ }
+ MoveAssignable(MoveAssignable&& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<MoveAssignable>().Add(this);
+ }
+ MoveAssignable& operator=(const MoveAssignable& other) = delete;
+ MoveAssignable& operator=(MoveAssignable&& other) V8_NOEXCEPT {
+ id_ = other.id_;
+ return *this;
+ }
+ ~MoveAssignable() { live_set<MoveAssignable>().Remove(this); }
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(!std::is_trivially_copyable_v<MoveAssignable>);
+static_assert(std::is_move_assignable_v<MoveAssignable>);
+static_assert(!std::is_copy_assignable_v<MoveAssignable>);
+
+class NotAssignable {
+ public:
+ NotAssignable() : id_(0) { live_set<NotAssignable>().Add(this); }
+ explicit NotAssignable(int id) : id_(id) {
+ live_set<NotAssignable>().Add(this);
+ }
+ NotAssignable(const NotAssignable& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<NotAssignable>().Add(this);
+ }
+ NotAssignable& operator=(const NotAssignable& other) = delete;
+ ~NotAssignable() { live_set<NotAssignable>().Remove(this); }
+
+ NotAssignable(NotAssignable&& other) = delete;
+ NotAssignable& operator=(NotAssignable&& other) = delete;
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(!std::is_trivially_copyable_v<NotAssignable>);
+static_assert(!std::is_copy_assignable_v<NotAssignable>);
+static_assert(!std::is_move_assignable_v<NotAssignable>);
+
+class ZoneVectorTest : public TestWithZone {
+ public:
+ template <class T>
+ void CheckConsistency(ZoneVector<T>& vector, std::initializer_list<int> ids) {
+ live_set<T>().CheckContainsAll(vector);
+ CHECK_EQ(vector.size(), ids.size());
+ auto it = ids.begin();
+ for (size_t i = 0; i < ids.size(); i++) {
+ CHECK_EQ(*it++, vector[i].id());
+ }
+ }
+
+ template <class T>
+ void Basic() {
+ {
+ // Constructor with definition.
+ ZoneVector<T> v(1, T(1), zone());
+ CheckConsistency(v, {1});
+ }
+ live_set<T>().CheckEmpty();
+
+ {
+ // Constructor with initializer list.
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ CheckConsistency(v, {1, 2, 3});
+ }
+ live_set<T>().CheckEmpty();
+
+ {
+ std::optional<ZoneVector<T>> v1;
+ v1.emplace({T(1), T(2), T(3)}, zone());
+ CheckConsistency(v1.value(), {1, 2, 3});
+ {
+ // Copy assignment with growth.
+ ZoneVector<T> v2 = v1.value();
+ v1.reset();
+ CheckConsistency(v2, {1, 2, 3});
+ }
+ v1.emplace({T(1), T(2), T(3)}, zone());
+ CheckConsistency(v1.value(), {1, 2, 3});
+
+ // Copy assignment without growth.
+ ZoneVector<T> v3({T(4), T(5), T(6)}, zone());
+ v3 = v1.value();
+ v1.reset();
+ CheckConsistency(v3, {1, 2, 3});
+
+ // Move assignment.
+ {
+ ZoneVector<T> v4(std::move(v3));
+ CheckConsistency(v4, {1, 2, 3});
+ }
+ CheckConsistency(v3, {});
+ }
+ live_set<T>().CheckEmpty();
+ }
+
+ template <class T>
+ void Assign() {
+ {
+ // Assign with sufficient capacity.
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.assign(2, T(4));
+ CheckConsistency(v, {4, 4});
+ // This time, capacity > size.
+ v.assign(3, T(5));
+ CheckConsistency(v, {5, 5, 5});
+ }
+
+ {
+ // Assign with capacity growth.
+ ZoneVector<T> v({T(1)}, zone());
+ v.assign(2, T(4));
+ CheckConsistency(v, {4, 4});
+ }
+
+ live_set<T>().CheckEmpty();
+ }
+
+ template <class T>
+ void Insert() {
+ // Check that we can insert (by iterator) in the right positions.
+ {
+ ZoneVector<T> v({T(2), T(4)}, zone());
+ {
+ T src1[] = {T(1)};
+ T src3[] = {T(3)};
+ T src5[] = {T(5)};
+ v.insert(&v.at(0), src1, std::end(src1));
+ v.insert(&v.at(2), src3, std::end(src3));
+ v.insert(v.end(), src5, std::end(src5));
+ }
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ }
+
+ // Check that we can insert (by count) in the right positions.
+ {
+ ZoneVector<T> v({T(2), T(4)}, zone());
+ v.insert(&v.at(0), 1, T(1));
+ v.insert(&v.at(2), 1, T(3));
+ v.insert(v.end(), 1, T(5));
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ }
+
+ // Test the "insufficient capacity" case in PrepareForInsertion.
+ {
+ ZoneVector<T> v(zone());
+ CHECK_EQ(0, v.capacity());
+ v.insert(v.begin(), 1, T(5));
+ CheckConsistency(v, {5});
+ {
+ T src[] = {T(1), T(2), T(3), T(4)};
+ v.insert(v.begin(), src, std::end(src));
+ }
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ }
+
+ // Test "case 1" of sufficient capacity in PrepareForInsertion.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4), T(5)}, zone());
+ v.reserve(10);
+ CHECK_EQ(10, v.capacity());
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ {
+ T src[] = {T(11), T(12), T(13), T(14)};
+ v.insert(&v.at(3), src, std::end(src));
+ }
+ CheckConsistency(v, {1, 2, 3, 11, 12, 13, 14, 4, 5});
+ }
+
+ // Test "case 2" of sufficient capacity in PrepareForInsertion.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4), T(5)}, zone());
+ v.reserve(10);
+ {
+ T src[] = {T(11), T(12)};
+ v.insert(&v.at(2), src, std::end(src));
+ }
+ CheckConsistency(v, {1, 2, 11, 12, 3, 4, 5});
+ }
+ live_set<T>().CheckEmpty();
+
+ // For good measure, test the edge case where we're inserting exactly
+ // as many elements as we're moving.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4)}, zone());
+ v.reserve(10);
+ {
+ T src[] = {T(11), T(12)};
+ v.insert(&v.at(2), src, std::end(src));
+ }
+ }
+ }
+
+ template <class T>
+ void Erase() {
+ // Erase one element.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(&v.at(1));
+ CheckConsistency(v, {1, 3});
+ }
+ // Erase a range.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4)}, zone());
+ v.erase(&v.at(1), &v.at(3));
+ CheckConsistency(v, {1, 4});
+ }
+ // Erase first element.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(v.begin());
+ CheckConsistency(v, {2, 3});
+ }
+ // Erase last element.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(&v.at(2));
+ CheckConsistency(v, {1, 2});
+ }
+ // Erase nothing (empty range).
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(v.begin(), v.begin());
+ CheckConsistency(v, {1, 2, 3});
+ v.erase(&v.at(1), &v.at(1));
+ CheckConsistency(v, {1, 2, 3});
+ v.erase(v.end(), v.end());
+ CheckConsistency(v, {1, 2, 3});
+ }
+ live_set<T>().CheckEmpty();
+ }
+};
+
+TEST_F(ZoneVectorTest, Basic) {
+ Basic<Trivial>();
+ Basic<CopyAssignable>();
+ Basic<MoveAssignable>();
+ Basic<NotAssignable>();
+}
+
+TEST_F(ZoneVectorTest, Assign) {
+ Assign<Trivial>();
+ Assign<CopyAssignable>();
+ Assign<MoveAssignable>();
+ Assign<NotAssignable>();
+}
+
+TEST_F(ZoneVectorTest, Insert) {
+ Insert<Trivial>();
+ Insert<CopyAssignable>();
+ Insert<MoveAssignable>();
+ Insert<NotAssignable>();
+}
+
+TEST_F(ZoneVectorTest, Erase) {
+ Erase<Trivial>();
+ Erase<CopyAssignable>();
+ Erase<MoveAssignable>();
+ Erase<NotAssignable>();
+}
+
+} // namespace v8::internal