author     Michaël Zasso <targos@protonmail.com>  2022-04-19 09:00:36 +0200
committer  Michaël Zasso <targos@protonmail.com>  2022-04-21 11:54:15 +0200
commit     6bbc5596b13828a5274a8aeaea4929bdc22168a4
tree       3fa11feb9240c699aff627e049d33c358a7320a4
parent     0d58c0be3e1c3013959c02d42a2a2f21dd31c5f8
download   node-new-6bbc5596b13828a5274a8aeaea4929bdc22168a4.tar.gz
deps: update V8 to 10.2.154.2
PR-URL: https://github.com/nodejs/node/pull/42740
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
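
The version bump itself lands in deps/v8/include/v8-version.h (listed in the diffstat below). As a quick embedder-side sanity check — a sketch only, assuming deps/v8/include is on the compiler's include path — the bundled version macros can be printed directly:

    // Sketch: print the V8 version macros shipped with this update.
    // After this commit they should expand to 10, 2, 154 and 2.
    #include <cstdio>

    #include "v8-version.h"  // defines V8_MAJOR_VERSION, V8_MINOR_VERSION, ...

    int main() {
      std::printf("V8 %d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
                  V8_BUILD_NUMBER, V8_PATCH_LEVEL);
      return 0;
    }
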
-rw-r--r--deps/v8/AUTHORS8
-rw-r--r--deps/v8/BUILD.bazel74
-rw-r--r--deps/v8/BUILD.gn40
-rw-r--r--deps/v8/DEPS125
-rw-r--r--deps/v8/bazel/config/BUILD.bazel9
-rw-r--r--deps/v8/gni/OWNERS2
-rw-r--r--deps/v8/gni/release_branch_toggle.gni2
-rw-r--r--deps/v8/include/cppgc/default-platform.h9
-rw-r--r--deps/v8/include/cppgc/internal/api-constants.h3
-rw-r--r--deps/v8/include/cppgc/internal/caged-heap-local-data.h35
-rw-r--r--deps/v8/include/cppgc/internal/finalizer-trait.h3
-rw-r--r--deps/v8/include/cppgc/internal/write-barrier.h6
-rw-r--r--deps/v8/include/cppgc/type-traits.h19
-rw-r--r--deps/v8/include/js_protocol.pdl37
-rw-r--r--deps/v8/include/v8-cppgc.h4
-rw-r--r--deps/v8/include/v8-inspector.h16
-rw-r--r--deps/v8/include/v8-internal.h4
-rw-r--r--deps/v8/include/v8-metrics.h30
-rw-r--r--deps/v8/include/v8-script.h1
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/infra/mb/mb_config.pyl2
-rw-r--r--deps/v8/infra/testing/builders.pyl44
-rw-r--r--deps/v8/samples/cppgc/hello-world.cc2
-rw-r--r--deps/v8/src/api/api-arguments-inl.h69
-rw-r--r--deps/v8/src/api/api-macros.h9
-rw-r--r--deps/v8/src/api/api-natives.cc2
-rw-r--r--deps/v8/src/api/api.cc157
-rw-r--r--deps/v8/src/ast/OWNERS1
-rw-r--r--deps/v8/src/ast/ast.cc22
-rw-r--r--deps/v8/src/ast/ast.h24
-rw-r--r--deps/v8/src/base/atomic-utils.h9
-rw-r--r--deps/v8/src/base/atomicops.h12
-rw-r--r--deps/v8/src/base/bits.h10
-rw-r--r--deps/v8/src/base/macros.h6
-rw-r--r--deps/v8/src/base/once.h2
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc27
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc18
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc32
-rw-r--r--deps/v8/src/base/platform/platform.h24
-rw-r--r--deps/v8/src/base/template-utils.h47
-rw-r--r--deps/v8/src/base/threaded-list.h11
-rw-r--r--deps/v8/src/base/type-traits.h48
-rw-r--r--deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h29
-rw-r--r--deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h29
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h9
-rw-r--r--deps/v8/src/baseline/baseline-batch-compiler.cc8
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc59
-rw-r--r--deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h33
-rw-r--r--deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h17
-rw-r--r--deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h17
-rw-r--r--deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h17
-rw-r--r--deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h246
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h18
-rw-r--r--deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h71
-rw-r--r--deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h23
-rw-r--r--deps/v8/src/builtins/accessors.cc51
-rw-r--r--deps/v8/src/builtins/accessors.h7
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc168
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc151
-rw-r--r--deps/v8/src/builtins/arraybuffer.tq7
-rw-r--r--deps/v8/src/builtins/base.tq4
-rw-r--r--deps/v8/src/builtins/builtins-api.cc1
-rw-r--r--deps/v8/src/builtins/builtins-array.cc58
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc31
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc3
-rw-r--r--deps/v8/src/builtins/builtins-date.cc86
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h10
-rw-r--r--deps/v8/src/builtins/builtins-function.cc73
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc78
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.h8
-rw-r--r--deps/v8/src/builtins/builtins-shadow-realms.cc44
-rw-r--r--deps/v8/src/builtins/builtins-shadowrealm-gen.cc94
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc25
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc15
-rw-r--r--deps/v8/src/builtins/builtins-struct.cc2
-rw-r--r--deps/v8/src/builtins/builtins-temporal.cc79
-rw-r--r--deps/v8/src/builtins/builtins.cc2
-rw-r--r--deps/v8/src/builtins/cast.tq7
-rw-r--r--deps/v8/src/builtins/function.tq8
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc155
-rw-r--r--deps/v8/src/builtins/loong64/builtins-loong64.cc168
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc169
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc166
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq8
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc581
-rw-r--r--deps/v8/src/builtins/promise-constructor.tq6
-rw-r--r--deps/v8/src/builtins/promise-misc.tq2
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc158
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc203
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq54
-rw-r--r--deps/v8/src/builtins/typed-array-set.tq16
-rw-r--r--deps/v8/src/builtins/typed-array-sort.tq41
-rw-r--r--deps/v8/src/builtins/wasm.tq7
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc224
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc5
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h56
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc53
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h34
-rw-r--r--deps/v8/src/codegen/arm64/reglist-arm64.h8
-rw-r--r--deps/v8/src/codegen/bailout-reason.h1
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc7
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h10
-rw-r--r--deps/v8/src/codegen/compiler.cc595
-rw-r--r--deps/v8/src/codegen/compiler.h106
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc5
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h14
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.cc5
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.h24
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc5
-rw-r--r--deps/v8/src/codegen/mips/register-mips.h7
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc5
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h24
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h7
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc3
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc121
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h44
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h115
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc35
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h18
-rw-r--r--deps/v8/src/codegen/register.h22
-rw-r--r--deps/v8/src/codegen/reloc-info.cc2
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc2
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc5
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h35
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc47
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h31
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc6
-rw-r--r--deps/v8/src/codegen/tnode.h2
-rw-r--r--deps/v8/src/codegen/turbo-assembler.cc2
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc36
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h91
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc39
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h4
-rw-r--r--deps/v8/src/common/globals.h115
-rw-r--r--deps/v8/src/common/message-template.h4
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc54
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h14
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc7
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc15
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc25
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h2
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc8
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc6
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc41
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h61
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.cc2
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc98
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc14
-rw-r--r--deps/v8/src/compiler/backend/register-allocator-verifier.cc2
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc3
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc64
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc13
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc127
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc26
-rw-r--r--deps/v8/src/compiler/branch-condition-duplicator.cc109
-rw-r--r--deps/v8/src/compiler/branch-condition-duplicator.h85
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc5
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc12
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc16
-rw-r--r--deps/v8/src/compiler/code-assembler.h36
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc76
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h8
-rw-r--r--deps/v8/src/compiler/common-operator.cc150
-rw-r--r--deps/v8/src/compiler/common-operator.h53
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc47
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc32
-rw-r--r--deps/v8/src/compiler/graph-assembler.h7
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc256
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h30
-rw-r--r--deps/v8/src/compiler/graph.h2
-rw-r--r--deps/v8/src/compiler/heap-refs.cc10
-rw-r--r--deps/v8/src/compiler/heap-refs.h3
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc554
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h9
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc2
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc3
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc34
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h7
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc41
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h5
-rw-r--r--deps/v8/src/compiler/linkage.cc1
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc128
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h6
-rw-r--r--deps/v8/src/compiler/opcodes.h1
-rw-r--r--deps/v8/src/compiler/pipeline.cc133
-rw-r--r--deps/v8/src/compiler/pipeline.h6
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h20
-rw-r--r--deps/v8/src/compiler/representation-change.cc21
-rw-r--r--deps/v8/src/compiler/representation-change.h2
-rw-r--r--deps/v8/src/compiler/simplified-lowering-verifier.cc75
-rw-r--r--deps/v8/src/compiler/simplified-lowering-verifier.h54
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc98
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h5
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc19
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/typer.cc1
-rw-r--r--deps/v8/src/compiler/verifier.cc8
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc71
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h11
-rw-r--r--deps/v8/src/compiler/wasm-inlining.cc187
-rw-r--r--deps/v8/src/compiler/wasm-inlining.h65
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.cc23
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.h2
-rw-r--r--deps/v8/src/d8/d8.cc183
-rw-r--r--deps/v8/src/d8/d8.h18
-rw-r--r--deps/v8/src/date/date.cc61
-rw-r--r--deps/v8/src/date/date.h14
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc2
-rw-r--r--deps/v8/src/debug/debug-evaluate.h16
-rw-r--r--deps/v8/src/debug/debug-interface.cc48
-rw-r--r--deps/v8/src/debug/debug-interface.h7
-rw-r--r--deps/v8/src/debug/debug.cc9
-rw-r--r--deps/v8/src/debug/debug.h6
-rw-r--r--deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc4
-rw-r--r--deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc3
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc106
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h30
-rw-r--r--deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc3
-rw-r--r--deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc3
-rw-r--r--deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc3
-rw-r--r--deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc3
-rw-r--r--deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc4
-rw-r--r--deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc3
-rw-r--r--deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc4
-rw-r--r--deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc4
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc18
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc2
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc1
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc146
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc30
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc16
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc4
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc4
-rw-r--r--deps/v8/src/execution/clobber-registers.cc25
-rw-r--r--deps/v8/src/execution/frames.cc4
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h6
-rw-r--r--deps/v8/src/execution/isolate.cc108
-rw-r--r--deps/v8/src/execution/isolate.h23
-rw-r--r--deps/v8/src/execution/loong64/simulator-loong64.cc12
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc132
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc32
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.h8
-rw-r--r--deps/v8/src/execution/stack-guard.cc12
-rw-r--r--deps/v8/src/execution/stack-guard.h3
-rw-r--r--deps/v8/src/execution/thread-local-top.cc6
-rw-r--r--deps/v8/src/execution/thread-local-top.h10
-rw-r--r--deps/v8/src/execution/tiering-manager.cc233
-rw-r--r--deps/v8/src/execution/tiering-manager.h10
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc16
-rw-r--r--deps/v8/src/flags/flag-definitions.h29
-rw-r--r--deps/v8/src/handles/global-handles.cc88
-rw-r--r--deps/v8/src/handles/handles.cc5
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.cc2
-rw-r--r--deps/v8/src/heap/barrier.h87
-rw-r--r--deps/v8/src/heap/base-space.h12
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.cc40
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h23
-rw-r--r--deps/v8/src/heap/code-range.cc28
-rw-r--r--deps/v8/src/heap/collection-barrier.cc1
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc1
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc178
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h35
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h59
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc32
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h38
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc34
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h16
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap-local-data.cc8
-rw-r--r--deps/v8/src/heap/cppgc/compactor.cc1
-rw-r--r--deps/v8/src/heap/cppgc/concurrent-marker.cc18
-rw-r--r--deps/v8/src/heap/cppgc/concurrent-marker.h7
-rw-r--r--deps/v8/src/heap/cppgc/default-platform.cc14
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc17
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.h3
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc6
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc46
-rw-r--r--deps/v8/src/heap/cppgc/marker.h54
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc8
-rw-r--r--deps/v8/src/heap/cppgc/metric-recorder.h6
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc10
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h2
-rw-r--r--deps/v8/src/heap/cppgc/object-start-bitmap.h24
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc7
-rw-r--r--deps/v8/src/heap/cppgc/remembered-set.cc5
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.cc66
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.h15
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc49
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.cc4
-rw-r--r--deps/v8/src/heap/factory-base.cc20
-rw-r--r--deps/v8/src/heap/factory-base.h4
-rw-r--r--deps/v8/src/heap/factory.cc24
-rw-r--r--deps/v8/src/heap/factory.h3
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc1
-rw-r--r--deps/v8/src/heap/gc-tracer-inl.h174
-rw-r--r--deps/v8/src/heap/gc-tracer.cc570
-rw-r--r--deps/v8/src/heap/gc-tracer.h137
-rw-r--r--deps/v8/src/heap/heap-allocator-inl.h6
-rw-r--r--deps/v8/src/heap/heap-allocator.cc9
-rw-r--r--deps/v8/src/heap/heap-allocator.h3
-rw-r--r--deps/v8/src/heap/heap.cc115
-rw-r--r--deps/v8/src/heap/heap.h10
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h8
-rw-r--r--deps/v8/src/heap/incremental-marking.cc149
-rw-r--r--deps/v8/src/heap/incremental-marking.h14
-rw-r--r--deps/v8/src/heap/large-spaces.cc42
-rw-r--r--deps/v8/src/heap/large-spaces.h34
-rw-r--r--deps/v8/src/heap/local-heap.cc3
-rw-r--r--deps/v8/src/heap/mark-compact.cc127
-rw-r--r--deps/v8/src/heap/mark-compact.h1
-rw-r--r--deps/v8/src/heap/marking-barrier.h2
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h1
-rw-r--r--deps/v8/src/heap/memory-allocator.cc410
-rw-r--r--deps/v8/src/heap/memory-allocator.h87
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.cc3
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.h1
-rw-r--r--deps/v8/src/heap/memory-chunk.cc107
-rw-r--r--deps/v8/src/heap/memory-chunk.h23
-rw-r--r--deps/v8/src/heap/new-spaces-inl.h68
-rw-r--r--deps/v8/src/heap/new-spaces.cc151
-rw-r--r--deps/v8/src/heap/new-spaces.h113
-rw-r--r--deps/v8/src/heap/objects-visiting.h2
-rw-r--r--deps/v8/src/heap/paged-spaces-inl.h101
-rw-r--r--deps/v8/src/heap/paged-spaces.cc106
-rw-r--r--deps/v8/src/heap/paged-spaces.h99
-rw-r--r--deps/v8/src/heap/read-only-heap.cc6
-rw-r--r--deps/v8/src/heap/read-only-heap.h6
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc41
-rw-r--r--deps/v8/src/heap/read-only-spaces.h15
-rw-r--r--deps/v8/src/heap/remembered-set.h45
-rw-r--r--deps/v8/src/heap/safepoint.cc42
-rw-r--r--deps/v8/src/heap/safepoint.h2
-rw-r--r--deps/v8/src/heap/scavenge-job.cc7
-rw-r--r--deps/v8/src/heap/scavenger.cc28
-rw-r--r--deps/v8/src/heap/scavenger.h1
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc1
-rw-r--r--deps/v8/src/heap/slot-set.cc30
-rw-r--r--deps/v8/src/heap/slot-set.h11
-rw-r--r--deps/v8/src/heap/spaces-inl.h116
-rw-r--r--deps/v8/src/heap/spaces.cc47
-rw-r--r--deps/v8/src/heap/spaces.h83
-rw-r--r--deps/v8/src/heap/sweeper.cc148
-rw-r--r--deps/v8/src/heap/sweeper.h33
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc83
-rw-r--r--deps/v8/src/ic/call-optimization.cc4
-rw-r--r--deps/v8/src/ic/ic.cc62
-rw-r--r--deps/v8/src/ic/ic.h4
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc328
-rw-r--r--deps/v8/src/ic/keyed-store-generic.h8
-rw-r--r--deps/v8/src/init/bootstrapper.cc114
-rw-r--r--deps/v8/src/init/v8.cc9
-rw-r--r--deps/v8/src/init/v8.h2
-rw-r--r--deps/v8/src/inspector/BUILD.gn2
-rw-r--r--deps/v8/src/inspector/injected-script.cc12
-rw-r--r--deps/v8/src/inspector/v8-debugger.h7
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc30
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-webdriver-serializer.cc375
-rw-r--r--deps/v8/src/inspector/v8-webdriver-serializer.h25
-rw-r--r--deps/v8/src/inspector/value-mirror.cc160
-rw-r--r--deps/v8/src/inspector/value-mirror.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc58
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc2
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc15
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc10
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h10
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc46
-rw-r--r--deps/v8/src/logging/counters-definitions.h1
-rw-r--r--deps/v8/src/logging/log.cc122
-rw-r--r--deps/v8/src/logging/log.h44
-rw-r--r--deps/v8/src/logging/runtime-call-stats.h7
-rw-r--r--deps/v8/src/maglev/OWNERS1
-rw-r--r--deps/v8/src/maglev/maglev-code-gen-state.h73
-rw-r--r--deps/v8/src/maglev/maglev-code-generator.cc317
-rw-r--r--deps/v8/src/maglev/maglev-compilation-info.cc6
-rw-r--r--deps/v8/src/maglev/maglev-compilation-info.h6
-rw-r--r--deps/v8/src/maglev/maglev-compilation-unit.cc14
-rw-r--r--deps/v8/src/maglev/maglev-compilation-unit.h21
-rw-r--r--deps/v8/src/maglev/maglev-compiler.cc88
-rw-r--r--deps/v8/src/maglev/maglev-compiler.h12
-rw-r--r--deps/v8/src/maglev/maglev-concurrent-dispatcher.cc32
-rw-r--r--deps/v8/src/maglev/maglev-concurrent-dispatcher.h9
-rw-r--r--deps/v8/src/maglev/maglev-graph-builder.cc585
-rw-r--r--deps/v8/src/maglev/maglev-graph-builder.h302
-rw-r--r--deps/v8/src/maglev/maglev-graph-printer.cc162
-rw-r--r--deps/v8/src/maglev/maglev-graph-printer.h7
-rw-r--r--deps/v8/src/maglev/maglev-graph-processor.h239
-rw-r--r--deps/v8/src/maglev/maglev-graph-verifier.h143
-rw-r--r--deps/v8/src/maglev/maglev-interpreter-frame-state.h348
-rw-r--r--deps/v8/src/maglev/maglev-ir.cc330
-rw-r--r--deps/v8/src/maglev/maglev-ir.h677
-rw-r--r--deps/v8/src/maglev/maglev-regalloc.cc112
-rw-r--r--deps/v8/src/maglev/maglev-regalloc.h6
-rw-r--r--deps/v8/src/maglev/maglev-vreg-allocator.h2
-rw-r--r--deps/v8/src/maglev/maglev.cc2
-rw-r--r--deps/v8/src/objects/call-site-info.cc18
-rw-r--r--deps/v8/src/objects/call-site-info.h1
-rw-r--r--deps/v8/src/objects/code-inl.h184
-rw-r--r--deps/v8/src/objects/code-kind.cc2
-rw-r--r--deps/v8/src/objects/code-kind.h6
-rw-r--r--deps/v8/src/objects/code.cc224
-rw-r--r--deps/v8/src/objects/code.h155
-rw-r--r--deps/v8/src/objects/code.tq12
-rw-r--r--deps/v8/src/objects/contexts-inl.h4
-rw-r--r--deps/v8/src/objects/contexts.h5
-rw-r--r--deps/v8/src/objects/contexts.tq2
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h3
-rw-r--r--deps/v8/src/objects/debug-objects.cc11
-rw-r--r--deps/v8/src/objects/debug-objects.h13
-rw-r--r--deps/v8/src/objects/debug-objects.tq5
-rw-r--r--deps/v8/src/objects/elements.cc53
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h8
-rw-r--r--deps/v8/src/objects/feedback-vector.cc58
-rw-r--r--deps/v8/src/objects/feedback-vector.h44
-rw-r--r--deps/v8/src/objects/feedback-vector.tq8
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h2
-rw-r--r--deps/v8/src/objects/fixed-array.h13
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h39
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc110
-rw-r--r--deps/v8/src/objects/js-array-buffer.h9
-rw-r--r--deps/v8/src/objects/js-array-inl.h6
-rw-r--r--deps/v8/src/objects/js-array.h6
-rw-r--r--deps/v8/src/objects/js-function-inl.h51
-rw-r--r--deps/v8/src/objects/js-function.cc253
-rw-r--r--deps/v8/src/objects/js-function.h46
-rw-r--r--deps/v8/src/objects/js-objects-inl.h22
-rw-r--r--deps/v8/src/objects/js-objects.cc117
-rw-r--r--deps/v8/src/objects/js-objects.h27
-rw-r--r--deps/v8/src/objects/js-temporal-objects.cc1392
-rw-r--r--deps/v8/src/objects/js-temporal-objects.h89
-rw-r--r--deps/v8/src/objects/lookup.cc31
-rw-r--r--deps/v8/src/objects/lookup.h1
-rw-r--r--deps/v8/src/objects/map-inl.h15
-rw-r--r--deps/v8/src/objects/map-updater.cc10
-rw-r--r--deps/v8/src/objects/map.cc41
-rw-r--r--deps/v8/src/objects/map.h4
-rw-r--r--deps/v8/src/objects/module.cc12
-rw-r--r--deps/v8/src/objects/object-macros.h10
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h23
-rw-r--r--deps/v8/src/objects/objects-definitions.h1
-rw-r--r--deps/v8/src/objects/objects.cc86
-rw-r--r--deps/v8/src/objects/objects.h17
-rw-r--r--deps/v8/src/objects/oddball-inl.h20
-rw-r--r--deps/v8/src/objects/oddball.h33
-rw-r--r--deps/v8/src/objects/oddball.tq2
-rw-r--r--deps/v8/src/objects/osr-optimized-code-cache.cc129
-rw-r--r--deps/v8/src/objects/osr-optimized-code-cache.h93
-rw-r--r--deps/v8/src/objects/property-array-inl.h19
-rw-r--r--deps/v8/src/objects/property-array.h4
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h3
-rw-r--r--deps/v8/src/objects/shared-function-info.h5
-rw-r--r--deps/v8/src/objects/shared-function-info.tq1
-rw-r--r--deps/v8/src/objects/swiss-hash-table-helpers.h151
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h23
-rw-r--r--deps/v8/src/objects/tagged-field.h12
-rw-r--r--deps/v8/src/objects/value-serializer.cc36
-rw-r--r--deps/v8/src/objects/value-serializer.h5
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc148
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h10
-rw-r--r--deps/v8/src/profiler/profile-generator.cc37
-rw-r--r--deps/v8/src/profiler/profile-generator.h6
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc4
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc2
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc2
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc2
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc2
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc2
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc2
-rw-r--r--deps/v8/src/roots/roots.h3
-rw-r--r--deps/v8/src/runtime/runtime-array.cc2
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc31
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc250
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc14
-rw-r--r--deps/v8/src/runtime/runtime-object.cc27
-rw-r--r--deps/v8/src/runtime/runtime-shadow-realm.cc22
-rw-r--r--deps/v8/src/runtime/runtime-test.cc135
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc22
-rw-r--r--deps/v8/src/runtime/runtime.h39
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc159
-rw-r--r--deps/v8/src/snapshot/code-serializer.h3
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc8
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc83
-rw-r--r--deps/v8/src/snapshot/context-serializer.h2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc29
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data-inl.h159
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc72
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.h71
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc2
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc7
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h1
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc5
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc18
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h1
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc7
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc16
-rw-r--r--deps/v8/src/snapshot/roots-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.h7
-rw-r--r--deps/v8/src/snapshot/serializer.cc311
-rw-r--r--deps/v8/src/snapshot/serializer.h24
-rw-r--r--deps/v8/src/snapshot/shared-heap-serializer.cc19
-rw-r--r--deps/v8/src/snapshot/snapshot.cc4
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc12
-rw-r--r--deps/v8/src/torque/constants.h3
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc40
-rw-r--r--deps/v8/src/torque/torque-parser.cc6
-rw-r--r--deps/v8/src/torque/types.h21
-rw-r--r--deps/v8/src/trap-handler/handler-outside.cc2
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h2
-rw-r--r--deps/v8/src/utils/memcopy.cc2
-rw-r--r--deps/v8/src/utils/utils.h18
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h49
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h29
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h117
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc60
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h7
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc167
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h18
-rw-r--r--deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h65
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h55
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h62
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h23
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h165
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h144
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h34
-rw-r--r--deps/v8/src/wasm/canonical-types.cc155
-rw-r--r--deps/v8/src/wasm/canonical-types.h125
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h2
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc1
-rw-r--r--deps/v8/src/wasm/function-compiler.cc7
-rw-r--r--deps/v8/src/wasm/function-compiler.h4
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc14
-rw-r--r--deps/v8/src/wasm/init-expr-interface.cc2
-rw-r--r--deps/v8/src/wasm/module-compiler.cc84
-rw-r--r--deps/v8/src/wasm/module-decoder.cc46
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc45
-rw-r--r--deps/v8/src/wasm/struct-types.h12
-rw-r--r--deps/v8/src/wasm/value-type.h84
-rw-r--r--deps/v8/src/wasm/wasm-arguments.h4
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc11
-rw-r--r--deps/v8/src/wasm/wasm-engine.h5
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc2
-rw-r--r--deps/v8/src/wasm/wasm-js.cc38
-rw-r--r--deps/v8/src/wasm/wasm-limits.h2
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc22
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h19
-rw-r--r--deps/v8/src/wasm/wasm-module.h44
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h4
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc17
-rw-r--r--deps/v8/src/wasm/wasm-objects.h10
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq1
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc35
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.h20
-rw-r--r--deps/v8/src/wasm/wasm-value.h6
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc26
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.h7
-rw-r--r--deps/v8/test/cctest/BUILD.gn2
-rw-r--r--deps/v8/test/cctest/cctest.cc283
-rw-r--r--deps/v8/test/cctest/cctest.h152
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction-scheduler.cc11
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc1020
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc29
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc19
-rw-r--r--deps/v8/test/cctest/heap/test-memory-measurement.cc14
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc58
-rw-r--r--deps/v8/test/cctest/heap/test-unmapper.cc15
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc5
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompareBoolean.golden368
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden70
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc56
-rw-r--r--deps/v8/test/cctest/test-allocation.cc28
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc61
-rw-r--r--deps/v8/test/cctest/test-api.cc48
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc8
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc7
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc47
-rw-r--r--deps/v8/test/cctest/test-debug.cc44
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc29
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc4
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc14
-rw-r--r--deps/v8/test/cctest/test-js-to-wasm.cc1
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc3
-rw-r--r--deps/v8/test/cctest/test-log.cc3
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm64.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-loong64.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-riscv64.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc7
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc52
-rw-r--r--deps/v8/test/cctest/test-roots.cc1
-rw-r--r--deps/v8/test/cctest/test-serialize.cc2
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc45
-rw-r--r--deps/v8/test/cctest/test-unwinder-code-pages.cc2
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc74
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc28
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-metrics.cc34
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc2
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h2
-rw-r--r--deps/v8/test/fuzzer/inspector/regress-1307449529
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage-block.js1
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage.js1
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints.js6
-rw-r--r--deps/v8/test/inspector/isolate-data.cc14
-rw-r--r--deps/v8/test/inspector/runtime/add-web-driver-value-expected.txt633
-rw-r--r--deps/v8/test/inspector/runtime/add-web-driver-value.js132
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-expected.txt68
-rw-r--r--deps/v8/test/inspector/runtime/remote-object.js36
-rw-r--r--deps/v8/test/intl/intl.status8
-rw-r--r--deps/v8/test/js-perf-test/Array/includes.js67
-rw-r--r--deps/v8/test/js-perf-test/Array/index-of.js67
-rw-r--r--deps/v8/test/js-perf-test/Array/run.js3
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/compare.js114
-rw-r--r--deps/v8/test/js-perf-test/JSTests2.json21
-rw-r--r--deps/v8/test/js-perf-test/JSTests3.json1
-rw-r--r--deps/v8/test/js-perf-test/JSTests5.json12
-rw-r--r--deps/v8/test/js-perf-test/ObjectDestructuringAssignment/run.js98
-rw-r--r--deps/v8/test/message/message.status28
-rw-r--r--deps/v8/test/message/unicode-filename-🎅🎄.js5
-rw-r--r--deps/v8/test/message/unicode-filename-🎅🎄.out1
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-promise-all-settled.js45
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1302572.js15
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js36
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-groupby-fast-path-assumptions.js37
-rw-r--r--deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy-no-lazy-feedback.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy-no-lazy-feedback.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/private-reference-logical-assignment-short-circuit.js135
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-bind.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props-stack.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props.js121
-rw-r--r--deps/v8/test/mjsunit/harmony/typedarray-set-length-detach.js19
-rw-r--r--deps/v8/test/mjsunit/maglev/19.js25
-rw-r--r--deps/v8/test/mjsunit/maglev/add-smi.js41
-rw-r--r--deps/v8/test/mjsunit/maglev/argument-over-under-application.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/lazy-deopt-with-onstack-activation.js33
-rw-r--r--deps/v8/test/mjsunit/maglev/lazy-deopt-without-onstack-activation.js24
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js36
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status38
-rw-r--r--deps/v8/test/mjsunit/optimized-array-includes.js358
-rw-r--r--deps/v8/test/mjsunit/optimized-array-indexof.js360
-rw-r--r--deps/v8/test/mjsunit/optimized-string-includes.js152
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1309769.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1312022.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1312310.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1313419.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1313475.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-331444.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-454725.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-840106.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1302527-no-lazy-feedback.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1302527.js612
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1306929.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1307310.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-10817.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-11614.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12219.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12421.js140
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12632.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12705.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12729-1.mjs9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12729.mjs8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12762.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5697.js8
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1299183.js215
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1308333.js260
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1314363.js17
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js16
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-without-map-space.js12
-rw-r--r--deps/v8/test/mjsunit/smi-ops-inlined.js2
-rw-r--r--deps/v8/test/mjsunit/smi-ops.js2
-rw-r--r--deps/v8/test/mjsunit/stack-traces-class-fields.js8
-rw-r--r--deps/v8/test/mjsunit/temporal/duration-negated.js2
-rw-r--r--deps/v8/test/mjsunit/testcfg.py6
-rw-r--r--deps/v8/test/mjsunit/thin-strings.js15
-rw-r--r--deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js117
-rw-r--r--deps/v8/test/mjsunit/typedarray-helpers.js18
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js143
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js345
-rw-r--r--deps/v8/test/mjsunit/wasm/call-ref.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js34
-rw-r--r--deps/v8/test/mjsunit/wasm/generic-wrapper.js23
-rw-r--r--deps/v8/test/mjsunit/wasm/imported-function-types.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-globals.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-tables.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/resizablearraybuffer-growablesharedarraybuffer-wasm.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js67
-rw-r--r--deps/v8/test/mjsunit/wasm/speculative-inlining.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-memory.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-running-empty-loop-interruptible.js32
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc1
-rw-r--r--deps/v8/test/mozilla/testcfg.py1
-rw-r--r--deps/v8/test/test262/test262.status344
-rw-r--r--deps/v8/test/test262/testcfg.py3
-rw-r--r--deps/v8/test/unittests/BUILD.gn4
-rw-r--r--deps/v8/test/unittests/api/deserialize-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc25
-rw-r--r--deps/v8/test/unittests/base/template-utils-unittest.cc57
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc75
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/persistent-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc145
-rw-r--r--deps/v8/test/unittests/compiler/zone-stats-unittest.cc2
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc11
-rw-r--r--deps/v8/test/unittests/heap/barrier-unittest.cc153
-rw-r--r--deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc108
-rw-r--r--deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc17
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.cc20
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc1000
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc98
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.h36
-rw-r--r--deps/v8/test/unittests/heap/lab-unittest.cc (renamed from deps/v8/test/cctest/heap/test-lab.cc)227
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc57
-rw-r--r--deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc82
-rw-r--r--deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc13
-rw-r--r--deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc215
-rw-r--r--deps/v8/test/unittests/objects/swiss-hash-table-helpers-unittest.cc109
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc48
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc29
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc8
-rw-r--r--deps/v8/test/unittests/test-utils.cc18
-rw-r--r--deps/v8/test/unittests/test-utils.h196
-rw-r--r--deps/v8/test/unittests/testcfg.py3
-rw-r--r--deps/v8/test/unittests/utils/allocation-unittest.cc10
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc35
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc59
-rw-r--r--deps/v8/test/unittests/wasm/subtyping-unittest.cc107
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc3
-rw-r--r--deps/v8/test/unittests/zone/zone-allocator-unittest.cc14
-rw-r--r--deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc33
-rw-r--r--deps/v8/test/unittests/zone/zone-unittest.cc9
-rw-r--r--deps/v8/test/wasm-api-tests/testcfg.py3
-rw-r--r--deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h4
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/roll.py5
-rw-r--r--deps/v8/third_party/zlib/README.chromium3
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c28
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/inflate.c14
-rw-r--r--deps/v8/third_party/zlib/crc32.c6
-rw-r--r--deps/v8/third_party/zlib/deflate.c34
-rw-r--r--deps/v8/third_party/zlib/google/zip.cc27
-rw-r--r--deps/v8/third_party/zlib/google/zip.h6
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc192
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.h33
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader_unittest.cc18
-rw-r--r--deps/v8/third_party/zlib/google/zip_unittest.cc564
-rw-r--r--deps/v8/third_party/zlib/gzguts.h2
-rw-r--r--deps/v8/third_party/zlib/gzlib.c4
-rw-r--r--deps/v8/third_party/zlib/gzread.c10
-rw-r--r--deps/v8/third_party/zlib/gzwrite.c25
-rw-r--r--deps/v8/third_party/zlib/inffast.c28
-rw-r--r--deps/v8/third_party/zlib/inflate.c14
-rw-r--r--deps/v8/third_party/zlib/inflate.h3
-rw-r--r--deps/v8/third_party/zlib/patches/0009-infcover-oob.patch24
-rw-r--r--deps/v8/third_party/zlib/trees.c5
-rw-r--r--deps/v8/third_party/zlib/zlib.h26
-rw-r--r--deps/v8/third_party/zlib/zutil.c4
-rw-r--r--deps/v8/third_party/zlib/zutil.h8
-rw-r--r--deps/v8/tools/PRESUBMIT.py3
-rwxr-xr-xdeps/v8/tools/chrome/linux-perf-renderer-cmd.sh45
-rwxr-xr-xdeps/v8/tools/chrome/linux_perf.py207
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py6
-rwxr-xr-xdeps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py2
-rwxr-xr-xdeps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py2
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py8
-rw-r--r--deps/v8/tools/csvparser.mjs18
-rw-r--r--deps/v8/tools/debug_helper/gen-heap-constants.py2
-rw-r--r--deps/v8/tools/disasm.py2
-rw-r--r--deps/v8/tools/dumpcpp.mjs6
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn2
-rw-r--r--deps/v8/tools/gcmole/gcmole.cc3
-rwxr-xr-xdeps/v8/tools/gcmole/gcmole.py61
-rwxr-xr-xdeps/v8/tools/gcmole/run-gcmole.py5
-rw-r--r--deps/v8/tools/gcmole/suspects.allowlist (renamed from deps/v8/tools/gcmole/suspects.whitelist)0
-rwxr-xr-xdeps/v8/tools/get_landmines.py5
-rwxr-xr-xdeps/v8/tools/grokdump.py113
-rw-r--r--deps/v8/tools/js/helper.mjs19
-rw-r--r--deps/v8/tools/js/log-file-reader-template.html38
-rw-r--r--deps/v8/tools/js/web-api-helper.mjs60
-rw-r--r--deps/v8/tools/logreader.mjs121
-rw-r--r--deps/v8/tools/mb/PRESUBMIT.py3
-rwxr-xr-xdeps/v8/tools/mb/mb_test.py (renamed from deps/v8/tools/mb/mb_unittest.py)431
-rw-r--r--deps/v8/tools/parse-processor.mjs19
-rw-r--r--deps/v8/tools/profile.mjs36
-rwxr-xr-xdeps/v8/tools/run.py4
-rw-r--r--deps/v8/tools/run_perf.py27
-rw-r--r--deps/v8/tools/system-analyzer/app-model.mjs13
-rw-r--r--deps/v8/tools/system-analyzer/helper.mjs36
-rw-r--r--deps/v8/tools/system-analyzer/index.css19
-rw-r--r--deps/v8/tools/system-analyzer/index.html14
-rw-r--r--deps/v8/tools/system-analyzer/index.mjs151
-rw-r--r--deps/v8/tools/system-analyzer/log/api.mjs24
-rw-r--r--deps/v8/tools/system-analyzer/log/code.mjs62
-rw-r--r--deps/v8/tools/system-analyzer/log/timer.mjs4
-rw-r--r--deps/v8/tools/system-analyzer/processor.mjs91
-rw-r--r--deps/v8/tools/system-analyzer/view/code-panel-template.html4
-rw-r--r--deps/v8/tools/system-analyzer/view/code-panel.mjs160
-rw-r--r--deps/v8/tools/system-analyzer/view/events.mjs4
-rw-r--r--deps/v8/tools/system-analyzer/view/property-link-table-template.html48
-rw-r--r--deps/v8/tools/system-analyzer/view/property-link-table.mjs34
-rw-r--r--deps/v8/tools/system-analyzer/view/script-panel-template.html2
-rw-r--r--deps/v8/tools/system-analyzer/view/script-panel.mjs3
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline-panel.mjs27
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs208
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-map.mjs119
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs17
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html45
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs9
-rw-r--r--deps/v8/tools/system-analyzer/view/tool-tip-template.html21
-rw-r--r--deps/v8/tools/system-analyzer/view/tool-tip.mjs6
-rw-r--r--deps/v8/tools/testrunner/PRESUBMIT.py17
-rw-r--r--deps/v8/tools/testrunner/base_runner.py8
-rw-r--r--deps/v8/tools/testrunner/local/command.py15
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/pool.py5
-rwxr-xr-xdeps/v8/tools/testrunner/local/pool_test.py (renamed from deps/v8/tools/testrunner/local/pool_unittest.py)19
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py4
-rwxr-xr-xdeps/v8/tools/testrunner/local/statusfile_test.py (renamed from deps/v8/tools/testrunner/local/statusfile_unittest.py)62
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_test.py (renamed from deps/v8/tools/testrunner/local/testsuite_unittest.py)23
-rw-r--r--deps/v8/tools/testrunner/local/utils.py3
-rw-r--r--deps/v8/tools/testrunner/local/variants.py11
-rw-r--r--deps/v8/tools/testrunner/local/verbose.py3
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py6
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py16
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py11
-rw-r--r--deps/v8/tools/testrunner/outproc/message.py5
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py5
-rw-r--r--deps/v8/tools/testrunner/testproc/combiner.py3
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py81
-rw-r--r--deps/v8/tools/testrunner/testproc/sequence_test.py (renamed from deps/v8/tools/testrunner/testproc/sequence_unittest.py)83
-rwxr-xr-xdeps/v8/tools/testrunner/testproc/shard_test.py (renamed from deps/v8/tools/testrunner/testproc/shard_unittest.py)25
-rw-r--r--deps/v8/tools/testrunner/testproc/sigproc.py3
-rw-r--r--deps/v8/tools/testrunner/testproc/timeout.py1
-rw-r--r--deps/v8/tools/testrunner/testproc/util.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/util_test.py (renamed from deps/v8/tools/testrunner/testproc/util_unittest.py)34
-rwxr-xr-xdeps/v8/tools/testrunner/testproc/variant_test.py (renamed from deps/v8/tools/testrunner/testproc/variant_unittest.py)42
-rw-r--r--deps/v8/tools/testrunner/utils/dump_build_config_gyp.py3
-rw-r--r--deps/v8/tools/tickprocessor.mjs8
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py21
-rw-r--r--deps/v8/tools/turbolizer/info-view.html8
-rw-r--r--deps/v8/tools/turbolizer/src/edge.ts35
-rw-r--r--deps/v8/tools/turbolizer/src/graph-layout.ts6
-rw-r--r--deps/v8/tools/turbolizer/src/graph-view.ts50
-rw-r--r--deps/v8/tools/turbolizer/src/graph.ts4
-rw-r--r--deps/v8/tools/turbolizer/src/graphmultiview.ts39
-rw-r--r--deps/v8/tools/turbolizer/src/node.ts3
-rw-r--r--deps/v8/tools/turbolizer/src/selection.ts8
-rw-r--r--deps/v8/tools/turbolizer/src/source-resolver.ts6
-rw-r--r--deps/v8/tools/turbolizer/src/text-view.ts10
-rw-r--r--deps/v8/tools/turbolizer/src/view.ts6
-rw-r--r--deps/v8/tools/unittests/__init__.py2
-rw-r--r--deps/v8/tools/unittests/compare_torque_output_test.py13
-rwxr-xr-xdeps/v8/tools/unittests/run_perf_test.py28
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py11
-rwxr-xr-xdeps/v8/tools/unittests/v8_presubmit_test.py2
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py102
-rw-r--r--deps/v8/tools/v8heapconst.py701
882 files changed, 29302 insertions, 15793 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index f05ba729c5..e095f81265 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -41,6 +41,7 @@ Meteor Development Group <*@meteor.com>
Cloudflare, Inc. <*@cloudflare.com>
Julia Computing, Inc. <*@juliacomputing.com>
CodeWeavers, Inc. <*@codeweavers.com>
+Alibaba, Inc. <*@alibaba-inc.com>
Aaron Bieber <deftly@gmail.com>
Aaron O'Mullan <aaron.omullan@gmail.com>
@@ -90,9 +91,11 @@ Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Dromboski <dandromb@gmail.com>
Daniel James <dnljms@gmail.com>
Daniel Shelton <d1.shelton@samsung.com>
+Danylo Boiko <danielboyko02@gmail.com>
Darshan Sen <raisinten@gmail.com>
David Carlier <devnexen@gmail.com>
David Manouchehri <david@davidmanouchehri.com>
+David Sanders <dsanders11@ucsbalum.com>
Deepak Mohan <hop2deep@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
Derek Tu <derek.t@rioslab.org>
@@ -115,6 +118,7 @@ Gus Caplan <me@gus.host>
Gwang Yoon Hwang <ryumiel@company100.net>
Haichuan Wang <hc.opensource@gmail.com>
Hannu Trey <hannu.trey@gmail.com>
+Harshal Nandigramwar <pro.bbcom18@gmail.com>
Harshil Jain <twitharshil@gmail.com>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
@@ -135,7 +139,6 @@ Javad Amiri <javad.amiri@anu.edu.au>
Jay Freeman <saurik@saurik.com>
Jesper van den Ende <jespertheend@gmail.com>
Ji Qiu <qiuji@iscas.ac.cn>
-Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
Joel Stanley <joel@jms.id.au>
@@ -198,8 +201,6 @@ Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
PhistucK <phistuck@gmail.com>
-Qingyan Li <qingyan.liqy@alibaba-inc.com>
-Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
Raul Tambre <raul@tambre.ee>
Ray Glover <ray@rayglover.net>
@@ -248,6 +249,7 @@ Vladimir Shutoff <vovan@shutoff.ru>
Wael Almattar <waelsy123@gmail.com>
Wei Wu <lazyparser@gmail.com>
Wenlu Wang <kingwenlu@gmail.com>
+Wenming Yang <yangwenming@bytedance.com>
Wenyu Zhao <wenyu.zhao@anu.edu.au>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Wouter Vermeiren <wouter.vermeiren@essensium.com>
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index bc18ab8c27..279e3e55a0 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -44,7 +44,6 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# v8_enable_builtins_profiling
# v8_enable_builtins_profiling_verbose
# v8_builtins_profiling_log_file
-# v8_enable_short_builtin_calls
# v8_enable_external_code_space
# v8_postmortem_support
# v8_use_siphash
@@ -231,6 +230,62 @@ selects.config_setting_group(
],
)
+# We use a string flag to create a 3 value-logic.
+# If no explicit value for v8_enable_short_builtin_calls, we set it to 'none'.
+v8_string(
+ name = "v8_enable_short_builtin_calls",
+ default = "none",
+)
+
+# Default setting for v8_enable_pointer_compression.
+config_setting(
+ name = "v8_enable_short_builtin_calls_is_none",
+ flag_values = {
+ ":v8_enable_short_builtin_calls": "none",
+ },
+)
+
+# Explicity defined v8_enable_pointer_compression.
+config_setting(
+ name = "v8_enable_short_builtin_calls_is_true",
+ flag_values = {
+ ":v8_enable_short_builtin_calls": "True",
+ },
+)
+
+# Default setting for v8_enable_short_builtin_calls when target is x64.
+# Disable short calls when pointer compression is not enabled.
+selects.config_setting_group(
+ name = "v8_target_x64_default_short_builtin_calls",
+ match_all = [
+ ":v8_enable_short_builtin_calls_is_none",
+ "@v8//bazel/config:v8_target_x64",
+ ":is_v8_enable_pointer_compression",
+ ],
+)
+
+# Default setting for v8_enable_short_builtin_calls when target is arm64, but not Android.
+selects.config_setting_group(
+ name = "v8_target_arm64_default_short_builtin_calls",
+ match_all = [
+ ":v8_enable_short_builtin_calls_is_none",
+ "@v8//bazel/config:v8_target_arm64",
+ "@v8//bazel/config:is_not_android",
+ ],
+)
+
+# v8_enable_short_builtin_calls is valid whenever it is explicitly defined
+# or we have the default settings for targets x64 and arm64.
+# TODO(victorgomes): v8_enable_short_builtin_calls should not be enabled when CFI is enabled.
+selects.config_setting_group(
+ name = "is_v8_enable_short_builtin_calls",
+ match_any = [
+ ":v8_enable_short_builtin_calls_is_true",
+ ":v8_target_x64_default_short_builtin_calls",
+ ":v8_target_arm64_default_short_builtin_calls",
+ ],
+)
+
# Enable -rdynamic.
selects.config_setting_group(
name = "should_add_rdynamic",
@@ -340,6 +395,11 @@ v8_config(
],
"//conditions:default": [],
}) + select({
+ ":is_v8_enable_short_builtin_calls": [
+ "V8_SHORT_BUILTIN_CALLS",
+ ],
+ "//conditions:default": [],
+ }) + select({
":is_v8_enable_test_features": [
"V8_ENABLE_ALLOCATION_TIMEOUT",
"V8_ENABLE_FORCE_SLOW_PATH",
@@ -606,7 +666,6 @@ filegroup(
"src/base/template-utils.h",
"src/base/timezone-cache.h",
"src/base/threaded-list.h",
- "src/base/type-traits.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
"src/base/vector.h",
@@ -1305,7 +1364,6 @@ filegroup(
"src/heap/allocation-stats.h",
"src/heap/array-buffer-sweeper.cc",
"src/heap/array-buffer-sweeper.h",
- "src/heap/barrier.h",
"src/heap/base-space.cc",
"src/heap/base-space.h",
"src/heap/basic-memory-chunk.cc",
@@ -1331,7 +1389,9 @@ filegroup(
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h",
+ "src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-state.h",
+ "src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
@@ -1353,6 +1413,7 @@ filegroup(
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
+ "src/heap/gc-tracer-inl.h",
"src/heap/gc-tracer.h",
"src/heap/heap-allocator-inl.h",
"src/heap/heap-allocator.cc",
@@ -1930,6 +1991,7 @@ filegroup(
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
+ "src/runtime/runtime-shadow-realm.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
@@ -1964,6 +2026,7 @@ filegroup(
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/embedded/embedded-data.cc",
+ "src/snapshot/embedded/embedded-data-inl.h",
"src/snapshot/embedded/embedded-data.h",
"src/snapshot/embedded/embedded-file-writer-interface.h",
"src/snapshot/object-deserializer.cc",
@@ -2374,6 +2437,8 @@ filegroup(
"src/wasm/baseline/liftoff-compiler.h",
"src/wasm/baseline/liftoff-register.h",
"src/wasm/branch-hint-map.h",
+ "src/wasm/canonical-types.cc",
+ "src/wasm/canonical-types.h",
"src/wasm/code-space-access.cc",
"src/wasm/code-space-access.h",
"src/wasm/compilation-environment.h",
@@ -2550,6 +2615,8 @@ filegroup(
"src/compiler/backend/unwinding-info-writer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
+ "src/compiler/branch-condition-duplicator.cc",
+ "src/compiler/branch-condition-duplicator.h",
"src/compiler/branch-elimination.cc",
"src/compiler/branch-elimination.h",
"src/compiler/bytecode-analysis.cc",
@@ -2857,7 +2924,6 @@ filegroup(
"src/heap/cppgc/compactor.h",
"src/heap/cppgc/concurrent-marker.cc",
"src/heap/cppgc/concurrent-marker.h",
- "src/heap/cppgc/default-platform.cc",
"src/heap/cppgc/explicit-management.cc",
"src/heap/cppgc/free-list.cc",
"src/heap/cppgc/free-list.h",
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 9e801d2455..988c907d96 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -299,7 +299,7 @@ declare_args() {
# Enable the experimental V8 sandbox.
# Sets -DV8_SANDBOX.
- v8_enable_sandbox = false
+ v8_enable_sandbox = ""
# Enable external pointer sandboxing. Requires v8_enable_sandbox.
# Sets -DV8_SANDBOXED_EXTERNAL_POINRTERS.
@@ -421,13 +421,10 @@ if (v8_enable_short_builtin_calls == "") {
v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64")
}
if (v8_enable_external_code_space == "") {
- # Can't use !is_android here, because Torque toolchain is affected by
- # the value of this flag but actually runs on the host side.
v8_enable_external_code_space =
v8_enable_pointer_compression &&
(v8_current_cpu == "x64" ||
- (target_os != "android" && target_os != "fuchsia" &&
- v8_current_cpu == "arm64"))
+ (target_os != "fuchsia" && v8_current_cpu == "arm64"))
}
if (v8_enable_maglev == "") {
v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression
@@ -474,7 +471,8 @@ if (v8_multi_arch_build &&
# Check if it is a Chromium build and activate PAC/BTI if needed.
# TODO(cavalcantii): have a single point of integration with PAC/BTI flags.
if (build_with_chromium && v8_current_cpu == "arm64" &&
- arm_control_flow_integrity == "standard") {
+ (arm_control_flow_integrity == "standard" ||
+ arm_control_flow_integrity == "pac")) {
v8_control_flow_integrity = true
}
@@ -492,10 +490,12 @@ if (v8_enable_shared_ro_heap == "") {
v8_enable_pointer_compression_shared_cage
}
-# Enable the v8 sandbox on 64-bit Chromium builds.
-if (build_with_chromium && v8_enable_pointer_compression_shared_cage &&
- v8_enable_external_code_space) {
- v8_enable_sandbox = true
+if (v8_enable_sandbox == "") {
+ # TODO(saelo, v8:11880) remove dependency on v8_enable_external_code_space
+ # once that is enabled everywhere by default.
+ v8_enable_sandbox =
+ build_with_chromium && v8_enable_pointer_compression_shared_cage &&
+ v8_enable_external_code_space
}
# Enable all available sandbox features if sandbox future is enabled.
@@ -1044,8 +1044,8 @@ config("toolchain") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
if (current_cpu == "arm64") {
# This will enable PAC+BTI in code generation and static code.
- if (v8_control_flow_integrity) {
- # TODO(v8:10026): Enable this in src/build.
+ if (v8_control_flow_integrity &&
+ (!build_with_chromium || arm_control_flow_integrity == "standard")) {
cflags += [ "-mbranch-protection=standard" ]
asmflags = [ "-mmark-bti-property" ]
} else if (build_with_chromium && arm_control_flow_integrity == "pac") {
@@ -1179,6 +1179,9 @@ config("toolchain") {
#FIXME: Temporarily use MIPS macro for the building.
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
+ if (target_is_simulator) {
+ defines += [ "CAN_USE_RVV_INSTRUCTIONS" ]
+ }
}
if (v8_current_cpu == "x86") {
@@ -2778,6 +2781,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/backend/spill-placer.h",
"src/compiler/backend/unwinding-info-writer.h",
"src/compiler/basic-block-instrumentor.h",
+ "src/compiler/branch-condition-duplicator.h",
"src/compiler/branch-elimination.h",
"src/compiler/bytecode-analysis.h",
"src/compiler/bytecode-graph-builder.h",
@@ -2963,7 +2967,6 @@ v8_header_set("v8_internal_headers") {
"src/heap/allocation-result.h",
"src/heap/allocation-stats.h",
"src/heap/array-buffer-sweeper.h",
- "src/heap/barrier.h",
"src/heap/base-space.h",
"src/heap/basic-memory-chunk.h",
"src/heap/code-object-registry.h",
@@ -2978,6 +2981,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-marking-state.h",
"src/heap/cppgc-js/cpp-snapshot.h",
+ "src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
@@ -2993,6 +2997,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/free-list-inl.h",
"src/heap/free-list.h",
"src/heap/gc-idle-time-handler.h",
+ "src/heap/gc-tracer-inl.h",
"src/heap/gc-tracer.h",
"src/heap/heap-allocator-inl.h",
"src/heap/heap-allocator.h",
@@ -3390,6 +3395,7 @@ v8_header_set("v8_internal_headers") {
"src/snapshot/context-deserializer.h",
"src/snapshot/context-serializer.h",
"src/snapshot/deserializer.h",
+ "src/snapshot/embedded/embedded-data-inl.h",
"src/snapshot/embedded/embedded-data.h",
"src/snapshot/embedded/embedded-file-writer-interface.h",
"src/snapshot/object-deserializer.h",
@@ -3479,6 +3485,7 @@ v8_header_set("v8_internal_headers") {
"src/maglev/maglev-graph-labeller.h",
"src/maglev/maglev-graph-printer.h",
"src/maglev/maglev-graph-processor.h",
+ "src/maglev/maglev-graph-verifier.h",
"src/maglev/maglev-graph.h",
"src/maglev/maglev-interpreter-frame-state.h",
"src/maglev/maglev-ir.h",
@@ -3510,6 +3517,7 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/liftoff-assembler.h",
"src/wasm/baseline/liftoff-compiler.h",
"src/wasm/baseline/liftoff-register.h",
+ "src/wasm/canonical-types.h",
"src/wasm/code-space-access.h",
"src/wasm/compilation-environment.h",
"src/wasm/decoder.h",
@@ -3890,6 +3898,7 @@ v8_compiler_sources = [
"src/compiler/backend/register-allocator.cc",
"src/compiler/backend/spill-placer.cc",
"src/compiler/basic-block-instrumentor.cc",
+ "src/compiler/branch-condition-duplicator.cc",
"src/compiler/branch-elimination.cc",
"src/compiler/bytecode-analysis.cc",
"src/compiler/bytecode-graph-builder.cc",
@@ -4227,6 +4236,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/concurrent-marking.cc",
"src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-snapshot.cc",
+ "src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/embedder-tracing.cc",
@@ -4445,6 +4455,7 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
+ "src/runtime/runtime-shadow-realm.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
@@ -4535,6 +4546,7 @@ v8_source_set("v8_base_without_compiler") {
"src/trap-handler/handler-shared.cc",
"src/wasm/baseline/liftoff-assembler.cc",
"src/wasm/baseline/liftoff-compiler.cc",
+ "src/wasm/canonical-types.cc",
"src/wasm/code-space-access.cc",
"src/wasm/function-body-decoder.cc",
"src/wasm/function-compiler.cc",
@@ -5185,7 +5197,6 @@ v8_component("v8_libbase") {
"src/base/template-utils.h",
"src/base/threaded-list.h",
"src/base/timezone-cache.h",
- "src/base/type-traits.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
"src/base/v8-fallthrough.h",
@@ -5603,7 +5614,6 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/compactor.h",
"src/heap/cppgc/concurrent-marker.cc",
"src/heap/cppgc/concurrent-marker.h",
- "src/heap/cppgc/default-platform.cc",
"src/heap/cppgc/explicit-management.cc",
"src/heap/cppgc/free-list.cc",
"src/heap/cppgc/free-list.h",
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 61577d45ab..54ac85a4bd 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -40,10 +40,10 @@ vars = {
'reclient_version': 're_client_version:0.40.0.40ff5a5',
# GN CIPD package version.
- 'gn_version': 'git_revision:bd99dbf98cbdefe18a4128189665c5761263bcfb',
+ 'gn_version': 'git_revision:ae110f8b525009255ba1f9ae96982176d3bfad3d',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:cb424e70e75136736a86359ef070aa96425fe7a3',
+ 'luci_go': 'git_revision:6da0608e4fa8a3c6d1fa4f855485c0038b05bf72',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -83,9 +83,9 @@ deps = {
'base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd115b033c4e53666b535cbd1985ffe60badad082',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '3d9590754d5d23e62d15472c5baf6777ca59df20',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b37c340767cf9e7777d4ca5a588c34c5744df9b2',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '113dd1badbcbffea108a8c95ac7c89c22bfd25f3',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'c2e4795660817c2776dbabd778b92ed58c074032',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'e435ad79c17b1888b34df88d6a30a094936e3836',
'buildtools/linux64': {
@@ -111,9 +111,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'a897d0f3f8e8c28ac2abf848f3b695b724409298',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'e025ba5dc85202540099d7cd8e72eae2d4ee9e33',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd1c7f92b8b0bff8d9f710ca40e44563a63db376e',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'c39fea88739be63a2d5590a938ce19d762b915fc',
'buildtools/win': {
'packages': [
{
@@ -139,7 +139,7 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f7fb969cc4934bbc5aa29a378d59325eaa84f475',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd7c0a2076c2b0c1531aef7069d4abe70eec44ee3',
'third_party/aemu-linux-x64': {
'packages': [
{
@@ -161,11 +161,11 @@ deps = {
'dep_type': 'cipd',
},
'third_party/android_ndk': {
- 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '9644104c8cf85bf1bdce5b1c0691e9778572c3f8',
+ 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '87b4b48de3c8204224d63612c287eb5a447a562d',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '2760db43ffc8b074cb7960c90b5254f74a5c299a',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -207,7 +207,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b3fe2c177912640bc676b332a2f41dc812ea5843',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '3a1ae18f882d024686144edbec3050aae055f146',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -215,18 +215,18 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'b199f549263a02900faef8c8c3d581c580e837c3',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '3b97fa826eee4bd1978c4c049038b1e4f201e8f2',
'third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '7c9c220d13ab367d49420144a257886ebfbce278',
'condition': 'checkout_fuchsia',
},
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '5704cd4c8cea889d68f9ae29ca5aaee97ef91816',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'dc901ff9090e2b931433790cc44afc3af3b09ab2',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ae5e06dd35c6137d335331b0815cf1f60fd7e3c5',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '8a5b728e4f43b0eabdb9ea450f956d67cfb22719',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1fd0dbea04448c3f73fe5cb7599f9472f0f107f1',
'third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e09c4b66b6e87116eb190651421f1a6e2f3b9c52',
'third_party/ittapi': {
@@ -272,19 +272,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'b0676a1f52484bf53a1a49d0e48ff8abc430fafe',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'a6d209ab932df0f1c9d5b7dc67cfa74e8a3272c0',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b60d34c100e5a8f4b01d838527f000faab673da3',
- 'tools/clang/dsymutil': {
- 'packages': [
- {
- 'package': 'chromium/llvm-build-tools/dsymutil',
- 'version': 'M56jPzDv1620Rnm__jTMYS62Zi8rxHVq7yw0qeBFEgkC',
- }
- ],
- 'condition': 'checkout_mac',
- 'dep_type': 'cipd',
- },
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b5e2f7c16bbf3aefc9354e8fbad3de0a543f2193',
'tools/luci-go': {
'packages': [
{
@@ -355,45 +345,87 @@ hooks = [
'name': 'bazel',
'pattern': '.',
'condition': 'download_prebuilt_bazel',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--bucket', 'chromium-v8-prebuilt-bazel/linux',
'--no_resume',
'-s', 'tools/bazel/bazel.sha1',
'--platform=linux*',
],
},
+ # Pull dsymutil binaries using checked-in hashes.
+ {
+ 'name': 'dsymutil_mac_arm64',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac" and host_cpu == "arm64"',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-browser-clang',
+ '-s', 'tools/clang/dsymutil/bin/dsymutil.arm64.sha1',
+ '-o', 'tools/clang/dsymutil/bin/dsymutil',
+ ],
+ },
+ {
+ 'name': 'dsymutil_mac_x64',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac" and host_cpu == "x64"',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-browser-clang',
+ '-s', 'tools/clang/dsymutil/bin/dsymutil.x64.sha1',
+ '-o', 'tools/clang/dsymutil/bin/dsymutil',
+ ],
+ },
# Pull clang-format binaries using checked-in hashes.
{
'name': 'clang_format_win',
'pattern': '.',
'condition': 'host_os == "win"',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
- '--platform=win32',
'--no_auth',
'--bucket', 'chromium-clang-format',
'-s', 'buildtools/win/clang-format.exe.sha1',
],
},
{
- 'name': 'clang_format_mac',
+ 'name': 'clang_format_mac_x64',
'pattern': '.',
- 'condition': 'host_os == "mac"',
- 'action': [ 'download_from_google_storage',
+ 'condition': 'host_os == "mac" and host_cpu == "x64"',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-clang-format',
+ '-s', 'buildtools/mac/clang-format.x64.sha1',
+ '-o', 'buildtools/mac/clang-format',
+ ],
+ },
+ {
+ 'name': 'clang_format_mac_arm64',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac" and host_cpu == "arm64"',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
- '--platform=darwin',
'--no_auth',
'--bucket', 'chromium-clang-format',
- '-s', 'buildtools/mac/clang-format.sha1',
+ '-s', 'buildtools/mac/clang-format.arm64.sha1',
+ '-o', 'buildtools/mac/clang-format',
],
},
{
'name': 'clang_format_linux',
'pattern': '.',
'condition': 'host_os == "linux"',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
- '--platform=linux*',
'--no_auth',
'--bucket', 'chromium-clang-format',
'-s', 'buildtools/linux64/clang-format.sha1',
@@ -403,7 +435,8 @@ hooks = [
'name': 'gcmole',
'pattern': '.',
'condition': 'download_gcmole',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--bucket', 'chrome-v8-gcmole',
'-u', '--no_resume',
'-s', 'tools/gcmole/gcmole-tools.tar.gz.sha1',
@@ -414,7 +447,8 @@ hooks = [
'name': 'jsfunfuzz',
'pattern': '.',
'condition': 'download_jsfunfuzz',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--bucket', 'chrome-v8-jsfunfuzz',
'-u', '--no_resume',
'-s', 'tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1',
@@ -424,7 +458,8 @@ hooks = [
{
'name': 'wasm_spec_tests',
'pattern': '.',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'-u',
@@ -435,7 +470,8 @@ hooks = [
{
'name': 'wasm_js',
'pattern': '.',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'-u',
@@ -475,7 +511,8 @@ hooks = [
'name': 'msan_chained_origins',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
@@ -486,7 +523,8 @@ hooks = [
'name': 'msan_no_origins',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
@@ -498,7 +536,8 @@ hooks = [
'name': 'ciopfs_linux',
'pattern': '.',
'condition': 'checkout_win and host_os == "linux"',
- 'action': [ 'download_from_google_storage',
+ 'action': [ 'python3',
+ 'third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-browser-clang/ciopfs',
diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel
index ac79c42808..ffa9ef0407 100644
--- a/deps/v8/bazel/config/BUILD.bazel
+++ b/deps/v8/bazel/config/BUILD.bazel
@@ -178,6 +178,15 @@ selects.config_setting_group(
)
selects.config_setting_group(
+ name = "is_not_android",
+ match_any = [
+ ":is_windows",
+ ":is_linux",
+ ":is_macos",
+ ]
+)
+
+selects.config_setting_group(
name = "is_non_android_posix",
match_any = [
":is_linux",
diff --git a/deps/v8/gni/OWNERS b/deps/v8/gni/OWNERS
index e87e9c95a1..fa1262b503 100644
--- a/deps/v8/gni/OWNERS
+++ b/deps/v8/gni/OWNERS
@@ -2,4 +2,4 @@ file:../INFRA_OWNERS
per-file v8.cmx=victorgomes@chromium.org
per-file release_branch_toggle.gni=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
-per-file release_branch_toggle.gni=lutz@chromium.org
\ No newline at end of file
+per-file release_branch_toggle.gni=vahl@chromium.org
\ No newline at end of file
diff --git a/deps/v8/gni/release_branch_toggle.gni b/deps/v8/gni/release_branch_toggle.gni
index c502c8c62e..43e3b6ae56 100644
--- a/deps/v8/gni/release_branch_toggle.gni
+++ b/deps/v8/gni/release_branch_toggle.gni
@@ -4,4 +4,4 @@
declare_args() {
is_on_release_branch = true
-}
\ No newline at end of file
+}
diff --git a/deps/v8/include/cppgc/default-platform.h b/deps/v8/include/cppgc/default-platform.h
index f9af756c39..a27871cc37 100644
--- a/deps/v8/include/cppgc/default-platform.h
+++ b/deps/v8/include/cppgc/default-platform.h
@@ -19,15 +19,6 @@ namespace cppgc {
*/
class V8_EXPORT DefaultPlatform : public Platform {
public:
- /**
- * Use this method instead of 'cppgc::InitializeProcess' when using
- * 'cppgc::DefaultPlatform'. 'cppgc::DefaultPlatform::InitializeProcess'
- * will initialize cppgc and v8 if needed (for non-standalone builds).
- *
- * \param platform DefaultPlatform instance used to initialize cppgc/v8.
- */
- static void InitializeProcess(DefaultPlatform* platform);
-
using IdleTaskSupport = v8::platform::IdleTaskSupport;
explicit DefaultPlatform(
int thread_pool_size = 0,
diff --git a/deps/v8/include/cppgc/internal/api-constants.h b/deps/v8/include/cppgc/internal/api-constants.h
index 791039f1ee..a50d4d046c 100644
--- a/deps/v8/include/cppgc/internal/api-constants.h
+++ b/deps/v8/include/cppgc/internal/api-constants.h
@@ -44,6 +44,9 @@ static constexpr size_t kDefaultAlignment = sizeof(void*);
// Maximum support alignment for a type as in `alignof(T)`.
static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
+// Granularity of heap allocations.
+constexpr size_t kAllocationGranularity = sizeof(void*);
+
} // namespace api_constants
} // namespace internal
diff --git a/deps/v8/include/cppgc/internal/caged-heap-local-data.h b/deps/v8/include/cppgc/internal/caged-heap-local-data.h
index 5b30d67029..a27649c17f 100644
--- a/deps/v8/include/cppgc/internal/caged-heap-local-data.h
+++ b/deps/v8/include/cppgc/internal/caged-heap-local-data.h
@@ -6,6 +6,8 @@
#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
#include <array>
+#include <cstddef>
+#include <cstdint>
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/logging.h"
@@ -19,32 +21,41 @@ class HeapBase;
#if defined(CPPGC_YOUNG_GENERATION)
-// AgeTable contains entries that correspond to 4KB memory regions. Each entry
-// can be in one of three states: kOld, kYoung or kUnknown.
+// AgeTable is the bytemap needed for the fast generation check in the write
+// barrier. AgeTable contains entries that correspond to 512 bytes memory
+// regions (cards). Each entry in the table represents generation of the objects
+// that reside on the corresponding card (young, old or mixed).
class AgeTable final {
- static constexpr size_t kGranularityBits = 12; // 4KiB per byte.
+ static constexpr size_t kRequiredSize = 1 * api_constants::kMB;
+ static constexpr size_t kAllocationGranularity =
+ api_constants::kAllocationGranularity;
public:
- enum class Age : uint8_t { kOld, kYoung, kUnknown };
+ enum class Age : uint8_t { kOld, kYoung, kMixed };
- static constexpr size_t kEntrySizeInBytes = 1 << kGranularityBits;
+ static constexpr size_t kCardSizeInBytes =
+ (api_constants::kCagedHeapReservationSize / kAllocationGranularity) /
+ kRequiredSize;
- Age& operator[](uintptr_t offset) { return table_[entry(offset)]; }
- Age operator[](uintptr_t offset) const { return table_[entry(offset)]; }
+ void SetAge(uintptr_t cage_offset, Age age) {
+ table_[card(cage_offset)] = age;
+ }
+ V8_INLINE Age GetAge(uintptr_t cage_offset) const {
+ return table_[card(cage_offset)];
+ }
void Reset(PageAllocator* allocator);
private:
- static constexpr size_t kAgeTableSize =
- api_constants::kCagedHeapReservationSize >> kGranularityBits;
-
- size_t entry(uintptr_t offset) const {
+ V8_INLINE size_t card(uintptr_t offset) const {
+ constexpr size_t kGranularityBits =
+ __builtin_ctz(static_cast<uint32_t>(kCardSizeInBytes));
const size_t entry = offset >> kGranularityBits;
CPPGC_DCHECK(table_.size() > entry);
return entry;
}
- std::array<Age, kAgeTableSize> table_;
+ std::array<Age, kRequiredSize> table_;
};
static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
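
The new constants pin the age table to exactly 1 MiB: with a 4 GiB caged reservation and an 8-byte allocation granularity, each table byte covers a 512-byte card. A small standalone sketch of that arithmetic follows; the reservation size and granularity are assumptions mirroring api-constants.h, and CardIndex is an illustrative stand-in for AgeTable::card(), not the real implementation.

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed values mirroring cppgc's api_constants (not the actual header).
constexpr uint64_t kCagedHeapReservationSize = uint64_t{4} << 30;  // 4 GiB cage
constexpr size_t kAllocationGranularity = sizeof(void*);           // 8 bytes on 64-bit
constexpr size_t kRequiredSize = size_t{1} << 20;                  // 1 MiB table
constexpr size_t kCardSizeInBytes = static_cast<size_t>(
    (kCagedHeapReservationSize / kAllocationGranularity) / kRequiredSize);  // == 512

enum class Age : uint8_t { kOld, kYoung, kMixed };

// One byte per 512-byte card; the offset is relative to the cage base.
constexpr size_t CardIndex(uintptr_t cage_offset) {
  return cage_offset / kCardSizeInBytes;  // equivalent to cage_offset >> 9
}

int main() {
  static_assert(kCardSizeInBytes == 512, "each table byte covers one card");
  static std::array<Age, kRequiredSize> table{};    // exactly 1 MiB, zero-initialized
  table[CardIndex(0x1234)] = Age::kYoung;           // mark the card holding offset 0x1234
  assert(table[CardIndex(0x1200)] == Age::kYoung);  // 0x1200 lies in the same 512-byte card
  return 0;
}
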
diff --git a/deps/v8/include/cppgc/internal/finalizer-trait.h b/deps/v8/include/cppgc/internal/finalizer-trait.h
index 7bd6f83bf6..ab49af870e 100644
--- a/deps/v8/include/cppgc/internal/finalizer-trait.h
+++ b/deps/v8/include/cppgc/internal/finalizer-trait.h
@@ -19,7 +19,8 @@ struct HasFinalizeGarbageCollectedObject : std::false_type {};
template <typename T>
struct HasFinalizeGarbageCollectedObject<
- T, void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
+ T,
+ std::void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
: std::true_type {};
// The FinalizerTraitImpl specifies how to finalize objects.
diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h
index cdb7ec6f9e..bfabc31e13 100644
--- a/deps/v8/include/cppgc/internal/write-barrier.h
+++ b/deps/v8/include/cppgc/internal/write-barrier.h
@@ -405,7 +405,8 @@ void WriteBarrier::GenerationalBarrier(const Params& params, const void* slot) {
const AgeTable& age_table = local_data.age_table;
// Bail out if the slot is in young generation.
- if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return;
+ if (V8_LIKELY(age_table.GetAge(params.slot_offset) == AgeTable::Age::kYoung))
+ return;
GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset);
}
@@ -420,7 +421,8 @@ void WriteBarrier::GenerationalBarrierForSourceObject(
// Assume that if the first element is in young generation, the whole range is
// in young generation.
- if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return;
+ if (V8_LIKELY(age_table.GetAge(params.slot_offset) == AgeTable::Age::kYoung))
+ return;
GenerationalBarrierForSourceObjectSlow(local_data, inner_pointer);
}
diff --git a/deps/v8/include/cppgc/type-traits.h b/deps/v8/include/cppgc/type-traits.h
index 56cd55d61e..970ffd4841 100644
--- a/deps/v8/include/cppgc/type-traits.h
+++ b/deps/v8/include/cppgc/type-traits.h
@@ -24,14 +24,6 @@ class StrongMemberTag;
class UntracedMemberTag;
class WeakMemberTag;
-// Pre-C++17 custom implementation of std::void_t.
-template <typename... Ts>
-struct make_void {
- typedef void type;
-};
-template <typename... Ts>
-using void_t = typename make_void<Ts...>::type;
-
// Not supposed to be specialized by the user.
template <typename T>
struct IsWeak : std::false_type {};
@@ -42,7 +34,7 @@ template <typename T, typename = void>
struct IsTraceMethodConst : std::false_type {};
template <typename T>
-struct IsTraceMethodConst<T, void_t<decltype(std::declval<const T>().Trace(
+struct IsTraceMethodConst<T, std::void_t<decltype(std::declval<const T>().Trace(
std::declval<Visitor*>()))>> : std::true_type {
};
@@ -53,7 +45,7 @@ struct IsTraceable : std::false_type {
template <typename T>
struct IsTraceable<
- T, void_t<decltype(std::declval<T>().Trace(std::declval<Visitor*>()))>>
+ T, std::void_t<decltype(std::declval<T>().Trace(std::declval<Visitor*>()))>>
: std::true_type {
// All Trace methods should be marked as const. If an object of type
// 'T' is traceable then any object of type 'const T' should also
@@ -72,8 +64,8 @@ struct HasGarbageCollectedMixinTypeMarker : std::false_type {
template <typename T>
struct HasGarbageCollectedMixinTypeMarker<
- T,
- void_t<typename std::remove_const_t<T>::IsGarbageCollectedMixinTypeMarker>>
+ T, std::void_t<
+ typename std::remove_const_t<T>::IsGarbageCollectedMixinTypeMarker>>
: std::true_type {
static_assert(sizeof(T), "T must be fully defined");
};
@@ -85,7 +77,8 @@ struct HasGarbageCollectedTypeMarker : std::false_type {
template <typename T>
struct HasGarbageCollectedTypeMarker<
- T, void_t<typename std::remove_const_t<T>::IsGarbageCollectedTypeMarker>>
+ T,
+ std::void_t<typename std::remove_const_t<T>::IsGarbageCollectedTypeMarker>>
: std::true_type {
static_assert(sizeof(T), "T must be fully defined");
};
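
The deleted void_t shim is no longer needed because these headers now assume C++17, where std::void_t supplies the same detection idiom. A minimal self-contained sketch of the pattern the traits above rely on; Visitor here is only a forward declaration, and Widget/Plain are made-up example types, not cppgc classes.

#include <type_traits>
#include <utility>

class Visitor;  // stand-in for cppgc::Visitor; a pointer to it is all we need

// Detection idiom: the partial specialization wins only when the expression
// inside std::void_t is well-formed, i.e. when T has Trace(Visitor*).
template <typename T, typename = void>
struct IsTraceable : std::false_type {};

template <typename T>
struct IsTraceable<
    T, std::void_t<decltype(std::declval<T>().Trace(std::declval<Visitor*>()))>>
    : std::true_type {};

struct Widget {
  void Trace(Visitor*) const {}
};
struct Plain {};

static_assert(IsTraceable<Widget>::value, "Widget exposes Trace(Visitor*)");
static_assert(!IsTraceable<Plain>::value, "Plain has no Trace method");

int main() { return 0; }
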
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 09c420e3a6..53a5f4c11d 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -952,6 +952,37 @@ domain Runtime
# Unique script identifier.
type ScriptId extends string
+ # Represents the value serialiazed by the WebDriver BiDi specification
+ # https://w3c.github.io/webdriver-bidi.
+ type WebDriverValue extends object
+ properties
+ enum type
+ undefined
+ null
+ string
+ number
+ boolean
+ bigint
+ regexp
+ date
+ symbol
+ array
+ object
+ function
+ map
+ set
+ weakmap
+ weakset
+ error
+ proxy
+ promise
+ typedarray
+ arraybuffer
+ node
+ window
+ optional any value
+ optional string objectId
+
# Unique object identifier.
type RemoteObjectId extends string
@@ -1004,6 +1035,8 @@ domain Runtime
optional UnserializableValue unserializableValue
# String representation of the object.
optional string description
+ # WebDriver BiDi representation of the value.
+ experimental optional WebDriverValue webDriverValue
# Unique object identifier (for non-primitive values).
optional RemoteObjectId objectId
# Preview containing abbreviated property values. Specified for `object` type values only.
@@ -1309,6 +1342,8 @@ domain Runtime
optional string objectGroup
# Whether to throw an exception if side effect cannot be ruled out during evaluation.
experimental optional boolean throwOnSideEffect
+ # Whether the result should be serialized according to https://w3c.github.io/webdriver-bidi.
+ experimental optional boolean generateWebDriverValue
returns
# Call result.
RemoteObject result
@@ -1394,6 +1429,8 @@ domain Runtime
# boundaries).
# This is mutually exclusive with `contextId`.
experimental optional string uniqueContextId
+ # Whether the result should be serialized according to https://w3c.github.io/webdriver-bidi.
+ experimental optional boolean generateWebDriverValue
returns
# Evaluation result.
RemoteObject result
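
The generateWebDriverValue flag is a per-call opt-in on the existing evaluate and callFunctionOn commands; when set, the returned RemoteObject additionally carries a webDriverValue field shaped like the WebDriverValue type declared above. An illustrative request/response pair, embedded here as C++ string constants purely for show; the traffic is hypothetical and the response is abridged to the fields relevant to this change.

#include <cstdio>

// Hypothetical CDP traffic; values are illustrative, not captured output.
constexpr const char* kEvaluateRequest = R"({
  "id": 1,
  "method": "Runtime.evaluate",
  "params": { "expression": "6 * 7", "generateWebDriverValue": true }
})";

constexpr const char* kAbridgedResponse = R"({
  "id": 1,
  "result": { "result": {
    "type": "number", "value": 42, "description": "42",
    "webDriverValue": { "type": "number", "value": 42 }
  }}
})";

int main() {
  std::puts(kEvaluateRequest);
  std::puts(kAbridgedResponse);
  return 0;
}
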
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index 7761d87fd0..412154930f 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -77,9 +77,6 @@ struct WrapperDescriptor final {
};
struct V8_EXPORT CppHeapCreateParams {
- CppHeapCreateParams(const CppHeapCreateParams&) = delete;
- CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete;
-
std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces;
WrapperDescriptor wrapper_descriptor;
};
@@ -164,6 +161,7 @@ class V8_EXPORT CppHeap {
class JSVisitor : public cppgc::Visitor {
public:
explicit JSVisitor(cppgc::Visitor::Key key) : cppgc::Visitor(key) {}
+ ~JSVisitor() override = default;
void Trace(const TracedReferenceBase& ref) {
if (ref.IsEmptyThreadSafe()) return;
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index edd968c766..ce5430bd03 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -205,6 +205,15 @@ class V8_EXPORT V8InspectorSession {
virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0;
};
+class V8_EXPORT WebDriverValue {
+ public:
+ explicit WebDriverValue(StringView type, v8::MaybeLocal<v8::Value> value = {})
+ : type(type), value(value) {}
+
+ StringView type;
+ v8::MaybeLocal<v8::Value> value;
+};
+
class V8_EXPORT V8InspectorClient {
public:
virtual ~V8InspectorClient() = default;
@@ -219,6 +228,10 @@ class V8_EXPORT V8InspectorClient {
virtual void beginUserGesture() {}
virtual void endUserGesture() {}
+ virtual std::unique_ptr<WebDriverValue> serializeToWebDriverValue(
+ v8::Local<v8::Value> v8_value, int max_depth) {
+ return nullptr;
+ }
virtual std::unique_ptr<StringBuffer> valueSubtype(v8::Local<v8::Value>) {
return nullptr;
}
@@ -270,6 +283,9 @@ class V8_EXPORT V8InspectorClient {
// The caller would defer to generating a random 64 bit integer if
// this method returns 0.
virtual int64_t generateUniqueId() { return 0; }
+
+ virtual void dispatchError(v8::Local<v8::Context>, v8::Local<v8::Message>,
+ v8::Local<v8::Value>) {}
};
// These stack trace ids are intended to be passed between debuggers and be
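
The serializeToWebDriverValue hook lets an embedder supply its own WebDriver BiDi mapping for values the inspector cannot classify itself; returning nullptr (the default above) falls back to V8's built-in serialization. A hedged sketch of an override, assuming a hypothetical embedder that wraps host objects in v8::External handles; MyInspectorClient and that convention are illustrative, not Node.js or Chromium code.

#include <memory>

#include "v8-inspector.h"
#include "v8.h"

// Hypothetical embedder client; only serializeToWebDriverValue is overridden.
class MyInspectorClient : public v8_inspector::V8InspectorClient {
 public:
  std::unique_ptr<v8_inspector::WebDriverValue> serializeToWebDriverValue(
      v8::Local<v8::Value> v8_value, int /*max_depth*/) override {
    if (v8_value->IsExternal()) {
      // Report embedder-wrapped host objects with the BiDi "node" type and no
      // payload; the string literal outlives the returned StringView.
      static const char kNodeType[] = "node";
      v8_inspector::StringView type(
          reinterpret_cast<const uint8_t*>(kNodeType), sizeof(kNodeType) - 1);
      return std::make_unique<v8_inspector::WebDriverValue>(type);
    }
    return nullptr;  // use the inspector's default serialization
  }
};
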
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index e6e9cc5f9f..37c5b336da 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -365,8 +365,8 @@ class Internals {
static const uint32_t kNumIsolateDataSlots = 4;
static const int kStackGuardSize = 7 * kApiSystemPointerSize;
- static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
- static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;
+ static const int kBuiltinTier0EntryTableSize = 9 * kApiSystemPointerSize;
+ static const int kBuiltinTier0TableSize = 9 * kApiSystemPointerSize;
// IsolateData layout guarantees.
static const int kIsolateCageBaseOffset = 0;
diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h
index 01bc538e22..d8e8bd865b 100644
--- a/deps/v8/include/v8-metrics.h
+++ b/deps/v8/include/v8-metrics.h
@@ -61,26 +61,38 @@ struct GarbageCollectionFullMainThreadIncrementalMark {
int64_t cpp_wall_clock_duration_in_us = -1;
};
-struct GarbageCollectionFullMainThreadBatchedIncrementalMark {
- std::vector<GarbageCollectionFullMainThreadIncrementalMark> events;
-};
-
struct GarbageCollectionFullMainThreadIncrementalSweep {
int64_t wall_clock_duration_in_us = -1;
int64_t cpp_wall_clock_duration_in_us = -1;
};
-struct GarbageCollectionFullMainThreadBatchedIncrementalSweep {
- std::vector<GarbageCollectionFullMainThreadIncrementalSweep> events;
+template <typename EventType>
+struct GarbageCollectionBatchedEvents {
+ std::vector<EventType> events;
};
+using GarbageCollectionFullMainThreadBatchedIncrementalMark =
+ GarbageCollectionBatchedEvents<
+ GarbageCollectionFullMainThreadIncrementalMark>;
+using GarbageCollectionFullMainThreadBatchedIncrementalSweep =
+ GarbageCollectionBatchedEvents<
+ GarbageCollectionFullMainThreadIncrementalSweep>;
+
struct GarbageCollectionYoungCycle {
int reason = -1;
int64_t total_wall_clock_duration_in_us = -1;
int64_t main_thread_wall_clock_duration_in_us = -1;
- double collection_rate_in_percent;
- double efficiency_in_bytes_per_us;
- double main_thread_efficiency_in_bytes_per_us;
+ double collection_rate_in_percent = -1.0;
+ double efficiency_in_bytes_per_us = -1.0;
+ double main_thread_efficiency_in_bytes_per_us = -1.0;
+#if defined(CPPGC_YOUNG_GENERATION)
+ GarbageCollectionPhases total_cpp;
+ GarbageCollectionSizes objects_cpp;
+ GarbageCollectionSizes memory_cpp;
+ double collection_rate_cpp_in_percent = -1.0;
+ double efficiency_cpp_in_bytes_per_us = -1.0;
+ double main_thread_efficiency_cpp_in_bytes_per_us = -1.0;
+#endif // defined(CPPGC_YOUNG_GENERATION)
};
struct WasmModuleDecoded {
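
The GarbageCollectionBatchedEvents template collapses the two hand-written batched structs into one definition, and the using-aliases keep the public names unchanged for existing v8::metrics::Recorder implementations. A tiny usage sketch against the declarations above; the numbers are made up.

#include <cstdio>

#include "v8-metrics.h"

int main() {
  // Same shape as before the refactor: a vector of per-increment events.
  v8::metrics::GarbageCollectionFullMainThreadIncrementalMark mark;
  mark.wall_clock_duration_in_us = 250;  // illustrative values only
  mark.cpp_wall_clock_duration_in_us = 90;

  v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalMark batch;
  batch.events.push_back(mark);
  std::printf("batched increments: %zu\n", batch.events.size());
  return 0;
}
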
diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h
index 5644a3bb70..88252ac189 100644
--- a/deps/v8/include/v8-script.h
+++ b/deps/v8/include/v8-script.h
@@ -650,6 +650,7 @@ class V8_EXPORT ScriptCompiler {
* It is possible to specify multiple context extensions (obj in the above
* example).
*/
+ V8_DEPRECATED("Use CompileFunction")
static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
Local<Context> context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 4312c9ff8b..709fdc5790 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 10
-#define V8_MINOR_VERSION 1
-#define V8_BUILD_NUMBER 124
-#define V8_PATCH_LEVEL 8
+#define V8_MINOR_VERSION 2
+#define V8_BUILD_NUMBER 154
+#define V8_PATCH_LEVEL 2
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index e20cd6e7bd..769582188a 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -116,7 +116,6 @@
'V8 Linux64 - pointer compression - builder': 'release_x64_pointer_compression',
'V8 Linux64 - pointer compression without dchecks':
'release_x64_pointer_compression_without_dchecks',
- 'V8 Linux64 - python3 - builder': 'release_x64',
'V8 Linux64 - arm64 - sim - pointer compression - builder':
'release_simulate_arm64_pointer_compression',
'V8 Linux64 gcc - debug builder': 'debug_x64_gcc',
@@ -238,7 +237,6 @@
'v8_linux64_nodcheck_rel_ng': 'release_x64',
'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
- 'v8_linux64_python3_rel_ng': 'release_x64',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
'v8_linux64_single_generation_dbg_ng': 'debug_x64_single_generation',
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 519adbdb07..11db9b11f7 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -280,11 +280,11 @@
'os': 'Ubuntu-18.04',
},
'tests': [
- {'name': 'mjsunit_sp_frame_access'},
- {'name': 'mozilla'},
+ {'name': 'mjsunit_sp_frame_access', 'shards': 3},
+ {'name': 'mozilla', 'shards': 3},
{'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 7},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 7},
+ {'name': 'v8testing', 'shards': 10},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
],
},
'v8_linux_arm_lite_rel_ng_triggered': {
@@ -347,15 +347,15 @@
'os': 'Ubuntu-18.04',
},
'tests': [
- {'name': 'benchmarks'},
+ {'name': 'benchmarks', 'shards': 2},
{'name': 'benchmarks', 'variant': 'extra'},
- {'name': 'mjsunit_sp_frame_access'},
+ {'name': 'mjsunit_sp_frame_access', 'shards': 2},
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'test262', 'variant': 'extra', 'shards': 5},
- {'name': 'v8testing', 'shards': 3},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
+ {'name': 'test262', 'variant': 'extra', 'shards': 9},
+ {'name': 'v8testing', 'shards': 5},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
@@ -491,15 +491,6 @@
{'name': 'v8testing', 'shards': 3},
],
},
- 'v8_linux64_python3_rel_ng_triggered': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 2},
- {'name': 'gcmole'},
- ],
- },
'v8_linux64_single_generation_dbg_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -592,7 +583,7 @@
'os': 'Ubuntu-18.04',
},
'tests': [
- {'name': 'mjsunit_sp_frame_access'},
+ {'name': 'mjsunit_sp_frame_access', 'shards': 2},
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
@@ -1369,15 +1360,6 @@
{'name': 'v8testing', 'shards': 2},
],
},
- 'V8 Linux64 - python3': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 2},
- {'name': 'gcmole'},
- ],
- },
'V8 Linux64 - shared': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1813,8 +1795,8 @@
'os': 'Ubuntu-18.04',
},
'tests': [
- {'name': 'mjsunit_sp_frame_access', 'shards': 6},
- {'name': 'mozilla', 'shards': 6},
+ {'name': 'mjsunit_sp_frame_access', 'shards': 3},
+ {'name': 'mozilla', 'shards': 3},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
diff --git a/deps/v8/samples/cppgc/hello-world.cc b/deps/v8/samples/cppgc/hello-world.cc
index fe0d002ab4..65b7aa9db2 100644
--- a/deps/v8/samples/cppgc/hello-world.cc
+++ b/deps/v8/samples/cppgc/hello-world.cc
@@ -57,7 +57,7 @@ int main(int argc, char* argv[]) {
#endif // !CPPGC_IS_STANDALONE
// Initialize the process. This must happen before any cppgc::Heap::Create()
// calls.
- cppgc::DefaultPlatform::InitializeProcess(cppgc_platform.get());
+ cppgc::InitializeProcess(cppgc_platform->GetPageAllocator());
{
// Create a managed heap.
std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(cppgc_platform);
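
With DefaultPlatform::InitializeProcess removed (see the default-platform.h hunk earlier in this patch), embedders call cppgc::InitializeProcess with the platform's page allocator directly, which is exactly what the updated sample now does. A condensed sketch of the standalone-cppgc sequence, assuming CPPGC_IS_STANDALONE so no V8 platform setup is involved:

#include <memory>

#include <cppgc/default-platform.h>
#include <cppgc/heap.h>
#include <cppgc/platform.h>

int main() {
  auto platform = std::make_shared<cppgc::DefaultPlatform>();
  // Replaces the removed cppgc::DefaultPlatform::InitializeProcess(platform.get()).
  cppgc::InitializeProcess(platform->GetPageAllocator());
  {
    // Create and use a managed heap; it must not outlive process teardown.
    std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(platform);
    (void)heap;  // allocation of garbage-collected objects would go here
  }
  cppgc::ShutdownProcess();
  return 0;
}
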
diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h
index 786f849be6..5d437370bc 100644
--- a/deps/v8/src/api/api-arguments-inl.h
+++ b/deps/v8/src/api/api-arguments-inl.h
@@ -87,24 +87,22 @@ inline JSReceiver FunctionCallbackArguments::holder() {
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(values_);
-#define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
- INFO_FOR_SIDE_EFFECT) \
- Handle<RETURN_TYPE> PropertyCallbackArguments::CallNamed##FUNCTION( \
- Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
- DCHECK_NAME_COMPATIBLE(interceptor, name); \
- Isolate* isolate = this->isolate(); \
- RCS_SCOPE(isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
- Handle<Object> receiver_check_unsupported; \
- GenericNamedProperty##FUNCTION##Callback f = \
- ToCData<GenericNamedProperty##FUNCTION##Callback>( \
- interceptor->TYPE()); \
- PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
- INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
- NotAccessor); \
- LOG(isolate, \
- ApiNamedPropertyAccess("interceptor-named-" #TYPE, holder(), *name)); \
- f(v8::Utils::ToLocal(name), callback_info); \
- return GetReturnValue<RETURN_TYPE>(isolate); \
+#define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
+ INFO_FOR_SIDE_EFFECT) \
+ Handle<RETURN_TYPE> PropertyCallbackArguments::CallNamed##FUNCTION( \
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
+ DCHECK_NAME_COMPATIBLE(interceptor, name); \
+ Isolate* isolate = this->isolate(); \
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
+ Handle<Object> receiver_check_unsupported; \
+ GenericNamedProperty##FUNCTION##Callback f = \
+ ToCData<GenericNamedProperty##FUNCTION##Callback>( \
+ interceptor->TYPE()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
+ INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
+ NotAccessor); \
+ f(v8::Utils::ToLocal(name), callback_info); \
+ return GetReturnValue<RETURN_TYPE>(isolate); \
}
FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
@@ -123,8 +121,6 @@ FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
NotAccessor); \
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #TYPE, \
- holder(), index)); \
f(index, callback_info); \
return GetReturnValue<RETURN_TYPE>(isolate); \
}
@@ -136,7 +132,6 @@ FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo handler) {
Isolate* isolate = this->isolate();
- LOG(isolate, ApiObjectAccess("call", holder()));
RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback);
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler.callback());
@@ -156,7 +151,6 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo handler) {
Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
Handle<InterceptorInfo> interceptor) {
DCHECK(interceptor->is_named());
- LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedEnumeratorCallback);
return CallPropertyEnumerator(interceptor);
}
@@ -164,7 +158,6 @@ Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
Handle<InterceptorInfo> interceptor) {
DCHECK(!interceptor->is_named());
- LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
RCS_SCOPE(isolate(), RuntimeCallCounterId::kIndexedEnumeratorCallback);
return CallPropertyEnumerator(interceptor);
}
@@ -172,10 +165,7 @@ Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
Handle<Object> PropertyCallbackArguments::CallNamedGetter(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
DCHECK_NAME_COMPATIBLE(interceptor, name);
- Isolate* isolate = this->isolate();
- RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback);
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name));
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedGetterCallback);
GenericNamedPropertyGetterCallback f =
ToCData<GenericNamedPropertyGetterCallback>(interceptor->getter());
return BasicCallNamedGetterCallback(f, name, interceptor);
@@ -184,10 +174,7 @@ Handle<Object> PropertyCallbackArguments::CallNamedGetter(
Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
DCHECK_NAME_COMPATIBLE(interceptor, name);
- Isolate* isolate = this->isolate();
- RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDescriptorCallback);
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-descriptor", holder(), *name));
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedDescriptorCallback);
GenericNamedPropertyDescriptorCallback f =
ToCData<GenericNamedPropertyDescriptorCallback>(
interceptor->descriptor());
@@ -215,8 +202,6 @@ Handle<Object> PropertyCallbackArguments::CallNamedSetter(
RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedSetterCallback);
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -231,8 +216,6 @@ Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
f(v8::Utils::ToLocal(name), desc, callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -246,8 +229,6 @@ Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
ToCData<IndexedPropertySetterCallback>(interceptor->setter());
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -262,8 +243,6 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
f(index, desc, callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -271,10 +250,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
Handle<InterceptorInfo> interceptor, uint32_t index) {
DCHECK(!interceptor->is_named());
- Isolate* isolate = this->isolate();
- RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback);
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index));
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedGetterCallback);
IndexedPropertyGetterCallback f =
ToCData<IndexedPropertyGetterCallback>(interceptor->getter());
return BasicCallIndexedGetterCallback(f, index, interceptor);
@@ -283,10 +259,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
Handle<InterceptorInfo> interceptor, uint32_t index) {
DCHECK(!interceptor->is_named());
- Isolate* isolate = this->isolate();
- RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDescriptorCallback);
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-descriptor",
- holder(), index));
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kIndexedDescriptorCallback);
IndexedPropertyDescriptorCallback f =
ToCData<IndexedPropertyDescriptorCallback>(interceptor->descriptor());
return BasicCallIndexedGetterCallback(f, index, interceptor);
@@ -323,7 +296,6 @@ Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
Handle<AccessorInfo> info, Handle<Name> name) {
Isolate* isolate = this->isolate();
RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback);
- LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
AccessorNameGetterCallback f =
ToCData<AccessorNameGetterCallback>(info->getter());
return BasicCallNamedGetterCallback(f, name, info,
@@ -339,7 +311,6 @@ Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
ToCData<AccessorNameSetterCallback>(accessor_info->setter());
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void, accessor_info,
handle(receiver(), isolate), Setter);
- LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h
index 07b2e2d0f2..9fbe9a9739 100644
--- a/deps/v8/src/api/api-macros.h
+++ b/deps/v8/src/api/api-macros.h
@@ -35,10 +35,9 @@
* TODO(verwaest): Remove calls form API methods to DO_NOT_USE macros.
*/
-#define LOG_API(isolate, class_name, function_name) \
- RCS_SCOPE(isolate, \
- i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
- LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
+#define API_RCS_SCOPE(isolate, class_name, function_name) \
+ RCS_SCOPE(isolate, \
+ i::RuntimeCallCounterId::kAPI_##class_name##_##function_name);
#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
@@ -50,7 +49,7 @@
} \
HandleScopeClass handle_scope(isolate); \
CallDepthScope<do_callback> call_depth_scope(isolate, context); \
- LOG_API(isolate, class_name, function_name); \
+ API_RCS_SCOPE(isolate, class_name, function_name); \
i::VMState<v8::OTHER> __state__((isolate)); \
bool has_pending_exception = false
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index 75109e35b7..29b94d8dea 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -386,7 +386,7 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
if (fun.shared().function_data(kAcquireLoad) != info.constructor())
return false;
if (info.immutable_proto()) return false;
- return fun.context().native_context() == isolate->raw_native_context();
+ return fun.native_context() == isolate->raw_native_context();
}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 29d4bea237..a0ab21d71a 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -50,7 +50,6 @@
#endif // V8_ENABLE_WEBASSEMBLY
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/diagnostics/gdb-jit.h"
#include "src/execution/embedder-state.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
@@ -157,9 +156,6 @@
#include "src/base/platform/wrappers.h"
#include "src/diagnostics/unwinding-info-win64.h"
#endif // V8_OS_WIN64
-#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
-#include "src/diagnostics/system-jit-win.h"
-#endif
#endif // V8_OS_WIN
// Has to be the last include (doesn't have include guards):
@@ -814,7 +810,7 @@ namespace internal {
i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
internal::Address* slot,
GlobalHandleStoreMode store_mode) {
- LOG_API(isolate, TracedGlobal, New);
+ API_RCS_SCOPE(isolate, TracedGlobal, New);
#ifdef DEBUG
Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
"the address slot must be not null");
@@ -847,7 +843,7 @@ void DisposeTracedReference(internal::Address* location) {
namespace api_internal {
i::Address* GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
- LOG_API(isolate, Persistent, New);
+ API_RCS_SCOPE(isolate, Persistent, New);
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
@@ -1338,7 +1334,7 @@ Local<FunctionTemplate> FunctionTemplate::New(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
- LOG_API(i_isolate, FunctionTemplate, New);
+ API_RCS_SCOPE(i_isolate, FunctionTemplate, New);
if (!Utils::ApiCheck(
!c_function || behavior == ConstructorBehavior::kThrow,
@@ -1363,7 +1359,7 @@ Local<FunctionTemplate> FunctionTemplate::NewWithCFunctionOverloads(
SideEffectType side_effect_type,
const MemorySpan<const CFunction>& c_function_overloads) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, FunctionTemplate, New);
+ API_RCS_SCOPE(i_isolate, FunctionTemplate, New);
if (!Utils::ApiCheck(
c_function_overloads.size() == 0 ||
@@ -1384,7 +1380,7 @@ Local<FunctionTemplate> FunctionTemplate::NewWithCache(
Local<Value> data, Local<Signature> signature, int length,
SideEffectType side_effect_type) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, FunctionTemplate, NewWithCache);
+ API_RCS_SCOPE(i_isolate, FunctionTemplate, NewWithCache);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(i_isolate, callback, data, signature, length,
ConstructorBehavior::kAllow, false, cache_property,
@@ -1567,7 +1563,7 @@ Local<ObjectTemplate> ObjectTemplate::New(
static Local<ObjectTemplate> ObjectTemplateNew(
i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
bool do_not_cache) {
- LOG_API(isolate, ObjectTemplate, New);
+ API_RCS_SCOPE(isolate, ObjectTemplate, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Struct> struct_obj = isolate->factory()->NewStruct(
i::OBJECT_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
@@ -2031,8 +2027,7 @@ Local<Script> UnboundScript::BindToCurrentContext() {
int UnboundScript::GetId() const {
auto function_info = i::SharedFunctionInfo::cast(*Utils::OpenHandle(this));
- i::Isolate* isolate = function_info.GetIsolate();
- LOG_API(isolate, UnboundScript, GetId);
+ API_RCS_SCOPE(function_info.GetIsolate(), UnboundScript, GetId);
return i::Script::cast(function_info.script()).id();
}
@@ -2041,7 +2036,7 @@ int UnboundScript::GetLineNumber(int code_pos) {
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- LOG_API(isolate, UnboundScript, GetLineNumber);
+ API_RCS_SCOPE(isolate, UnboundScript, GetLineNumber);
if (obj->script().IsScript()) {
i::Handle<i::Script> script(i::Script::cast(obj->script()), isolate);
return i::Script::GetLineNumber(script, code_pos);
@@ -2055,7 +2050,7 @@ Local<Value> UnboundScript::GetScriptName() {
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- LOG_API(isolate, UnboundScript, GetName);
+ API_RCS_SCOPE(isolate, UnboundScript, GetName);
if (obj->script().IsScript()) {
i::Object name = i::Script::cast(obj->script()).name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
@@ -2069,7 +2064,7 @@ Local<Value> UnboundScript::GetSourceURL() {
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- LOG_API(isolate, UnboundScript, GetSourceURL);
+ API_RCS_SCOPE(isolate, UnboundScript, GetSourceURL);
if (obj->script().IsScript()) {
i::Object url = i::Script::cast(obj->script()).source_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
@@ -2082,7 +2077,7 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, UnboundScript, GetSourceMappingURL);
+ API_RCS_SCOPE(isolate, UnboundScript, GetSourceMappingURL);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (obj->script().IsScript()) {
i::Object url = i::Script::cast(obj->script()).source_mapping_url();
@@ -3501,7 +3496,7 @@ MaybeLocal<Value> ValueDeserializer::ReadValue(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, ValueDeserializer, ReadValue, Value);
i::MaybeHandle<i::Object> result;
if (GetWireFormatVersion() > 0) {
- result = private_->deserializer.ReadObject();
+ result = private_->deserializer.ReadObjectWrapper();
} else {
result =
private_->deserializer.ReadObjectUsingEntireBufferForLegacyFormat();
@@ -4011,7 +4006,7 @@ std::unique_ptr<v8::BackingStore> v8::BackingStore::Reallocate(
v8::Isolate* isolate, std::unique_ptr<v8::BackingStore> backing_store,
size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, ArrayBuffer, BackingStore_Reallocate);
+ API_RCS_SCOPE(i_isolate, ArrayBuffer, BackingStore_Reallocate);
Utils::ApiCheck(byte_length <= i::JSArrayBuffer::kMaxByteLength,
"v8::BackingStore::Reallocate", "byte_lenght is too large");
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -4241,7 +4236,7 @@ bool Value::SameValue(Local<Value> that) const {
Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- LOG_API(isolate, Value, TypeOf);
+ API_RCS_SCOPE(isolate, Value, TypeOf);
return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
}
@@ -5171,7 +5166,7 @@ MaybeLocal<Function> Function::New(Local<Context> context,
int length, ConstructorBehavior behavior,
SideEffectType side_effect_type) {
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
- LOG_API(isolate, Function, New);
+ API_RCS_SCOPE(isolate, Function, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto templ =
FunctionTemplateNew(isolate, callback, data, Local<Signature>(), length,
@@ -5677,7 +5672,7 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
int* nchars_ref, int options) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, String, WriteUtf8);
+ API_RCS_SCOPE(isolate, String, WriteUtf8);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
str = i::String::Flatten(isolate, str);
i::DisallowGarbageCollection no_gc;
@@ -5695,7 +5690,7 @@ template <typename CharType>
static inline int WriteHelper(i::Isolate* isolate, const String* string,
CharType* buffer, int start, int length,
int options) {
- LOG_API(isolate, String, Write);
+ API_RCS_SCOPE(isolate, String, Write);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
@@ -6376,7 +6371,7 @@ Local<Context> NewContext(
CHECK(isolate->builtins()->code(i::Builtin::kIllegal).IsCodeT());
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
- LOG_API(isolate, Context, New);
+ API_RCS_SCOPE(isolate, Context, New);
i::HandleScope scope(isolate);
ExtensionConfiguration no_extensions;
if (extensions == nullptr) extensions = &no_extensions;
@@ -6421,7 +6416,7 @@ MaybeLocal<Object> v8::Context::NewRemoteContext(
v8::Isolate* external_isolate, v8::Local<ObjectTemplate> global_template,
v8::MaybeLocal<v8::Value> global_object) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- LOG_API(isolate, Context, NewRemoteContext);
+ API_RCS_SCOPE(isolate, Context, NewRemoteContext);
i::HandleScope scope(isolate);
i::Handle<i::FunctionTemplateInfo> global_constructor =
EnsureConstructor(isolate, *global_template);
@@ -6691,7 +6686,7 @@ MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
MaybeLocal<v8::Object> FunctionTemplate::NewRemoteInstance() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- LOG_API(isolate, FunctionTemplate, NewRemoteInstance);
+ API_RCS_SCOPE(isolate, FunctionTemplate, NewRemoteInstance);
i::HandleScope scope(isolate);
i::Handle<i::FunctionTemplateInfo> constructor =
EnsureConstructor(isolate, *InstanceTemplate());
@@ -6752,7 +6747,7 @@ Local<External> v8::External::New(Isolate* isolate, void* value) {
// constructors.
DCHECK_NOT_NULL(value);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, External, New);
+ API_RCS_SCOPE(i_isolate, External, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
return Utils::ExternalToLocal(external);
@@ -6828,7 +6823,7 @@ STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
} else { \
i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate); \
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); \
- LOG_API(i_isolate, class_name, function_name); \
+ API_RCS_SCOPE(i_isolate, class_name, function_name); \
if (length < 0) length = StringLength(data); \
i::Handle<i::String> handle_result = \
NewString(i_isolate->factory(), type, \
@@ -6842,7 +6837,7 @@ Local<String> String::NewFromUtf8Literal(Isolate* isolate, const char* literal,
DCHECK_LE(length, i::String::kMaxLength);
i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- LOG_API(i_isolate, String, NewFromUtf8Literal);
+ API_RCS_SCOPE(i_isolate, String, NewFromUtf8Literal);
i::Handle<i::String> handle_result =
NewString(i_isolate->factory(), type,
base::Vector<const char>(literal, length))
@@ -6874,7 +6869,7 @@ Local<String> v8::String::Concat(Isolate* v8_isolate, Local<String> left,
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- LOG_API(isolate, String, Concat);
+ API_RCS_SCOPE(isolate, String, Concat);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
// If we are steering towards a range error, do not wait for the error to be
// thrown, and return the null handle instead.
@@ -6896,7 +6891,7 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- LOG_API(i_isolate, String, NewExternalTwoByte);
+ API_RCS_SCOPE(i_isolate, String, NewExternalTwoByte);
if (resource->length() > 0) {
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromTwoByte(resource)
@@ -6918,7 +6913,7 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- LOG_API(i_isolate, String, NewExternalOneByte);
+ API_RCS_SCOPE(i_isolate, String, NewExternalOneByte);
if (resource->length() == 0) {
// The resource isn't going to be used, free it immediately.
resource->Dispose();
@@ -7011,7 +7006,7 @@ Isolate* v8::Object::GetIsolate() {
Local<v8::Object> v8::Object::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Object, New);
+ API_RCS_SCOPE(i_isolate, Object, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSObject> obj =
i_isolate->factory()->NewJSObject(i_isolate->object_function());
@@ -7085,7 +7080,7 @@ Local<v8::Object> v8::Object::New(Isolate* isolate,
"v8::Object::New", "prototype must be null or object")) {
return Local<v8::Object>();
}
- LOG_API(i_isolate, Object, New);
+ API_RCS_SCOPE(i_isolate, Object, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::FixedArrayBase> elements =
@@ -7118,7 +7113,7 @@ Local<v8::Object> v8::Object::New(Isolate* isolate,
Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, NumberObject, New);
+ API_RCS_SCOPE(i_isolate, NumberObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
i::Handle<i::Object> obj =
@@ -7130,14 +7125,13 @@ double v8::NumberObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
i::Handle<i::JSPrimitiveWrapper>::cast(obj);
- i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
- LOG_API(isolate, NumberObject, NumberValue);
+ API_RCS_SCOPE(js_primitive_wrapper->GetIsolate(), NumberObject, NumberValue);
return js_primitive_wrapper->value().Number();
}
Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, BigIntObject, New);
+ API_RCS_SCOPE(i_isolate, BigIntObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> bigint = i::BigInt::FromInt64(i_isolate, value);
i::Handle<i::Object> obj =
@@ -7150,14 +7144,14 @@ Local<v8::BigInt> v8::BigIntObject::ValueOf() const {
i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
i::Handle<i::JSPrimitiveWrapper>::cast(obj);
i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
- LOG_API(isolate, BigIntObject, BigIntValue);
+ API_RCS_SCOPE(isolate, BigIntObject, BigIntValue);
return Utils::ToLocal(i::Handle<i::BigInt>(
i::BigInt::cast(js_primitive_wrapper->value()), isolate));
}
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, BooleanObject, New);
+ API_RCS_SCOPE(i_isolate, BooleanObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> boolean(value
? i::ReadOnlyRoots(i_isolate).true_value()
@@ -7172,7 +7166,7 @@ bool v8::BooleanObject::ValueOf() const {
i::Object obj = *Utils::OpenHandle(this);
i::JSPrimitiveWrapper js_primitive_wrapper = i::JSPrimitiveWrapper::cast(obj);
i::Isolate* isolate = js_primitive_wrapper.GetIsolate();
- LOG_API(isolate, BooleanObject, BooleanValue);
+ API_RCS_SCOPE(isolate, BooleanObject, BooleanValue);
return js_primitive_wrapper.value().IsTrue(isolate);
}
@@ -7180,7 +7174,7 @@ Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, StringObject, New);
+ API_RCS_SCOPE(isolate, StringObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> obj =
i::Object::ToObject(isolate, string).ToHandleChecked();
@@ -7192,14 +7186,14 @@ Local<v8::String> v8::StringObject::ValueOf() const {
i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
i::Handle<i::JSPrimitiveWrapper>::cast(obj);
i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
- LOG_API(isolate, StringObject, StringValue);
+ API_RCS_SCOPE(isolate, StringObject, StringValue);
return Utils::ToLocal(i::Handle<i::String>(
i::String::cast(js_primitive_wrapper->value()), isolate));
}
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, SymbolObject, New);
+ API_RCS_SCOPE(i_isolate, SymbolObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> obj =
i::Object::ToObject(i_isolate, Utils::OpenHandle(*value))
@@ -7212,7 +7206,7 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
i::Handle<i::JSPrimitiveWrapper>::cast(obj);
i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
- LOG_API(isolate, SymbolObject, SymbolValue);
+ API_RCS_SCOPE(isolate, SymbolObject, SymbolValue);
return Utils::ToLocal(i::Handle<i::Symbol>(
i::Symbol::cast(js_primitive_wrapper->value()), isolate));
}
@@ -7234,8 +7228,7 @@ MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
double v8::Date::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
- i::Isolate* isolate = jsdate->GetIsolate();
- LOG_API(isolate, Date, NumberValue);
+ API_RCS_SCOPE(jsdate->GetIsolate(), Date, NumberValue);
return jsdate->value().Number();
}
@@ -7328,7 +7321,7 @@ MaybeLocal<v8::Object> v8::RegExp::Exec(Local<Context> context,
Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Array, New);
+ API_RCS_SCOPE(i_isolate, Array, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
int real_length = length > 0 ? length : 0;
i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
@@ -7342,7 +7335,7 @@ Local<v8::Array> v8::Array::New(Isolate* isolate, Local<Value>* elements,
size_t length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Factory* factory = i_isolate->factory();
- LOG_API(i_isolate, Array, New);
+ API_RCS_SCOPE(i_isolate, Array, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
int len = static_cast<int>(length);
@@ -7368,7 +7361,7 @@ uint32_t v8::Array::Length() const {
Local<v8::Map> v8::Map::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Map, New);
+ API_RCS_SCOPE(i_isolate, Map, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSMap> obj = i_isolate->factory()->NewJSMap();
return Utils::ToLocal(obj);
@@ -7382,7 +7375,7 @@ size_t v8::Map::Size() const {
void Map::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- LOG_API(isolate, Map, Clear);
+ API_RCS_SCOPE(isolate, Map, Clear);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::JSMap::Clear(isolate, self);
}
@@ -7493,7 +7486,7 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object table_obj,
Local<Array> Map::AsArray() const {
i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, Map, AsArray);
+ API_RCS_SCOPE(isolate, Map, AsArray);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::ToLocal(
MapAsArray(isolate, obj->table(), 0, MapAsArrayKind::kEntries));
@@ -7501,7 +7494,7 @@ Local<Array> Map::AsArray() const {
Local<v8::Set> v8::Set::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Set, New);
+ API_RCS_SCOPE(i_isolate, Set, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSSet> obj = i_isolate->factory()->NewJSSet();
return Utils::ToLocal(obj);
@@ -7515,7 +7508,7 @@ size_t v8::Set::Size() const {
void Set::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- LOG_API(isolate, Set, Clear);
+ API_RCS_SCOPE(isolate, Set, Clear);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::JSSet::Clear(isolate, self);
}
@@ -7596,7 +7589,7 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object table_obj,
Local<Array> Set::AsArray() const {
i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, Set, AsArray);
+ API_RCS_SCOPE(isolate, Set, AsArray);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::ToLocal(
SetAsArray(isolate, obj->table(), 0, SetAsArrayKind::kValues));
@@ -7703,7 +7696,7 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
bool Promise::HasHandler() const {
i::JSReceiver promise = *Utils::OpenHandle(this);
i::Isolate* isolate = promise.GetIsolate();
- LOG_API(isolate, Promise, HasRejectHandler);
+ API_RCS_SCOPE(isolate, Promise, HasRejectHandler);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (!promise.IsJSPromise()) return false;
return i::JSPromise::cast(promise).has_handler();
@@ -7712,7 +7705,7 @@ bool Promise::HasHandler() const {
Local<Value> Promise::Result() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, Promise, Result);
+ API_RCS_SCOPE(isolate, Promise, Result);
i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
Utils::ApiCheck(js_promise->status() != kPending, "v8_Promise_Result",
"Promise is still pending");
@@ -7722,8 +7715,7 @@ Local<Value> Promise::Result() {
Promise::PromiseState Promise::State() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, Promise, Status);
+ API_RCS_SCOPE(promise->GetIsolate(), Promise, Status);
i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
return static_cast<PromiseState>(js_promise->status());
}
@@ -7924,7 +7916,7 @@ void v8::ArrayBuffer::Detach() {
i::Isolate* isolate = obj->GetIsolate();
Utils::ApiCheck(obj->is_detachable(), "v8::ArrayBuffer::Detach",
"Only detachable ArrayBuffers can be detached");
- LOG_API(isolate, ArrayBuffer, Detach);
+ API_RCS_SCOPE(isolate, ArrayBuffer, Detach);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
obj->Detach();
}
@@ -7936,7 +7928,7 @@ size_t v8::ArrayBuffer::ByteLength() const {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, ArrayBuffer, New);
+ API_RCS_SCOPE(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::MaybeHandle<i::JSArrayBuffer> result =
i_isolate->factory()->NewJSArrayBufferAndBackingStore(
@@ -7957,7 +7949,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(
CHECK_IMPLIES(backing_store->ByteLength() != 0,
backing_store->Data() != nullptr);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, ArrayBuffer, New);
+ API_RCS_SCOPE(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::shared_ptr<i::BackingStore> i_backing_store(
ToInternal(std::move(backing_store)));
@@ -7972,7 +7964,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(
std::unique_ptr<v8::BackingStore> v8::ArrayBuffer::NewBackingStore(
Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, ArrayBuffer, NewBackingStore);
+ API_RCS_SCOPE(i_isolate, ArrayBuffer, NewBackingStore);
CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::unique_ptr<i::BackingStoreBase> backing_store =
@@ -8063,7 +8055,7 @@ static_assert(
Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer, \
size_t byte_offset, size_t length) { \
i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
- LOG_API(isolate, Type##Array, New); \
+ API_RCS_SCOPE(isolate, Type##Array, New); \
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
if (!Utils::ApiCheck(length <= kMaxLength, \
"v8::" #Type \
@@ -8082,7 +8074,7 @@ static_assert(
CHECK(i::FLAG_harmony_sharedarraybuffer); \
i::Isolate* isolate = \
Utils::OpenHandle(*shared_array_buffer)->GetIsolate(); \
- LOG_API(isolate, Type##Array, New); \
+ API_RCS_SCOPE(isolate, Type##Array, New); \
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
if (!Utils::ApiCheck( \
length <= kMaxLength, \
@@ -8105,7 +8097,7 @@ Local<DataView> DataView::New(Local<ArrayBuffer> array_buffer,
size_t byte_offset, size_t byte_length) {
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
- LOG_API(isolate, DataView, New);
+ API_RCS_SCOPE(isolate, DataView, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -8117,7 +8109,7 @@ Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
CHECK(i::FLAG_harmony_sharedarraybuffer);
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*shared_array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
- LOG_API(isolate, DataView, New);
+ API_RCS_SCOPE(isolate, DataView, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -8133,7 +8125,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
size_t byte_length) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, SharedArrayBuffer, New);
+ API_RCS_SCOPE(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::unique_ptr<i::BackingStore> backing_store =
@@ -8157,7 +8149,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
CHECK_IMPLIES(backing_store->ByteLength() != 0,
backing_store->Data() != nullptr);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, SharedArrayBuffer, New);
+ API_RCS_SCOPE(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::shared_ptr<i::BackingStore> i_backing_store(ToInternal(backing_store));
Utils::ApiCheck(
@@ -8171,7 +8163,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
std::unique_ptr<v8::BackingStore> v8::SharedArrayBuffer::NewBackingStore(
Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, SharedArrayBuffer, NewBackingStore);
+ API_RCS_SCOPE(i_isolate, SharedArrayBuffer, NewBackingStore);
CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::unique_ptr<i::BackingStoreBase> backing_store =
@@ -8198,7 +8190,7 @@ std::unique_ptr<v8::BackingStore> v8::SharedArrayBuffer::NewBackingStore(
Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Symbol, New);
+ API_RCS_SCOPE(i_isolate, Symbol, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
if (!name.IsEmpty()) result->set_description(*Utils::OpenHandle(*name));
@@ -8245,7 +8237,7 @@ WELL_KNOWN_SYMBOLS(SYMBOL_GETTER)
Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Private, New);
+ API_RCS_SCOPE(i_isolate, Private, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
if (!name.IsEmpty()) symbol->set_description(*Utils::OpenHandle(*name));
@@ -8659,20 +8651,9 @@ void Isolate::Initialize(Isolate* isolate,
// Set up code event handlers. Needs to be after i::Snapshot::Initialize
// because that is where we add the isolate to WasmEngine.
auto code_event_handler = params.code_event_handler;
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (code_event_handler == nullptr && i::FLAG_gdbjit) {
- code_event_handler = i::GDBJITInterface::EventHandler;
- }
-#endif // ENABLE_GDB_JIT_INTERFACE
-#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
- if (code_event_handler == nullptr &&
- i::FLAG_enable_system_instrumentation) {
- code_event_handler = i::ETWJITInterface::EventHandler;
- }
-#endif // defined(V8_OS_WIN)
-
if (code_event_handler) {
- isolate->SetJitCodeEventHandler(kJitCodeEventDefault, code_event_handler);
+ isolate->SetJitCodeEventHandler(kJitCodeEventEnumExisting,
+ code_event_handler);
}
}
@@ -9515,7 +9496,7 @@ void Isolate::SetAllowAtomicsWait(bool allow) {
void v8::Isolate::DateTimeConfigurationChangeNotification(
TimeZoneDetection time_zone_detection) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- LOG_API(i_isolate, Isolate, DateTimeConfigurationChangeNotification);
+ API_RCS_SCOPE(i_isolate, Isolate, DateTimeConfigurationChangeNotification);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->date_cache()->ResetDateCache(
static_cast<base::TimezoneCache::TimeZoneDetection>(time_zone_detection));
@@ -9531,7 +9512,7 @@ void v8::Isolate::DateTimeConfigurationChangeNotification(
void v8::Isolate::LocaleConfigurationChangeNotification() {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- LOG_API(i_isolate, Isolate, LocaleConfigurationChangeNotification);
+ API_RCS_SCOPE(i_isolate, Isolate, LocaleConfigurationChangeNotification);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
#ifdef V8_INTL_SUPPORT
@@ -9541,7 +9522,7 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
bool v8::Object::IsCodeLike(v8::Isolate* isolate) const {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Object, IsCodeLike);
+ API_RCS_SCOPE(i_isolate, Object, IsCodeLike);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::HandleScope scope(i_isolate);
return Utils::OpenHandle(this)->IsCodeLike(i_isolate);
@@ -9648,7 +9629,7 @@ String::Value::~Value() { i::DeleteArray(str_); }
#define DEFINE_ERROR(NAME, name) \
Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
i::Isolate* isolate = i::Isolate::Current(); \
- LOG_API(isolate, NAME, New); \
+ API_RCS_SCOPE(isolate, NAME, New); \
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
i::Object error; \
{ \
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 069e31491c..fcfb3ac0a8 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -1,3 +1,4 @@
leszeks@chromium.org
marja@chromium.org
+syg@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 804a6840c1..97db02225f 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -103,6 +103,10 @@ bool Expression::IsNullLiteral() const {
return IsLiteral() && AsLiteral()->type() == Literal::kNull;
}
+bool Expression::IsBooleanLiteral() const {
+ return IsLiteral() && AsLiteral()->type() == Literal::kBoolean;
+}
+
bool Expression::IsTheHoleLiteral() const {
return IsLiteral() && AsLiteral()->type() == Literal::kTheHole;
}
@@ -892,6 +896,24 @@ static bool IsVoidOfLiteral(Expression* expr) {
maybe_unary->expression()->IsLiteral();
}
+static bool MatchLiteralStrictCompareBoolean(Expression* left, Token::Value op,
+ Expression* right,
+ Expression** expr,
+ Literal** literal) {
+ if (left->IsBooleanLiteral() && op == Token::EQ_STRICT) {
+ *expr = right;
+ *literal = left->AsLiteral();
+ return true;
+ }
+ return false;
+}
+
+bool CompareOperation::IsLiteralStrictCompareBoolean(Expression** expr,
+ Literal** literal) {
+ return MatchLiteralStrictCompareBoolean(left_, op(), right_, expr, literal) ||
+ MatchLiteralStrictCompareBoolean(right_, op(), left_, expr, literal);
+}
+
// Check for the pattern: void <literal> equals <expression> or
// undefined equals <expression>
static bool MatchLiteralCompareUndefined(Expression* left, Token::Value op,
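For readers skimming the AST change above: the new matcher is deliberately symmetric, so both "true === x" and "x === true" hand the literal/expression pair back to the caller. A minimal standalone sketch of that shape, using stand-in types rather than V8's Expression/Literal classes (names here are illustrative only):

#include <iostream>
#include <optional>
#include <string>

struct Expr {
  bool is_boolean_literal = false;
  std::string name;  // display only
};

// Stand-in for CompareOperation::IsLiteralStrictCompareBoolean: if either side
// of a strict equality is a boolean literal, return the other side.
std::optional<Expr> MatchBooleanStrictCompare(const std::string& op,
                                              const Expr& left,
                                              const Expr& right) {
  if (op != "===") return std::nullopt;
  if (left.is_boolean_literal) return right;
  if (right.is_boolean_literal) return left;
  return std::nullopt;
}

int main() {
  Expr lit{true, "true"};
  Expr x{false, "x"};
  if (auto e = MatchBooleanStrictCompare("===", lit, x)) {
    std::cout << "non-literal side: " << e->name << "\n";  // prints x
  }
}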
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 1fb5abdf8f..a348ccf82a 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -236,6 +236,8 @@ class Expression : public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral() const;
+ bool IsBooleanLiteral() const;
+
// True iff the expression is the hole literal.
bool IsTheHoleLiteral() const;
@@ -955,6 +957,11 @@ class Literal final : public Expression {
return Smi::FromInt(smi_);
}
+ bool AsBooleanLiteral() const {
+ DCHECK_EQ(kBoolean, type());
+ return boolean_;
+ }
+
// Returns true if literal represents a Number.
bool IsNumber() const { return type() == kHeapNumber || type() == kSmi; }
double AsNumber() const {
@@ -1598,14 +1605,14 @@ class OptionalChain final : public Expression {
// Otherwise, the assignment is to a non-property (a global, a local slot, a
// parameter slot, or a destructuring pattern).
enum AssignType {
- NON_PROPERTY, // destructuring
- NAMED_PROPERTY, // obj.key
- KEYED_PROPERTY, // obj[key]
- NAMED_SUPER_PROPERTY, // super.key
- KEYED_SUPER_PROPERTY, // super[key]
- PRIVATE_METHOD, // obj.#key: #key is a private method
- PRIVATE_GETTER_ONLY, // obj.#key: #key only has a getter defined
- PRIVATE_SETTER_ONLY, // obj.#key: #key only has a setter defined
+ NON_PROPERTY, // destructuring
+ NAMED_PROPERTY, // obj.key
+ KEYED_PROPERTY, // obj[key] and obj.#key when #key is a private field
+ NAMED_SUPER_PROPERTY, // super.key
+ KEYED_SUPER_PROPERTY, // super[key]
+ PRIVATE_METHOD, // obj.#key: #key is a private method
+ PRIVATE_GETTER_ONLY, // obj.#key: #key only has a getter defined
+ PRIVATE_SETTER_ONLY, // obj.#key: #key only has a setter defined
PRIVATE_GETTER_AND_SETTER // obj.#key: #key has both accessors defined
};
@@ -1963,6 +1970,7 @@ class CompareOperation final : public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Literal** literal);
+ bool IsLiteralStrictCompareBoolean(Expression** expr, Literal** literal);
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index be045b0e68..76fc6449b9 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -112,6 +112,14 @@ class AsAtomicImpl {
}
template <typename T>
+ static T SeqCst_Swap(T* addr,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ return base::SeqCst_AtomicExchange(
+ to_storage_addr(addr), cast_helper<T>::to_storage_type(new_value));
+ }
+
+ template <typename T>
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
@@ -190,6 +198,7 @@ class AsAtomicImpl {
};
using AsAtomic8 = AsAtomicImpl<base::Atomic8>;
+using AsAtomic16 = AsAtomicImpl<base::Atomic16>;
using AsAtomic32 = AsAtomicImpl<base::Atomic32>;
using AsAtomicWord = AsAtomicImpl<base::AtomicWord>;
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index f6b516ad9e..82f569ecdf 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -130,6 +130,12 @@ inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
std::memory_order_relaxed);
}
+inline Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
@@ -267,6 +273,12 @@ inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
std::memory_order_relaxed);
}
+inline Atomic64 SeqCst_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
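The new SeqCst_AtomicExchange overloads (and the SeqCst_Swap wrapper in atomic-utils.h above) boil down to a std::atomic exchange with sequentially consistent ordering. A small standalone illustration of that primitive, independent of the V8 wrappers:

#include <atomic>
#include <cstdint>
#include <iostream>

int main() {
  std::atomic<uint32_t> cell{41};
  // Atomically store the new value and return the previous one; seq_cst means
  // the exchange participates in the single global order of SC operations.
  uint32_t old_value = cell.exchange(42, std::memory_order_seq_cst);
  std::cout << old_value << " -> " << cell.load() << "\n";  // 41 -> 42
}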
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 5c31addd39..9fe237fb30 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -329,6 +329,16 @@ inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
return rhs ? lhs % rhs : 0u;
}
+// Wraparound integer arithmetic without undefined behavior.
+
+inline int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) {
+ return static_cast<int32_t>(static_cast<uint32_t>(lhs) +
+ static_cast<uint32_t>(rhs));
+}
+
+inline int32_t WraparoundNeg32(int32_t x) {
+ return static_cast<int32_t>(-static_cast<uint32_t>(x));
+}
// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|,
// checks and returns the result.
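The point of the new bits.h helpers is that signed overflow is undefined behavior while unsigned arithmetic wraps modulo 2^32, so the work is done on uint32_t and converted back. A standalone sketch of the same idea (an illustration of the technique, not the V8 header itself):

#include <cstdint>
#include <iostream>

// Add in unsigned space, then convert back; the conversion is the usual
// two's-complement wrap (and is fully defined behavior since C++20).
int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) +
                              static_cast<uint32_t>(rhs));
}

int main() {
  std::cout << WraparoundAdd32(INT32_MAX, 1) << "\n";  // -2147483648, no UB
}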
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index fcb9f8756f..61644ffe05 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -251,12 +251,6 @@ struct Use {
(void)unused_tmp_array_for_use_macro; \
} while (false)
-// Evaluate the instantiations of an expression with parameter packs.
-// Since USE has left-to-right evaluation order of it's arguments,
-// the parameter pack is iterated from left to right and side effects
-// have defined behavior.
-#define ITERATE_PACK(...) USE(0, ((__VA_ARGS__), 0)...)
-
} // namespace base
} // namespace v8
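ITERATE_PACK can be dropped because C++17 fold expressions give the same guaranteed left-to-right evaluation directly; the ia32 baseline assembler later in this patch switches to exactly this pattern with (__ Pop(registers), ...). A minimal standalone sketch of the replacement:

#include <iostream>

void PushOne(int v) { std::cout << v << ' '; }

// The comma fold expands to (PushOne(a1), (PushOne(a2), PushOne(a3))) and the
// comma operator evaluates strictly left to right, which is the behavior
// ITERATE_PACK used to provide via USE().
template <typename... Args>
void PushAll(Args... args) {
  (PushOne(args), ...);
}

int main() {
  PushAll(1, 2, 3);  // prints "1 2 3"
}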
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index c4224e84e3..98d88c1270 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -93,7 +93,7 @@ inline void CallOnce(OnceType* once, std::function<void()> init_func) {
}
template <typename... Args, typename = std::enable_if_t<
- conjunction<std::is_scalar<Args>...>::value>>
+ std::conjunction_v<std::is_scalar<Args>...>>>
inline void CallOnce(OnceType* once,
typename FunctionWithArgs<Args...>::type init_func,
Args... args) {
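With base::conjunction removed (see template-utils.h and the deleted type-traits.h below), the constraint is now spelled with the standard C++17 trait. A self-contained sketch of the same enable_if pattern:

#include <string>
#include <type_traits>

// Participates in overload resolution only when every argument type is scalar,
// mirroring the updated constraint on base::CallOnce.
template <typename... Args,
          typename = std::enable_if_t<std::conjunction_v<std::is_scalar<Args>...>>>
void RequiresScalars(Args...) {}

int main() {
  RequiresScalars(1, 2.5, 'c');      // OK: all scalar types
  // RequiresScalars(std::string{}); // would not compile: std::string is not scalar
}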
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index bba8d3c699..a97295b4cc 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -87,6 +87,33 @@ void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
return reinterpret_cast<void*>(addr);
}
+// static
+bool OS::RemapPages(const void* address, size_t size, void* new_address,
+ MemoryPermission access) {
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(address), AllocatePageSize()));
+ DCHECK(
+ IsAligned(reinterpret_cast<uintptr_t>(new_address), AllocatePageSize()));
+ DCHECK(IsAligned(size, AllocatePageSize()));
+
+ vm_prot_t cur_protection = GetVMProtFromMemoryPermission(access);
+ vm_prot_t max_protection;
+ // Asks the kernel to remap *on top* of an existing mapping, rather than
+ // copying the data.
+ int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
+ mach_vm_address_t target = reinterpret_cast<mach_vm_address_t>(new_address);
+ kern_return_t ret =
+ mach_vm_remap(mach_task_self(), &target, size, 0, flags, mach_task_self(),
+ reinterpret_cast<mach_vm_address_t>(address), FALSE,
+ &cur_protection, &max_protection, VM_INHERIT_NONE);
+
+ if (ret != KERN_SUCCESS) return false;
+
+ // Did we get the address we wanted?
+ CHECK_EQ(new_address, reinterpret_cast<void*>(target));
+
+ return true;
+}
+
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 664ed301c8..280d7f88f8 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -581,7 +581,25 @@ void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
// Need to disable CFI_ICALL due to the indirect call to memfd_create.
DISABLE_CFI_ICALL
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
+#if V8_OS_LINUX && !V8_OS_ANDROID
+ // Use memfd_create if available, otherwise mkstemp.
+ using memfd_create_t = int (*)(const char*, unsigned int);
+ memfd_create_t memfd_create =
+ reinterpret_cast<memfd_create_t>(dlsym(RTLD_DEFAULT, "memfd_create"));
+ int fd = -1;
+ if (memfd_create) {
+ fd = memfd_create("V8MemFDForTesting", MFD_CLOEXEC);
+ } else {
+ char filename[] = "/tmp/v8_tmp_file_for_testing_XXXXXX";
+ fd = mkstemp(filename);
+ if (fd != -1) CHECK_EQ(0, unlink(filename));
+ }
+ if (fd == -1) return kInvalidSharedMemoryHandle;
+ CHECK_EQ(0, ftruncate(fd, size));
+ return SharedMemoryHandleFromFileDescriptor(fd);
+#else
return kInvalidSharedMemoryHandle;
+#endif
}
// static
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index b696669142..d495a30212 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -90,6 +90,11 @@ int fopen_s(FILE** pFile, const char* filename, const char* mode) {
return *pFile != nullptr ? 0 : 1;
}
+int _wfopen_s(FILE** pFile, const wchar_t* filename, const wchar_t* mode) {
+ *pFile = _wfopen(filename, mode);
+ return *pFile != nullptr ? 0 : 1;
+}
+
int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
const char* format, va_list argptr) {
DCHECK(count == _TRUNCATE);
@@ -593,10 +598,25 @@ static void VPrintHelper(FILE* stream, const char* format, va_list args) {
}
}
+// Converts a UTF-8 encoded string to a UTF-16 encoded one.
+static std::wstring ConvertUtf8StringToUtf16(const char* str) {
+ // On Windows wchar_t must be a 16-bit value.
+ static_assert(sizeof(wchar_t) == 2, "wrong wchar_t size");
+ std::wstring utf16_str;
+ int name_length = static_cast<int>(strlen(str));
+ int len = MultiByteToWideChar(CP_UTF8, 0, str, name_length, nullptr, 0);
+ if (len > 0) {
+ utf16_str.resize(len);
+ MultiByteToWideChar(CP_UTF8, 0, str, name_length, &utf16_str[0], len);
+ }
+ return utf16_str;
+}
FILE* OS::FOpen(const char* path, const char* mode) {
FILE* result;
- if (fopen_s(&result, path, mode) == 0) {
+ std::wstring utf16_path = ConvertUtf8StringToUtf16(path);
+ std::wstring utf16_mode = ConvertUtf8StringToUtf16(mode);
+ if (_wfopen_s(&result, utf16_path.c_str(), utf16_mode.c_str()) == 0) {
return result;
} else {
return nullptr;
@@ -1141,8 +1161,11 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
if (mode == FileMode::kReadWrite) {
access |= GENERIC_WRITE;
}
- HANDLE file = CreateFileA(name, access, FILE_SHARE_READ | FILE_SHARE_WRITE,
- nullptr, OPEN_EXISTING, 0, nullptr);
+
+ std::wstring utf16_name = ConvertUtf8StringToUtf16(name);
+ HANDLE file = CreateFileW(utf16_name.c_str(), access,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+ OPEN_EXISTING, 0, nullptr);
if (file == INVALID_HANDLE_VALUE) return nullptr;
DWORD size = GetFileSize(file, nullptr);
@@ -1165,8 +1188,9 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
+ std::wstring utf16_name = ConvertUtf8StringToUtf16(name);
// Open a physical file.
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+ HANDLE file = CreateFileW(utf16_name.c_str(), GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_ALWAYS, 0, nullptr);
if (file == nullptr) return nullptr;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 0a359ad211..e801ec78c2 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -33,6 +33,7 @@
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
#if V8_OS_QNX
#include "src/base/qnx-math.h"
@@ -313,6 +314,28 @@ class V8_BASE_EXPORT OS {
[[noreturn]] static void ExitProcess(int exit_code);
+ // Whether the platform supports mapping a given address in another location
+ // in the address space.
+ V8_WARN_UNUSED_RESULT static constexpr bool IsRemapPageSupported() {
+#ifdef V8_OS_MACOS
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ // Remaps already-mapped memory at |new_address| with |access| permissions.
+ //
+ // Both the source and target addresses must be page-aligned, and |size| must
+ // be a multiple of the system page size. If there is already memory mapped
+ // at the target address, it is replaced by the new mapping.
+ //
+ // Must not be called if |IsRemapPageSupported()| returns false.
+ // Returns true for success.
+ V8_WARN_UNUSED_RESULT static bool RemapPages(const void* address, size_t size,
+ void* new_address,
+ MemoryPermission access);
+
private:
// These classes use the private memory management API below.
friend class AddressSpaceReservation;
@@ -321,6 +344,7 @@ class V8_BASE_EXPORT OS {
friend class v8::base::PageAllocator;
friend class v8::base::VirtualAddressSpace;
friend class v8::base::VirtualAddressSubspace;
+ FRIEND_TEST(OS, RemapPages);
static size_t AllocatePageSize();
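A hedged usage sketch of the new remap API declared above. Only IsRemapPageSupported/RemapPages and their preconditions come from the header; the caller name, the kReadExecute permission value, and the fallback behavior are illustrative assumptions:

#include "src/base/platform/platform.h"

// Illustrative only: assumes |source| is an existing page-aligned mapping of
// |size| bytes (a multiple of the allocation page size) and |target| is a
// page-aligned destination address.
bool TryMirrorPages(const void* source, size_t size, void* target) {
  if (!v8::base::OS::IsRemapPageSupported()) {
    return false;  // caller would fall back to copying the pages
  }
  return v8::base::OS::RemapPages(source, size, target,
                                  v8::base::OS::MemoryPermission::kReadExecute);
}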
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index f222593e2d..3394b60f7a 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -60,53 +60,6 @@ struct has_output_operator<
T, TStream, decltype(void(std::declval<TStream&>() << std::declval<T>()))>
: std::true_type {};
-// Fold all arguments from left to right with a given function.
-template <typename Func, typename T>
-constexpr auto fold(Func func, T&& t) {
- return std::forward<T>(t);
-}
-
-template <typename Func, typename T1, typename T2, typename... Ts>
-constexpr auto fold(Func func, T1&& first, T2&& second, Ts&&... more) {
- auto&& folded = func(std::forward<T1>(first), std::forward<T2>(second));
- return fold(std::move(func), std::forward<decltype(folded)>(folded),
- std::forward<Ts>(more)...);
-}
-
-// {is_same<Ts...>::value} is true if all Ts are the same, false otherwise.
-template <typename... Ts>
-struct is_same : public std::false_type {};
-template <>
-struct is_same<> : public std::true_type {};
-template <typename T>
-struct is_same<T> : public std::true_type {};
-template <typename T, typename... Ts>
-struct is_same<T, T, Ts...> : public is_same<T, Ts...> {};
-
-// Returns true, iff all values (implicitly converted to bool) are trueish.
-template <typename... Args>
-constexpr bool all(Args... rest) {
- return fold(std::logical_and<>{}, true, rest...);
-}
-
-template <class... Ts>
-struct make_void {
- using type = void;
-};
-// Corresponds to C++17's std::void_t.
-// Used for SFINAE based on type errors.
-template <class... Ts>
-using void_t = typename make_void<Ts...>::type;
-
-// Corresponds to C++17's std::conjunction
-template <class...>
-struct conjunction : std::true_type {};
-template <class B>
-struct conjunction<B> : B {};
-template <class B, class... Bn>
-struct conjunction<B, Bn...>
- : std::conditional_t<bool(B::value), conjunction<Bn...>, B> {};
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
index 807ff4f2a8..dfca0c4c25 100644
--- a/deps/v8/src/base/threaded-list.h
+++ b/deps/v8/src/base/threaded-list.h
@@ -266,6 +266,17 @@ class ThreadedListBase final : public BaseClass {
return true;
}
+ void RevalidateTail() {
+ T* last = *tail_;
+ if (last != nullptr) {
+ while (*TLTraits::next(last) != nullptr) {
+ last = *TLTraits::next(last);
+ }
+ tail_ = TLTraits::next(last);
+ }
+ SLOW_DCHECK(Verify());
+ }
+
private:
T* head_;
T** tail_;
diff --git a/deps/v8/src/base/type-traits.h b/deps/v8/src/base/type-traits.h
deleted file mode 100644
index 9b6e8971a7..0000000000
--- a/deps/v8/src/base/type-traits.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_TYPE_TRAITS_H_
-#define V8_BASE_TYPE_TRAITS_H_
-
-#include <type_traits>
-
-namespace v8 {
-namespace internal {
-
-// Conjunction metafunction.
-template <class... Args>
-struct conjunction;
-
-template <>
-struct conjunction<> : std::true_type {};
-
-template <class T>
-struct conjunction<T> : T {};
-
-template <class T, class... Args>
-struct conjunction<T, Args...>
- : std::conditional<T::value, conjunction<Args...>, T>::type {};
-
-// Disjunction metafunction.
-template <class... Args>
-struct disjunction;
-
-template <>
-struct disjunction<> : std::true_type {};
-
-template <class T>
-struct disjunction<T> : T {};
-
-template <class T, class... Args>
-struct disjunction<T, Args...>
- : std::conditional<T::value, T, disjunction<Args...>>::type {};
-
-// Negation metafunction.
-template <class T>
-struct negation : std::integral_constant<bool, !T::value> {};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_BASE_TYPE_TRAITS_H_
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index f77b85e2ef..b19b2f337c 100644
--- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -102,18 +102,28 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ b(target);
}
+
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
+
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ JumpIf(cc, left, Operand(right), target, distance);
+}
+
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
@@ -351,18 +361,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ ldrh(output, FieldMemOperand(source, offset));
+}
+
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ ldrb(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
@@ -371,6 +390,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
__ mov(tmp, Operand(value));
__ str(tmp, FieldMemOperand(target, offset));
}
+
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
@@ -380,6 +400,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
+
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
@@ -432,6 +453,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, lhs, Operand(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ and_(output, lhs, Operand(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index b08ac0d7ac..9c074c0b0e 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -100,23 +100,33 @@ void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ B(target);
}
+
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
+
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ JumpIf(cc, left, Immediate(right), target, distance);
+}
+
void BaselineAssembler::CallBuiltin(Builtin builtin) {
if (masm()->options().short_builtin_calls) {
// Generate pc-relative call.
@@ -424,18 +434,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ Ldrh(output, FieldMemOperand(source, offset));
+}
+
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ Ldrb(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
@@ -444,6 +463,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
__ Mov(tmp, Operand(value));
__ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
+
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
@@ -452,6 +472,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
+
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
@@ -509,6 +530,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ And(output, lhs, Immediate(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index e0fe720bc4..41f34684a1 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -79,6 +79,9 @@ class BaselineAssembler {
Label::Distance distance = Label::kFar);
inline void JumpIfSmi(Condition cc, Register lhs, Register rhs, Label* target,
Label::Distance distance = Label::kFar);
+ inline void JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance = Label::kFar);
inline void JumpIfTagged(Condition cc, Register value, MemOperand operand,
Label* target,
Label::Distance distance = Label::kFar);
@@ -152,7 +155,9 @@ class BaselineAssembler {
inline void LoadTaggedSignedField(Register output, Register source,
int offset);
inline void LoadTaggedAnyField(Register output, Register source, int offset);
- inline void LoadByteField(Register output, Register source, int offset);
+ inline void LoadWord16FieldZeroExtend(Register output, Register source,
+ int offset);
+ inline void LoadWord8Field(Register output, Register source, int offset);
inline void StoreTaggedSignedField(Register target, int offset, Smi value);
inline void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
Register value);
@@ -173,6 +178,8 @@ class BaselineAssembler {
inline void SmiUntag(Register value);
inline void SmiUntag(Register output, Register value);
+ inline void Word32And(Register output, Register lhs, int rhs);
+
inline void Switch(Register reg, int case_value_base, Label** labels,
int num_labels);
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index e0f528bcbe..78e59afb6a 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -29,8 +29,7 @@ namespace baseline {
static bool CanCompileWithConcurrentBaseline(SharedFunctionInfo shared,
Isolate* isolate) {
- return !shared.is_compiled() || shared.HasBaselineCode() ||
- !CanCompileWithBaseline(isolate, shared);
+ return !shared.HasBaselineCode() && CanCompileWithBaseline(isolate, shared);
}
class BaselineCompilerTask {
@@ -71,9 +70,8 @@ class BaselineCompilerTask {
}
shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
- // Arm back edges for OSR
shared_function_info_->GetBytecodeArray(isolate)
- .set_osr_loop_nesting_level(AbstractCode::kMaxLoopNestingMarker);
+ .RequestOsrAtNextOpportunity();
}
if (FLAG_trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
@@ -107,7 +105,7 @@ class BaselineBatchCompilerJob {
if (!maybe_sfi.GetHeapObjectIfWeak(&obj)) continue;
// Skip functions where the bytecode has been flushed.
SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
- if (CanCompileWithConcurrentBaseline(shared, isolate)) continue;
+ if (!CanCompileWithConcurrentBaseline(shared, isolate)) continue;
tasks_.emplace_back(isolate, handles_.get(), shared);
}
if (FLAG_trace_baseline_concurrent_compilation) {
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index e057535020..9d4d480da2 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -1538,7 +1538,7 @@ void BaselineCompiler::VisitTestUndetectable() {
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
- __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
@@ -1665,7 +1665,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// All other undetectable maps are typeof undefined.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
- __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
@@ -1685,7 +1685,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// Check if the map is callable but not undetectable.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
- __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask,
Condition::kZero, &not_callable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
@@ -1717,7 +1717,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// If the map is undetectable or callable, return false.
Register map_bit_field = kInterpreterAccumulatorRegister;
- __ LoadByteField(map_bit_field, map, Map::kBitFieldOffset);
+ __ LoadWord8Field(map_bit_field, map, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field,
Map::Bits1::IsUndetectableBit::kMask |
Map::Bits1::IsCallableBit::kMask,
@@ -1925,21 +1925,50 @@ void BaselineCompiler::VisitCreateRestParameter() {
}
void BaselineCompiler::VisitJumpLoop() {
- BaselineAssembler::ScratchRegisterScope scope(&basm_);
- Register scratch = scope.AcquireScratch();
- Label osr_not_armed;
+ Label osr_not_armed, osr;
{
+ BaselineAssembler::ScratchRegisterScope scope(&basm_);
+ Register osr_urgency_and_install_target = scope.AcquireScratch();
+
ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
- Register osr_level = scratch;
- __ LoadRegister(osr_level, interpreter::Register::bytecode_array());
- __ LoadByteField(osr_level, osr_level,
- BytecodeArray::kOsrLoopNestingLevelOffset);
+ __ LoadRegister(osr_urgency_and_install_target,
+ interpreter::Register::bytecode_array());
+ __ LoadWord16FieldZeroExtend(
+ osr_urgency_and_install_target, osr_urgency_and_install_target,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset);
int loop_depth = iterator().GetImmediateOperand(1);
- __ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth,
- &osr_not_armed);
- CallBuiltin<Builtin::kBaselineOnStackReplacement>();
+ __ JumpIfImmediate(Condition::kUnsignedLessThanEqual,
+ osr_urgency_and_install_target, loop_depth,
+ &osr_not_armed, Label::kNear);
+
+ // TODO(jgruber): Move the extended checks into the
+ // BaselineOnStackReplacement builtin.
+
+ // OSR based on urgency, i.e. is the OSR urgency greater than the current
+ // loop depth?
+ STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
+ Register scratch2 = scope.AcquireScratch();
+ __ Word32And(scratch2, osr_urgency_and_install_target,
+ BytecodeArray::OsrUrgencyBits::kMask);
+ __ JumpIfImmediate(Condition::kUnsignedGreaterThan, scratch2, loop_depth,
+ &osr, Label::kNear);
+
+ // OSR based on the install target offset, i.e. does the current bytecode
+ // offset match the install target offset?
+ static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
+ static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
+ const int encoded_current_offset =
+ BytecodeArray::OsrInstallTargetFor(
+ BytecodeOffset{iterator().current_offset()})
+ << kShift;
+ __ Word32And(scratch2, osr_urgency_and_install_target, kMask);
+ __ JumpIfImmediate(Condition::kNotEqual, scratch2, encoded_current_offset,
+ &osr_not_armed, Label::kNear);
}
+ __ Bind(&osr);
+ CallBuiltin<Builtin::kBaselineOnStackReplacement>();
+
__ Bind(&osr_not_armed);
Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
int weight = iterator().GetRelativeJumpTargetOffset() -
@@ -2184,7 +2213,7 @@ void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
LoadRegister(reg, 0);
Register map_bit_field = scratch_scope.AcquireScratch();
__ LoadMap(map_bit_field, reg);
- __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask,
Condition::kNotZero, &done, Label::kNear);
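
Note on the VisitJumpLoop hunk above: a loop back edge now arms OSR either when the OSR urgency exceeds the current loop depth or when the encoded install target matches the current bytecode offset. Below is a minimal standalone C++ sketch of that check (editor's illustration, not part of the patch); the bit widths are assumptions, the real constants are BytecodeArray::OsrUrgencyBits and OsrInstallTargetBits.

// Sketch of the OSR arming check, with an assumed layout: urgency in the low
// 3 bits, install target in the remaining 13 bits of the packed 16-bit field.
#include <cstdint>

constexpr uint32_t kOsrUrgencyMask = 0x0007;        // assumed width
constexpr uint32_t kOsrInstallTargetMask = 0xFFF8;  // assumed width

// Returns true when the back edge of a loop at |loop_depth| should enter the
// BaselineOnStackReplacement builtin, given the packed field loaded from the
// BytecodeArray and the already-encoded current bytecode offset.
bool ShouldOsr(uint16_t osr_urgency_and_install_target, uint32_t loop_depth,
               uint32_t encoded_current_offset) {
  // Fast path, mirroring the first JumpIfImmediate in the hunk: if the whole
  // packed field is <= loop_depth, the loop is treated as not armed.
  if (osr_urgency_and_install_target <= loop_depth) return false;
  // OSR by urgency: urgency strictly greater than the current loop depth.
  if ((osr_urgency_and_install_target & kOsrUrgencyMask) > loop_depth)
    return true;
  // OSR by install target: the encoded install target must exactly match the
  // encoded current bytecode offset; otherwise the loop is not armed.
  return (osr_urgency_and_install_target & kOsrInstallTargetMask) ==
         encoded_current_offset;
}
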
diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index 6c36c7e8ba..c630ccd6a7 100644
--- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -103,18 +103,29 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ jmp(target, distance);
}
+
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfRoot(value, index, target, distance);
}
+
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfNotRoot(value, index, target, distance);
}
+
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfSmi(value, target, distance);
}
+
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ __ cmp(left, Immediate(right));
+ __ j(AsMasmCondition(cc), target, distance);
+}
+
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfNotSmi(value, target, distance);
@@ -316,29 +327,39 @@ void BaselineAssembler::PushReverse(T... vals) {
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
- ITERATE_PACK(__ Pop(registers));
+ (__ Pop(registers), ...);
}
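
The Pop specialization above replaces the ITERATE_PACK macro with a C++17 fold expression. A self-contained illustration of how a comma fold expands a parameter pack (editor's sketch; Pop/PopAll are stand-ins, not V8 code):

#include <cstdio>

void Pop(int reg) { std::printf("pop r%d\n", reg); }

template <typename... T>
void PopAll(T... regs) {
  (Pop(regs), ...);  // comma fold: calls Pop for each pack element, left to right
}

int main() { PopAll(0, 1, 2); }  // prints: pop r0, pop r1, pop r2
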
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ movzx_w(output, FieldOperand(source, offset));
+}
+
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ mov_b(output, FieldOperand(source, offset));
}
+
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
__ mov(FieldOperand(target, offset), Immediate(value));
}
+
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
@@ -349,6 +370,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ mov(FieldOperand(target, offset), value);
__ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
+
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
@@ -391,6 +413,11 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, Immediate(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ Move(output, lhs);
+ __ and_(output, Immediate(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
index 25b279ff8e..c124bbf12c 100644
--- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -113,6 +113,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ JumpIf(cc, left, Operand(right), target, distance);
+}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
@@ -346,8 +351,12 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Ld_d(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ Ld_hu(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ Ld_b(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
@@ -415,6 +424,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Add_d(lhs, lhs, Operand(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ And(output, lhs, Operand(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index c33ff88024..237f706ea6 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -115,6 +115,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ JumpIf(cc, left, Operand(right), target, distance);
+}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
@@ -356,8 +361,12 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Lw(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ lhu(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
@@ -427,6 +436,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Addu(lhs, lhs, Operand(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ And(output, lhs, Operand(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index 8aa9122f51..7cc1e1808c 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -113,6 +113,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ JumpIf(cc, left, Operand(right), target, distance);
+}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
@@ -354,8 +359,12 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Ld(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ Lhu(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
@@ -425,6 +434,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Daddu(lhs, lhs, Operand(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ And(output, lhs, Operand(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
index 2058cd7ff3..e11ab1d22c 100644
--- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
+++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
@@ -20,7 +20,7 @@ static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
- return op.rb() == target || op.rx() == target;
+ return op.rb() == target || op.ra() == target;
}
#endif
} // namespace detail
@@ -108,30 +108,57 @@ inline internal::Condition AsMasmCondition(Condition cond) {
}
}
-namespace detail {
+inline bool IsSignedCondition(Condition cond) {
+ switch (cond) {
+ case Condition::kEqual:
+ case Condition::kNotEqual:
+ case Condition::kLessThan:
+ case Condition::kGreaterThan:
+ case Condition::kLessThanEqual:
+ case Condition::kGreaterThanEqual:
+ case Condition::kOverflow:
+ case Condition::kNoOverflow:
+ case Condition::kZero:
+ case Condition::kNotZero:
+ return true;
-#ifdef DEBUG
-inline bool Clobbers(Register target, MemOperand op) {
- UNIMPLEMENTED();
- return false;
+ case Condition::kUnsignedLessThan:
+ case Condition::kUnsignedGreaterThan:
+ case Condition::kUnsignedLessThanEqual:
+ case Condition::kUnsignedGreaterThanEqual:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
}
-#endif
-} // namespace detail
+#define __ assm->
+// ppc helper
+static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
+ Register rhs, Label* target) {
+ if (IsSignedCondition(cc)) {
+ __ CmpS64(lhs, rhs);
+ } else {
+ __ CmpU64(lhs, rhs);
+ }
+ __ b(AsMasmCondition(cc), target);
+}
+#undef __
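
The JumpIfHelper added above has to choose between CmpS64 and CmpU64 because the same bit pattern orders differently under signed and unsigned comparison. A small host-side illustration (editor's sketch, not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t a = ~uint64_t{0};  // all ones: UINT64_MAX unsigned, -1 signed
  uint64_t b = 1;
  bool unsigned_less = a < b;                                            // false
  bool signed_less = static_cast<int64_t>(a) < static_cast<int64_t>(b);  // true
  std::printf("unsigned a<b: %d, signed a<b: %d\n", unsigned_less, signed_less);
}
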
#define __ masm_->
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
- UNIMPLEMENTED();
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
interpreter::Register interpreter_register, Register rscratch) {
- UNIMPLEMENTED();
+ return __ AddS64(
+ rscratch, fp,
+ Operand(interpreter_register.ToOperand() * kSystemPointerSize));
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
- UNIMPLEMENTED();
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -140,108 +167,203 @@ void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
void BaselineAssembler::JumpTarget() {
// NOP on arm.
- UNIMPLEMENTED();
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ b(target);
}
+
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ JumpIfRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ JumpIfNotRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ JumpIfSmi(value, target);
+}
+
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ ASM_CODE_COMMENT(masm_);
+ JumpIf(cc, left, Operand(right), target, distance);
}
+
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ JumpIfNotSmi(value, target);
}
-void BaselineAssembler::CallBuiltin(Builtin builtin) { UNIMPLEMENTED(); }
+void BaselineAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ CallBuiltin(builtin, al);
+ } else {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
+ }
+}
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("tail call", builtin));
- UNIMPLEMENTED();
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ TailCallBuiltin(builtin);
+ } else {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
+ }
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ AndU64(r0, value, Operand(mask), ip, SetRC);
+ __ b(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ if (IsSignedCondition(cc)) {
+ __ CmpS64(lhs, rhs, r0);
+ } else {
+ __ CmpU64(lhs, rhs, r0);
+ }
+ __ b(AsMasmCondition(cc), target);
}
+
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
Register map, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ LoadMap(map, object);
+ __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ JumpIf(cc, type, Operand(instance_type), target);
}
+
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ CompareObjectType(map, type, type, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ JumpIf(cc, type, Operand(instance_type), target);
}
+
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ LoadU64(tmp, operand);
+ JumpIfHelper(masm_, cc, value, tmp, target);
}
+
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ AssertSmi(value);
+ __ LoadSmiLiteral(r0, smi);
+ JumpIfHelper(masm_, cc, value, r0, target);
}
+
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ JumpIfHelper(masm_, cc, lhs, rhs, target);
}
+
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ LoadU64(r0, operand);
+ JumpIfHelper(masm_, cc, value, r0, target);
}
+
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ LoadU64(r0, operand);
+ JumpIfHelper(masm_, cc, r0, value, target);
}
+
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ JumpIf(cc, value, Operand(byte), target);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ Move(RegisterFrameOperand(output), source);
}
+
void BaselineAssembler::Move(Register output, TaggedIndex value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ mov(output, Operand(value.ptr()));
}
+
void BaselineAssembler::Move(MemOperand output, Register source) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ StoreU64(source, output);
}
+
void BaselineAssembler::Move(Register output, ExternalReference reference) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ Move(output, reference);
}
+
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ Move(output, value);
}
+
void BaselineAssembler::Move(Register output, int32_t value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ mov(output, Operand(value));
}
+
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ mr(output, source);
}
+
void BaselineAssembler::MoveSmi(Register output, Register source) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ mr(output, source);
}
namespace detail {
@@ -250,7 +372,8 @@ template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Arg arg) {
- UNIMPLEMENTED();
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
@@ -355,33 +478,58 @@ void BaselineAssembler::Pop(T... registers) {
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
}
+
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ LoadTaggedSignedField(output, FieldMemOperand(source, offset), r0);
}
+
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
- UNIMPLEMENTED();
+
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ ASM_CODE_COMMENT(masm_);
+ __ LoadU16(output, FieldMemOperand(source, offset), r0);
}
+
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
+ ASM_CODE_COMMENT(masm_);
+ __ LoadU8(output, FieldMemOperand(source, offset), r0);
+}
+
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ LoadSmiLiteral(tmp, value);
+ __ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0);
}
+
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ Register scratch = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(target, value, scratch));
+ __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
+ __ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
- UNIMPLEMENTED();
+ ASM_CODE_COMMENT(masm_);
+ __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
@@ -401,6 +549,10 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
UNIMPLEMENTED();
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ AndU32(output, lhs, Operand(rhs));
+}
+
#undef __
#define __ basm.
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 7aef7d138e..d7b9566a16 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -112,7 +112,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
-
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ JumpIf(cc, left, Operand(right), target, distance);
+}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("call", builtin));
@@ -347,8 +351,12 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ Lhu(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
@@ -424,6 +432,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ And(output, lhs, Operand(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
index 705e7bbd85..628fcc463a 100644
--- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
+++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
@@ -170,26 +170,44 @@ void BaselineAssembler::JumpTarget() {
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ ASM_CODE_COMMENT(masm_);
__ b(target);
}
+
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ JumpIfRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ JumpIfNotRoot(value, index, target);
}
+
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ JumpIfSmi(value, target);
}
+
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ ASM_CODE_COMMENT(masm_);
+ JumpIf(cc, left, Operand(right), target, distance);
+}
+
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ JumpIfNotSmi(value, target);
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
if (masm()->options().short_builtin_calls) {
// Generate pc-relative call.
__ CallBuiltin(builtin);
@@ -217,12 +235,14 @@ void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ AndP(r0, value, Operand(mask));
__ b(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
if (IsSignedCondition(cc)) {
__ CmpS64(lhs, rhs);
} else {
@@ -235,15 +255,18 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
Register map, Label* target,
Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ LoadMap(map, object);
__ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
JumpIf(cc, type, Operand(instance_type), target);
}
+
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
if (FLAG_debug_code) {
@@ -254,9 +277,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
JumpIf(cc, type, Operand(instance_type), target);
}
+
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ LoadU64(tmp, operand);
@@ -265,55 +290,77 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ AssertSmi(value);
__ LoadSmiLiteral(r0, smi);
JumpIfHelper(masm_, cc, value, r0, target);
}
+
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ AssertSmi(lhs);
__ AssertSmi(rhs);
JumpIfHelper(masm_, cc, lhs, rhs, target);
}
+
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ LoadU64(r0, operand);
JumpIfHelper(masm_, cc, value, r0, target);
}
+
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
__ LoadU64(r0, operand);
JumpIfHelper(masm_, cc, r0, value, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
+ ASM_CODE_COMMENT(masm_);
JumpIf(cc, value, Operand(byte), target);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
}
+
void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ ASM_CODE_COMMENT(masm_);
__ mov(output, Operand(value.ptr()));
}
+
void BaselineAssembler::Move(MemOperand output, Register source) {
+ ASM_CODE_COMMENT(masm_);
__ StoreU64(source, output);
}
+
void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ ASM_CODE_COMMENT(masm_);
__ Move(output, reference);
}
+
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ ASM_CODE_COMMENT(masm_);
__ Move(output, value);
}
+
void BaselineAssembler::Move(Register output, int32_t value) {
+ ASM_CODE_COMMENT(masm_);
__ mov(output, Operand(value));
}
+
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ ASM_CODE_COMMENT(masm_);
__ mov(output, source);
}
+
void BaselineAssembler::MoveSmi(Register output, Register source) {
+ ASM_CODE_COMMENT(masm_);
__ mov(output, source);
}
@@ -429,20 +476,34 @@ void BaselineAssembler::Pop(T... registers) {
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
+ ASM_CODE_COMMENT(masm_);
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
}
+
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
+ ASM_CODE_COMMENT(masm_);
__ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
+ ASM_CODE_COMMENT(masm_);
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ ASM_CODE_COMMENT(masm_);
+ __ LoadU16(output, FieldMemOperand(source, offset));
+}
+
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
+ ASM_CODE_COMMENT(masm_);
__ LoadU8(output, FieldMemOperand(source, offset));
}
+
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
@@ -451,6 +512,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
__ LoadSmiLiteral(tmp, value);
__ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0);
}
+
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
@@ -461,6 +523,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
+
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
@@ -523,6 +586,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ __ AndP(output, lhs, Operand(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index 594b794672..275ad592dd 100644
--- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -195,6 +195,14 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
__ SmiCompare(lhs, rhs);
__ j(AsMasmCondition(cc), target, distance);
}
+
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+ Label* target,
+ Label::Distance distance) {
+ __ cmpq(left, Immediate(right));
+ __ j(AsMasmCondition(cc), target, distance);
+}
+
// cmp_tagged
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -323,7 +331,7 @@ void BaselineAssembler::PushReverse(T... vals) {
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
- ITERATE_PACK(__ Pop(registers));
+ (__ Pop(registers), ...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
@@ -338,8 +346,12 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+ Register source, int offset) {
+ __ movzxwq(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+ int offset) {
__ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
@@ -402,6 +414,11 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+ Move(output, lhs);
+ __ andq(output, Immediate(rhs));
+}
+
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 5aeac1f179..3077cf2131 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -764,6 +764,57 @@ Handle<AccessorInfo> Accessors::MakeBoundFunctionNameInfo(Isolate* isolate) {
}
//
+// Accessors::WrappedFunctionLength
+//
+
+void Accessors::WrappedFunctionLengthGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionLengthGetter);
+ HandleScope scope(isolate);
+ Handle<JSWrappedFunction> function =
+ Handle<JSWrappedFunction>::cast(Utils::OpenHandle(*info.Holder()));
+
+ int length = 0;
+ if (!JSWrappedFunction::GetLength(isolate, function).To(&length)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+ Handle<Object> result(Smi::FromInt(length), isolate);
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+Handle<AccessorInfo> Accessors::MakeWrappedFunctionLengthInfo(
+ Isolate* isolate) {
+ return MakeAccessor(isolate, isolate->factory()->length_string(),
+ &WrappedFunctionLengthGetter, &ReconfigureToDataProperty);
+}
+
+//
+// Accessors::WrappedFunctionName
+//
+
+void Accessors::WrappedFunctionNameGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kWrappedFunctionNameGetter);
+ HandleScope scope(isolate);
+ Handle<JSWrappedFunction> function =
+ Handle<JSWrappedFunction>::cast(Utils::OpenHandle(*info.Holder()));
+ Handle<Object> result;
+ if (!JSWrappedFunction::GetName(isolate, function).ToHandle(&result)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+Handle<AccessorInfo> Accessors::MakeWrappedFunctionNameInfo(Isolate* isolate) {
+ return MakeAccessor(isolate, isolate->factory()->name_string(),
+ &WrappedFunctionNameGetter, &ReconfigureToDataProperty);
+}
+
+//
// Accessors::ErrorStack
//
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index 27ff276821..b7580b01d6 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -44,7 +44,12 @@ class JavaScriptFrame;
kHasSideEffectToReceiver) \
V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
- V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver)
+ V(_, string_length, StringLength, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, wrapped_function_length, WrappedFunctionLength, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, wrapped_function_name, WrappedFunctionName, kHasNoSideEffect, \
+ kHasSideEffectToReceiver)
#define ACCESSOR_SETTER_LIST(V) \
V(ArrayLengthSetter) \
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index b8cfcd19d8..38a88c24ef 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -870,14 +870,14 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ cmp_raw_immediate(actual_marker, static_cast<int>(expected_marker));
+ __ cmp_raw_immediate(actual_state, static_cast<int>(expected_state));
__ b(ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -925,30 +925,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- r0 : actual argument count
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a int32 containing a non-zero optimization
+ // -- tiering_state : a int32 containing a non-zero optimization
// marker.
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ DCHECK(!AreAliased(feedback_vector, r1, r3, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
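
Note on the MaybeOptimizeCode hunk above: the helper now dispatches on the renamed TieringState, and any unmatched state unconditionally hits the trap that was previously guarded by FLAG_debug_code. A plain C++ sketch of that control flow (editor's illustration; only the two enumerator names are taken from the hunk, everything else is a stand-in):

#include <cstdlib>

enum class TieringState {
  kRequestTurbofan_Synchronous,
  kRequestTurbofan_Concurrent,
  // ... other states elided
};

void MaybeOptimizeCodeSketch(TieringState state) {
  if (state == TieringState::kRequestTurbofan_Synchronous) {
    return;  // stands in for the tail call to Runtime::kCompileTurbofan_Synchronous
  }
  if (state == TieringState::kRequestTurbofan_Concurrent) {
    return;  // stands in for the tail call to Runtime::kCompileTurbofan_Concurrent
  }
  std::abort();  // corresponds to the now-unconditional __ stop()
}
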
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1033,18 +1028,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ ldr(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ tst(
optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ b(ne, has_optimized_code_or_marker);
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ b(ne, has_optimized_code_or_state);
}
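
The flags test above takes the slow path if any bit under the renamed mask is set, i.e. if there is either cached optimized code or a pending tiering request. A minimal sketch of the predicate (editor's illustration; the mask name is from the hunk, its value here is an assumption):

#include <cstdint>

constexpr uint32_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask = 0x3;  // assumed

bool NeedsProcessing(uint32_t feedback_vector_flags) {
  return (feedback_vector_flags &
          kHasOptimizedCodeOrTieringStateIsAnyRequestMask) != 0;
}
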
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1055,21 +1050,36 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Label maybe_has_optimized_code;
// Check if optimized code is available
__ tst(optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedMarker));
+ Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
__ b(eq, &maybe_has_optimized_code);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
- __ ldr(optimization_marker,
+ __ ldr(tiering_state,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}
+namespace {
+
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
+ Register scratch) {
+ // Reset the bytecode age and OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ mov(scratch, Operand(0));
+ __ str(scratch,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+} // namespace
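
Why a single store suffices in ResetBytecodeAgeAndOsrState above: the 16-bit OSR urgency/install-target field and the adjacent bytecode-age field together form 32 contiguous bits (the static_assert in the hunk guards this), and the reset value of both is zero. A hypothetical host-side sketch of the same trick (editor's illustration; the struct layout is assumed, not V8's actual header layout):

#include <cstdint>
#include <cstring>

struct BytecodeArrayTail {
  uint16_t osr_urgency_and_install_target;
  uint16_t bytecode_age;
};
static_assert(sizeof(BytecodeArrayTail) == sizeof(uint32_t),
              "both fields fit in one 32-bit word");

void ResetBytecodeAgeAndOsrStateSketch(BytecodeArrayTail* tail) {
  const uint32_t zero = 0;
  std::memcpy(tail, &zero, sizeof(zero));  // one 32-bit write clears both fields
}
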
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
@@ -1093,17 +1103,17 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+ // Check the tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = no_reg;
{
UseScratchRegisterScope temps(masm);
- // optimization_state will be used only in |has_optimized_code_or_marker|
+ // optimization_state will be used only in |has_optimized_code_or_state|
// and outside it can be reused.
optimization_state = temps.Acquire();
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
}
// Increment invocation count for the function.
@@ -1140,22 +1150,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecodeArray = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ mov(scratch, Operand(0));
- __ strh(scratch,
- FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, bytecodeArray, temps.Acquire());
}
-
__ Push(argc, bytecodeArray);
// Baseline code frames store the feedback vector where interpreter would
@@ -1194,7 +1192,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
UseScratchRegisterScope temps(masm);
@@ -1274,9 +1272,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &push_stack_frame);
Register optimization_state = r4;
- Label has_optimized_code_or_marker;
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+ Label has_optimized_code_or_state;
+ LoadTieringStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
@@ -1295,15 +1293,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ mov(r9, Operand(0));
- __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r9);
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1421,7 +1411,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
@@ -1441,10 +1431,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r8, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &install_baseline_code);
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ // Check the tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ mov(r2, kInterpreterBytecodeArrayRegister);
@@ -1830,7 +1820,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
ASM_CODE_COMMENT(masm);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
@@ -3357,17 +3347,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
static constexpr int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
- __ mov(r2, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
- __ mov(r3, lr);
- __ add(r4, sp, Operand(kSavedRegistersAreaSize));
- __ sub(r4, fp, r4);
+ __ mov(r2, lr);
+ __ add(r3, sp, Operand(kSavedRegistersAreaSize));
+ __ sub(r3, fp, r3);
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6);
+ __ PrepareCallCFunction(5);
__ mov(r0, Operand(0));
Label context_check;
__ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -3375,15 +3364,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(r1, Operand(static_cast<int>(deopt_kind)));
- // r2: bailout id already loaded.
- // r3: code address or 0 already loaded.
- __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ Move(r5, ExternalReference::isolate_address(isolate));
- __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
+ // r2: code address or 0 already loaded.
+ // r3: Fp-to-sp delta already loaded.
+ __ Move(r4, ExternalReference::isolate_address(isolate));
+ __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register r0 and get the input
@@ -3545,10 +3533,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3671,15 +3655,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
- // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
- // Sparkplug here.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+ // disarm Sparkplug here.
UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ mov(scratch, Operand(0));
- __ strh(scratch,
- FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
+ temps.Acquire());
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 896115b3e7..7bfd4f8190 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1043,14 +1043,14 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ DropArguments(params_size);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ CompareAndBranch(actual_marker, Operand(static_cast<int>(expected_marker)),
+ __ CompareAndBranch(actual_state, Operand(static_cast<int>(expected_state)),
ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -1111,30 +1111,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- x0 : actual argument count
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : int32 containing non-zero optimization marker.
+ // -- tiering_state : int32 containing non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
- DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ Unreachable();
- }
+ DCHECK(!AreAliased(feedback_vector, x1, x3, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ Unreachable();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1215,18 +1210,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ Ldr(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ TestAndBranchIfAnySet(
optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
- has_optimized_code_or_marker);
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
+ has_optimized_code_or_state);
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1237,12 +1232,12 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Label maybe_has_optimized_code;
// Check if optimized code is available
__ TestAndBranchIfAllClear(optimization_state,
- FeedbackVector::kHasCompileOptimizedMarker,
+ FeedbackVector::kTieringStateIsAnyRequestMask,
&maybe_has_optimized_code);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = x7;
@@ -1253,6 +1248,20 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
}
+namespace {
+
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset the bytecode age and OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Str(wzr,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+} // namespace
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
@@ -1275,11 +1284,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+ // Check the tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = temps.AcquireW();
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
// Increment invocation count for the function.
{
@@ -1315,16 +1324,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ Strh(wzr, FieldMemOperand(bytecode_array,
- BytecodeArray::kOsrLoopNestingLevelOffset));
-
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
__ Push(argc, bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
@@ -1368,7 +1368,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
}
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
@@ -1449,11 +1449,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+ // Check the tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = w7;
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
@@ -1474,15 +1474,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(fp, sp);
__ Push(cp, closure);
- // Reset code age.
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
@@ -1609,7 +1601,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
@@ -1631,10 +1623,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &install_baseline_code);
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ // Check the tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ Move(x2, kInterpreterBytecodeArrayRegister);
@@ -2082,7 +2074,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
@@ -3896,10 +3888,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Floating point registers are saved on the stack above core registers.
const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
- Register bailout_id = x2;
- Register code_object = x3;
- Register fp_to_sp = x4;
- __ Mov(bailout_id, Deoptimizer::kFixedExitSizeMarker);
+ Register code_object = x2;
+ Register fp_to_sp = x3;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
@@ -3920,15 +3910,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Mov(x1, static_cast<int>(deopt_kind));
// Following arguments are already loaded:
- // - x2: bailout id
- // - x3: code object address
- // - x4: fp-to-sp delta
- __ Mov(x5, ExternalReference::isolate_address(isolate));
+ // - x2: code object address
+ // - x3: fp-to-sp delta
+ __ Mov(x4, ExternalReference::isolate_address(isolate));
{
// Call Deoptimizer::New().
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register x0.
@@ -4063,10 +4052,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -4194,11 +4179,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister, padreg);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
- // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
- // Sparkplug here.
- __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+ // disarm Sparkplug here.
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
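
Editor's note: several hunks above fold the repeated Strh-of-wzr sequence into the new ResetBytecodeAgeAndOsrState helper. A small TypeScript sketch of the underlying trick, with hypothetical offsets, purely for illustration: the two 8-bit fields are adjacent (as the replaced STATIC_ASSERTs guaranteed), so a single 16-bit store of zero clears both.

// Editor's illustration (not V8 code): two adjacent 8-bit fields cleared by
// one 16-bit zero store. Offsets are placeholders for illustration only.
const header = new DataView(new ArrayBuffer(8));
const kOsrStateOffset = 4;                    // hypothetical offset
header.setUint8(kOsrStateOffset, 3);          // pretend: armed OSR state
header.setUint8(kOsrStateOffset + 1, 7);      // pretend: bytecode age
header.setUint16(kOsrStateOffset, 0, true);   // one 16-bit write resets both
console.log(header.getUint8(kOsrStateOffset), header.getUint8(kOsrStateOffset + 1)); // 0 0
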
diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq
index f033048abc..611cabc4ed 100644
--- a/deps/v8/src/builtins/arraybuffer.tq
+++ b/deps/v8/src/builtins/arraybuffer.tq
@@ -47,8 +47,11 @@ transitioning javascript builtin ArrayBufferPrototypeGetMaxByteLength(
// 6. Else,
// a. Let length be O.[[ArrayBufferByteLength]].
// 7. Return F(length);
- dcheck(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
- return Convert<Number>(o.max_byte_length);
+
+ if (IsResizableArrayBuffer(o)) {
+ return Convert<Number>(o.max_byte_length);
+ }
+ return Convert<Number>(o.byte_length);
}
// #sec-get-arraybuffer.prototype.resizable
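
Editor's note: the Torque change above makes the maxByteLength getter return the current byteLength for non-resizable buffers instead of dchecking that both fields agree. A minimal sketch of the observable behaviour, assuming a runtime that ships the resizable-ArrayBuffer proposal (the `{ maxByteLength }` option and the getter go through `any` casts because they are not in every TS lib):

// Editor's sketch: observable effect of the getter change above.
const fixed = new ArrayBuffer(8);
// Non-resizable buffer: maxByteLength simply mirrors byteLength.
console.log((fixed as any).maxByteLength); // 8

// Resizable buffer: the configured maximum is reported, as before.
const resizable = new (ArrayBuffer as any)(8, { maxByteLength: 64 });
console.log(resizable.maxByteLength); // 64
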
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index dbcc05de28..c34d9f6884 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -402,7 +402,7 @@ extern enum MessageTemplate {
kRegExpNonRegExp,
kRegExpNonObject,
kPromiseNonCallable,
- kNotAPromise,
+ kPromiseNewTargetUndefined,
kResolverNotAFunction,
kTooManyElementsInPromiseCombinator,
kToRadixFormatRange,
@@ -672,7 +672,7 @@ extern transitioning builtin SetProperty(implicit context: Context)(
JSAny, JSAny, JSAny): JSAny;
extern transitioning builtin SetPropertyIgnoreAttributes(
implicit context: Context)(JSObject, String, JSAny, Smi): JSAny;
-extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
+extern transitioning builtin CreateDataProperty(implicit context: Context)(
JSAny, JSAny, JSAny): JSAny;
extern transitioning builtin DeleteProperty(implicit context: Context)(
JSAny, JSAny | PrivateSymbol, LanguageModeSmi): Boolean;
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 8acf5f8220..5ce561e1d2 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -267,7 +267,6 @@ V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor(
Object result;
{
HandleScope scope(isolate);
- LOG(isolate, ApiObjectAccess("call non-function", obj));
FunctionCallbackArguments custom(
isolate, call_data.data(), constructor, obj, new_target,
args.address_of_first_argument(), args.length() - 1);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index d8669b85a8..020a7f4dd8 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -1564,22 +1564,6 @@ inline Handle<OrderedHashMap> AddValueToKeyedGroup(
return groups;
}
-inline ElementsKind DeduceKeyedGroupElementsKind(ElementsKind kind) {
- // The keyed groups are array lists with fast elements.
- // Double elements are stored as HeapNumbers in the keyed group elements
- // so that we don't need to cast all the keyed groups when switching from
- // fast path to the generic path.
- // TODO(v8:12499) add unboxed double elements support
- switch (kind) {
- case ElementsKind::PACKED_SMI_ELEMENTS: {
- return ElementsKind::PACKED_SMI_ELEMENTS;
- }
- default: {
- return ElementsKind::PACKED_ELEMENTS;
- }
- }
-}
-
inline bool IsFastArray(Handle<JSReceiver> object) {
Isolate* isolate = object->GetIsolate();
if (isolate->force_slow_path()) return false;
@@ -1659,7 +1643,10 @@ inline MaybeHandle<OrderedHashMap> GenericArrayGroupBy(
template <GroupByMode mode>
inline MaybeHandle<OrderedHashMap> FastArrayGroupBy(
Isolate* isolate, Handle<JSArray> array, Handle<Object> callbackfn,
- Handle<OrderedHashMap> groups, double len) {
+ Handle<OrderedHashMap> groups, double len,
+ ElementsKind* result_elements_kind) {
+ DCHECK_NOT_NULL(result_elements_kind);
+
Handle<Map> original_map = Handle<Map>(array->map(), isolate);
uint32_t uint_len = static_cast<uint32_t>(len);
ElementsAccessor* accessor = array->GetElementsAccessor();
@@ -1667,7 +1654,8 @@ inline MaybeHandle<OrderedHashMap> FastArrayGroupBy(
// 4. Let k be 0.
// 6. Repeat, while k < len
for (InternalIndex k : InternalIndex::Range(uint_len)) {
- if (!CheckArrayMapNotModified(array, original_map)) {
+ if (!CheckArrayMapNotModified(array, original_map) ||
+ k.as_uint32() >= static_cast<uint32_t>(array->length().Number())) {
return GenericArrayGroupBy<mode>(isolate, array, callbackfn, groups,
k.as_uint32(), len);
}
@@ -1709,6 +1697,17 @@ inline MaybeHandle<OrderedHashMap> FastArrayGroupBy(
// done by the loop.
}
+  // When staying on the fast path, we can deduce a more specific result
+  // ElementsKind for the keyed groups based on the input ElementsKind.
+ //
+ // Double elements are stored as HeapNumbers in the keyed group elements
+ // so that we don't need to cast all the keyed groups when switching from
+ // fast path to the generic path.
+ // TODO(v8:12499) add unboxed double elements support
+ if (array->GetElementsKind() == ElementsKind::PACKED_SMI_ELEMENTS) {
+ *result_elements_kind = ElementsKind::PACKED_SMI_ELEMENTS;
+ }
+
return groups;
}
@@ -1738,16 +1737,13 @@ BUILTIN(ArrayPrototypeGroupBy) {
// 5. Let groups be a new empty List.
Handle<OrderedHashMap> groups = isolate->factory()->NewOrderedHashMap();
- // Elements kind of the array for grouped elements kind deduction.
- ElementsKind elements_kind = ElementsKind::NO_ELEMENTS;
+ ElementsKind result_elements_kind = ElementsKind::PACKED_ELEMENTS;
if (IsFastArray(O)) {
Handle<JSArray> array = Handle<JSArray>::cast(O);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, groups,
- FastArrayGroupBy<GroupByMode::kToObject>(isolate, array, callbackfn,
- groups, len));
- // Get array's elements kind after called into javascript.
- elements_kind = array->GetElementsKind();
+ FastArrayGroupBy<GroupByMode::kToObject>(
+ isolate, array, callbackfn, groups, len, &result_elements_kind));
} else {
// 4. Let k be 0.
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1758,8 +1754,7 @@ BUILTIN(ArrayPrototypeGroupBy) {
// 7. Let obj be ! OrdinaryObjectCreate(null).
Handle<JSObject> obj = isolate->factory()->NewJSObjectWithNullProto();
- ElementsKind result_elements_kind =
- DeduceKeyedGroupElementsKind(elements_kind);
+
// 8. For each Record { [[Key]], [[Elements]] } g of groups, do
for (InternalIndex entry : groups->IterateEntries()) {
Handle<Name> key = Handle<Name>(Name::cast(groups->KeyAt(entry)), isolate);
@@ -1804,16 +1799,13 @@ BUILTIN(ArrayPrototypeGroupByToMap) {
// 5. Let groups be a new empty List.
Handle<OrderedHashMap> groups = isolate->factory()->NewOrderedHashMap();
- // Elements kind of the array for grouped elements kind deduction.
- ElementsKind elements_kind = ElementsKind::NO_ELEMENTS;
+ ElementsKind result_elements_kind = ElementsKind::PACKED_ELEMENTS;
if (IsFastArray(O)) {
Handle<JSArray> array = Handle<JSArray>::cast(O);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, groups,
- FastArrayGroupBy<GroupByMode::kToMap>(isolate, array, callbackfn,
- groups, len));
- // Get array's elements kind after called into javascript.
- elements_kind = array->GetElementsKind();
+ FastArrayGroupBy<GroupByMode::kToMap>(
+ isolate, array, callbackfn, groups, len, &result_elements_kind));
} else {
// 4. Let k be 0.
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1825,8 +1817,6 @@ BUILTIN(ArrayPrototypeGroupByToMap) {
// 7. Let map be ! Construct(%Map%).
Handle<JSMap> map = isolate->factory()->NewJSMap();
Handle<OrderedHashMap> map_table = isolate->factory()->NewOrderedHashMap();
- ElementsKind result_elements_kind =
- DeduceKeyedGroupElementsKind(elements_kind);
// 8. For each Record { [[Key]], [[Elements]] } g of groups, do
for (InternalIndex entry : groups->IterateEntries()) {
Handle<Object> key = Handle<Object>(groups->KeyAt(entry), isolate);
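
Editor's note: the FastArrayGroupBy changes above move the result-ElementsKind deduction into the fast path and add a length re-check after the callback runs. At this V8 version the feature surfaces as the array-grouping proposal's Array.prototype.groupBy / groupByToMap (later renamed by the proposal); a hedged sketch, with `any` casts since the methods are not in the TS lib:

// Editor's sketch of the JS surface backed by the patched builtins.
const nums = [1, 2.5, 3, 4.5];
// groupBy returns a null-prototype object keyed by the callback's result.
const grouped = (nums as any).groupBy((n: number) => (Number.isInteger(n) ? 'int' : 'frac'));
console.log(grouped.int, grouped.frac); // [ 1, 3 ] [ 2.5, 4.5 ]

// groupByToMap returns a Map and allows arbitrary keys.
const byParity = ([1, 2, 3, 4] as any).groupByToMap((n: number) => n % 2);
console.log(byParity.get(1), byParity.get(0)); // [ 1, 3 ] [ 2, 4 ]
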
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index ed0110ba2c..39837f4a32 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -37,16 +37,6 @@ namespace internal {
namespace {
-bool RoundUpToPageSize(size_t byte_length, size_t page_size,
- size_t max_allowed_byte_length, size_t* pages) {
- size_t bytes_wanted = RoundUp(byte_length, page_size);
- if (bytes_wanted > max_allowed_byte_length) {
- return false;
- }
- *pages = bytes_wanted / page_size;
- return true;
-}
-
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
Handle<Object> max_length, InitializedFlag initialized) {
@@ -91,21 +81,12 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
}
- size_t page_size = AllocatePageSize();
- size_t initial_pages;
- if (!RoundUpToPageSize(byte_length, page_size,
- JSArrayBuffer::kMaxByteLength, &initial_pages)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
+ size_t page_size, initial_pages, max_pages;
+ MAYBE_RETURN(JSArrayBuffer::GetResizableBackingStorePageConfiguration(
+ isolate, byte_length, max_byte_length, kThrowOnError,
+ &page_size, &initial_pages, &max_pages),
+ ReadOnlyRoots(isolate).exception());
- size_t max_pages;
- if (!RoundUpToPageSize(max_byte_length, page_size,
- JSArrayBuffer::kMaxByteLength, &max_pages)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
- }
constexpr bool kIsWasmMemory = false;
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
isolate, byte_length, max_byte_length, page_size, initial_pages,
@@ -435,7 +416,7 @@ static Object ResizeHelper(BuiltinArguments args, Isolate* isolate,
// [GSAB] Let hostHandled be ? HostGrowArrayBuffer(O, newByteLength).
// If hostHandled is handled, return undefined.
- // TODO(v8:11111): Wasm integration.
+ // TODO(v8:11111, v8:12746): Wasm integration.
if (!is_shared) {
// [RAB] Let oldBlock be O.[[ArrayBufferData]].
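
Editor's note: ConstructBuffer above now delegates the page math to JSArrayBuffer::GetResizableBackingStorePageConfiguration instead of the removed local RoundUpToPageSize helper. A sketch of the arithmetic that helper performed (TypeScript stand-in for the C++; names mirror the removed code, values are illustrative):

// Returns the number of pages needed for byteLength, or null when the rounded
// size would exceed the allowed maximum (the caller then throws a RangeError).
function roundUpToPageSize(byteLength: number, pageSize: number,
                           maxAllowedByteLength: number): number | null {
  const bytesWanted = Math.ceil(byteLength / pageSize) * pageSize;
  if (bytesWanted > maxAllowedByteLength) return null;
  return bytesWanted / pageSize;
}

console.log(roundUpToPageSize(5000, 4096, 2 ** 32));    // 2
console.log(roundUpToPageSize(2 ** 33, 4096, 2 ** 32)); // null -> RangeError path
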
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 1703e43447..68b66f1404 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -104,7 +104,8 @@ BUILTIN(CallSitePrototypeGetPosition) {
BUILTIN(CallSitePrototypeGetPromiseIndex) {
HandleScope scope(isolate);
CHECK_CALLSITE(frame, "getPromiseIndex");
- if (!frame->IsPromiseAll() && !frame->IsPromiseAny()) {
+ if (!frame->IsPromiseAll() && !frame->IsPromiseAny() &&
+ !frame->IsPromiseAllSettled()) {
return ReadOnlyRoots(isolate).null_value();
}
return Smi::FromInt(CallSiteInfo::GetSourcePosition(frame));
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index c4e81c71fd..b3e053dcfd 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -24,11 +24,6 @@ namespace internal {
namespace {
-const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
- "Thu", "Fri", "Sat"};
-const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
-
// ES6 section 20.3.1.16 Date Time String Format
double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
str = String::Flatten(isolate, str);
@@ -61,55 +56,6 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
return DateCache::TimeClip(date);
}
-enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
-
-using DateBuffer = base::SmallVector<char, 128>;
-
-template <class... Args>
-DateBuffer FormatDate(const char* format, Args... args) {
- DateBuffer buffer;
- SmallStringOptimizedAllocator<DateBuffer::kInlineSize> allocator(&buffer);
- StringStream sstream(&allocator);
- sstream.Add(format, args...);
- buffer.resize_no_init(sstream.length());
- return buffer;
-}
-
-// ES6 section 20.3.4.41.1 ToDateString(tv)
-DateBuffer ToDateString(double time_val, DateCache* date_cache,
- ToDateStringMode mode = kDateAndTime) {
- if (std::isnan(time_val)) {
- return FormatDate("Invalid Date");
- }
- int64_t time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = date_cache->ToLocal(time_ms);
- int year, month, day, weekday, hour, min, sec, ms;
- date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
- &min, &sec, &ms);
- int timezone_offset = -date_cache->TimezoneOffset(time_ms);
- int timezone_hour = std::abs(timezone_offset) / 60;
- int timezone_min = std::abs(timezone_offset) % 60;
- const char* local_timezone = date_cache->LocalTimezone(time_ms);
- switch (mode) {
- case kDateOnly:
- return FormatDate((year < 0) ? "%s %s %02d %05d" : "%s %s %02d %04d",
- kShortWeekDays[weekday], kShortMonths[month], day,
- year);
- case kTimeOnly:
- return FormatDate("%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
- (timezone_offset < 0) ? '-' : '+', timezone_hour,
- timezone_min, local_timezone);
- case kDateAndTime:
- return FormatDate(
- (year < 0) ? "%s %s %02d %05d %02d:%02d:%02d GMT%c%02d%02d (%s)"
- : "%s %s %02d %04d %02d:%02d:%02d GMT%c%02d%02d (%s)",
- kShortWeekDays[weekday], kShortMonths[month], day, year, hour, min,
- sec, (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
- local_timezone);
- }
- UNREACHABLE();
-}
-
Object SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
double time_val) {
if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
@@ -128,7 +74,8 @@ BUILTIN(DateConstructor) {
HandleScope scope(isolate);
if (args.new_target()->IsUndefined(isolate)) {
double const time_val = JSDate::CurrentTimeValue(isolate);
- DateBuffer buffer = ToDateString(time_val, isolate->date_cache());
+ DateBuffer buffer = ToDateString(time_val, isolate->date_cache(),
+ ToDateStringMode::kLocalDateAndTime);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(base::VectorOf(buffer)));
}
@@ -720,7 +667,8 @@ BUILTIN(DatePrototypeToDateString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
DateBuffer buffer =
- ToDateString(date->value().Number(), isolate->date_cache(), kDateOnly);
+ ToDateString(date->value().Number(), isolate->date_cache(),
+ ToDateStringMode::kLocalDate);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(base::VectorOf(buffer)));
}
@@ -757,7 +705,8 @@ BUILTIN(DatePrototypeToString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
DateBuffer buffer =
- ToDateString(date->value().Number(), isolate->date_cache());
+ ToDateString(date->value().Number(), isolate->date_cache(),
+ ToDateStringMode::kLocalDateAndTime);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(base::VectorOf(buffer)));
}
@@ -767,7 +716,8 @@ BUILTIN(DatePrototypeToTimeString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
DateBuffer buffer =
- ToDateString(date->value().Number(), isolate->date_cache(), kTimeOnly);
+ ToDateString(date->value().Number(), isolate->date_cache(),
+ ToDateStringMode::kLocalTime);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(base::VectorOf(buffer)));
}
@@ -838,21 +788,11 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
BUILTIN(DatePrototypeToUTCString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
- double const time_val = date->value().Number();
- if (std::isnan(time_val)) {
- return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
- }
- char buffer[128];
- int64_t time_ms = static_cast<int64_t>(time_val);
- int year, month, day, weekday, hour, min, sec, ms;
- isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
- &hour, &min, &sec, &ms);
- SNPrintF(base::ArrayVector(buffer),
- (year < 0) ? "%s, %02d %s %05d %02d:%02d:%02d GMT"
- : "%s, %02d %s %04d %02d:%02d:%02d GMT",
- kShortWeekDays[weekday], day, kShortMonths[month], year, hour, min,
- sec);
- return *isolate->factory()->NewStringFromAsciiChecked(buffer);
+ DateBuffer buffer =
+ ToDateString(date->value().Number(), isolate->date_cache(),
+ ToDateStringMode::kUTCDateAndTime);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(base::VectorOf(buffer)));
}
// ES6 section B.2.4.1 Date.prototype.getYear ( )
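
Editor's note: the builtins above all route through a single ToDateString helper with an explicit mode; the modes map onto the familiar Date formatting methods. A quick sketch of the observable formats (local-time outputs depend on the host timezone, hence the "e.g."):

// Editor's sketch of the four ToDateStringMode values used above.
const d = new Date(Date.UTC(2022, 3, 19, 7, 0, 36));
console.log(d.toDateString()); // e.g. "Tue Apr 19 2022"                       (kLocalDate)
console.log(d.toTimeString()); // e.g. "09:00:36 GMT+0200 (Central European Summer Time)" (kLocalTime)
console.log(d.toString());     // e.g. "Tue Apr 19 2022 09:00:36 GMT+0200 (…)" (kLocalDateAndTime)
console.log(d.toUTCString());  // "Tue, 19 Apr 2022 07:00:36 GMT"              (kUTCDateAndTime)
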
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 2f72e0a15a..6b43659d99 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -45,7 +45,6 @@ namespace internal {
#define BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \
/* Deoptimization entries. */ \
ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
- ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
\
/* GC write barrier. */ \
@@ -165,7 +164,6 @@ namespace internal {
ASM(ResumeGeneratorTrampoline, ResumeGenerator) \
\
/* String helpers */ \
- TFC(StringCodePointAt, StringAt) \
TFC(StringFromCodePointAt, StringAtAsString) \
TFC(StringEqual, Compare) \
TFC(StringGreaterThan, Compare) \
@@ -870,9 +868,9 @@ namespace internal {
\
/* ShadowRealm */ \
CPP(ShadowRealmConstructor) \
+ TFS(ShadowRealmGetWrappedValue, kCreationContext, kTargetContext, kValue) \
CPP(ShadowRealmPrototypeEvaluate) \
CPP(ShadowRealmPrototypeImportValue) \
- TFS(ShadowRealmGetWrappedValue, kCreationContext, kValue) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
@@ -884,8 +882,8 @@ namespace internal {
kIndexOrFieldName) \
TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArrayOrSharedStruct, \
kIndexOrFieldName, kValue) \
- TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
- kValue) \
+ TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, \
+ kArrayOrSharedStruct, kIndexOrFieldName, kValue) \
TFJ(AtomicsCompareExchange, kJSArgcReceiverSlots + 4, kReceiver, kArray, \
kIndex, kOldValue, kNewValue) \
TFJ(AtomicsAdd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
@@ -1063,7 +1061,7 @@ namespace internal {
TFC(GetProperty, GetProperty) \
TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \
TFS(SetProperty, kReceiver, kKey, kValue) \
- TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
+ TFS(CreateDataProperty, kReceiver, kKey, kValue) \
ASM(MemCopyUint8Uint8, CCall) \
ASM(MemMove, CCall) \
\
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 7fb1d7d55f..84856beba8 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -206,72 +206,13 @@ Object DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, function,
isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
-
- // Setup the "length" property based on the "length" of the {target}.
- // If the targets length is the default JSFunction accessor, we can keep the
- // accessor that's installed by default on the JSBoundFunction. It lazily
- // computes the value from the underlying internal length.
- Handle<AccessorInfo> function_length_accessor =
- isolate->factory()->function_length_accessor();
- LookupIterator length_lookup(isolate, target,
- isolate->factory()->length_string(), target,
- LookupIterator::OWN);
- if (!target->IsJSFunction() ||
- length_lookup.state() != LookupIterator::ACCESSOR ||
- !length_lookup.GetAccessors().is_identical_to(function_length_accessor)) {
- Handle<Object> length(Smi::zero(), isolate);
- Maybe<PropertyAttributes> attributes =
- JSReceiver::GetPropertyAttributes(&length_lookup);
- if (attributes.IsNothing()) return ReadOnlyRoots(isolate).exception();
- if (attributes.FromJust() != ABSENT) {
- Handle<Object> target_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_length,
- Object::GetProperty(&length_lookup));
- if (target_length->IsNumber()) {
- length = isolate->factory()->NewNumber(std::max(
- 0.0, DoubleToInteger(target_length->Number()) - argv.length()));
- }
- }
- LookupIterator it(isolate, function, isolate->factory()->length_string(),
- function);
- DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, length, it.property_attributes()));
- }
-
- // Setup the "name" property based on the "name" of the {target}.
- // If the target's name is the default JSFunction accessor, we can keep the
- // accessor that's installed by default on the JSBoundFunction. It lazily
- // computes the value from the underlying internal name.
- Handle<AccessorInfo> function_name_accessor =
- isolate->factory()->function_name_accessor();
- LookupIterator name_lookup(isolate, target, isolate->factory()->name_string(),
- target);
- if (!target->IsJSFunction() ||
- name_lookup.state() != LookupIterator::ACCESSOR ||
- !name_lookup.GetAccessors().is_identical_to(function_name_accessor) ||
- (name_lookup.IsFound() && !name_lookup.HolderIsReceiver())) {
- Handle<Object> target_name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_name,
- Object::GetProperty(&name_lookup));
- Handle<String> name;
- if (target_name->IsString()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, name,
- Name::ToFunctionName(isolate, Handle<String>::cast(target_name)));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, name,
- isolate->factory()->NewConsString(isolate->factory()->bound__string(),
- name));
- } else {
- name = isolate->factory()->bound__string();
- }
- LookupIterator it(isolate, function, isolate->factory()->name_string());
- DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, name, it.property_attributes()));
+ Maybe<bool> result =
+ JSFunctionOrBoundFunctionOrWrappedFunction::CopyNameAndLength(
+ isolate, function, target, isolate->factory()->bound__string(),
+ argv.length());
+ if (result.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
}
return *function;
}
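
Editor's note: DoFunctionBind now defers the "length" and "name" setup to the shared JSFunctionOrBoundFunctionOrWrappedFunction::CopyNameAndLength helper; the observable JS contract is unchanged. A minimal sketch:

// Editor's sketch of the name/length copying performed on bound functions.
function add(a: number, b: number, c: number): number {
  return a + b + c;
}
const add1 = add.bind(null, 1);
console.log(add1.name);   // "bound add"  (prefix + target name)
console.log(add1.length); // 2            (target length minus bound args, floored at 0)
console.log(add1(2, 3));  // 6
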
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index a8b22f243c..f4ab999f44 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -839,7 +839,7 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
1, IndexAdvanceMode::kPost);
}
- CallBuiltin(Builtin::kSetPropertyInLiteral, context, target, key,
+ CallBuiltin(Builtin::kCreateDataProperty, context, target, key,
value);
Goto(&skip);
Bind(&skip);
@@ -1362,14 +1362,14 @@ TF_BUILTIN(SetProperty, CodeStubAssembler) {
// being initialized, and have not yet been made accessible to the user. Thus,
// any operation here should be unobservable until after the object has been
// returned.
-TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
+TF_BUILTIN(CreateDataProperty, CodeStubAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
auto key = Parameter<Object>(Descriptor::kKey);
auto value = Parameter<Object>(Descriptor::kValue);
- KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context, receiver,
- key, value);
+ KeyedStoreGenericGenerator::CreateDataProperty(state(), context, receiver,
+ key, value);
}
TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index f9b5378cc9..6beed8b6b1 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -29,13 +29,13 @@ void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
GenerateTailCallToJSCode(code, function);
}
-void LazyBuiltinsAssembler::TailCallRuntimeIfMarkerEquals(
- TNode<Uint32T> marker, OptimizationMarker expected_marker,
+void LazyBuiltinsAssembler::TailCallRuntimeIfStateEquals(
+ TNode<Uint32T> state, TieringState expected_state,
Runtime::FunctionId function_id, TNode<JSFunction> function) {
Label no_match(this);
- GotoIfNot(Word32Equal(marker,
- Uint32Constant(static_cast<uint32_t>(expected_marker))),
- &no_match);
+ GotoIfNot(
+ Word32Equal(state, Uint32Constant(static_cast<uint32_t>(expected_state))),
+ &no_match);
GenerateTailCallToReturnedCode(function_id, function);
BIND(&no_match);
}
@@ -48,31 +48,29 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
LoadObjectField<Uint32T>(feedback_vector, FeedbackVector::kFlagsOffset);
// Fall through if no optimization trigger or optimized code.
- GotoIfNot(IsSetWord32(
- optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
- &fallthrough);
+ GotoIfNot(
+ IsSetWord32(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask),
+ &fallthrough);
GotoIfNot(IsSetWord32(optimization_state,
- FeedbackVector::kHasCompileOptimizedMarker),
+ FeedbackVector::kTieringStateIsAnyRequestMask),
&may_have_optimized_code);
- // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
- // all these marker values there.
- TNode<Uint32T> marker =
- DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state);
- TailCallRuntimeIfMarkerEquals(
- marker, OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent, function);
- TailCallRuntimeIfMarkerEquals(marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent, function);
- TailCallRuntimeIfMarkerEquals(
- marker, OptimizationMarker::kCompileMaglev_NotConcurrent,
- Runtime::kCompileMaglev_NotConcurrent, function);
- TailCallRuntimeIfMarkerEquals(marker,
- OptimizationMarker::kCompileMaglev_Concurrent,
- Runtime::kCompileMaglev_Concurrent, function);
+ // TODO(ishell): introduce Runtime::kHandleTieringState and check
+ // all these state values there.
+ TNode<Uint32T> state =
+ DecodeWord32<FeedbackVector::TieringStateBits>(optimization_state);
+ TailCallRuntimeIfStateEquals(state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous, function);
+ TailCallRuntimeIfStateEquals(state, TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent, function);
+ TailCallRuntimeIfStateEquals(state, TieringState::kRequestMaglev_Synchronous,
+ Runtime::kCompileMaglev_Synchronous, function);
+ TailCallRuntimeIfStateEquals(state, TieringState::kRequestMaglev_Concurrent,
+ Runtime::kCompileMaglev_Concurrent, function);
Unreachable();
BIND(&may_have_optimized_code);
@@ -101,15 +99,15 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
Comment("MaybeTailCallOptimizedCodeSlot:: GenerateTailCallToJSCode");
GenerateTailCallToJSCode(optimized_code, function);
- // Optimized code slot contains deoptimized code or code is cleared and
- // optimized code marker isn't updated. Evict the code, update the marker
- // and re-enter the closure's code.
+ // Optimized code slot contains deoptimized code, or the code is cleared
+ // and tiering state hasn't yet been updated. Evict the code, update the
+ // state and re-enter the closure's code.
BIND(&heal_optimized_code_slot);
GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, function);
}
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
+ // Fall-through if the optimized code cell is clear and the tiering state is
+ // kNone.
BIND(&fallthrough);
}
@@ -119,7 +117,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// Check the code object for the SFI. If SFI's code entry points to
// CompileLazy, then we need to lazy compile regardless of the function or
- // feedback vector marker.
+ // tiering state.
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
TVARIABLE(Uint16T, sfi_data_type);
@@ -131,6 +129,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// If feedback cell isn't initialized, compile function
GotoIf(IsUndefined(feedback_cell_value), &compile_function);
+ CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
+ isolate(), CompileLazy))));
+ StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
+
Label maybe_use_sfi_code(this);
// If there is no feedback, don't check for optimized code.
GotoIf(HasInstanceType(feedback_cell_value, CLOSURE_FEEDBACK_CELL_ARRAY_TYPE),
@@ -139,7 +141,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// If it isn't undefined or fixed array it must be a feedback vector.
CSA_DCHECK(this, IsFeedbackVector(feedback_cell_value));
- // Is there an optimization marker or optimized code in the feedback vector?
+ // Is there a tiering state or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
Goto(&maybe_use_sfi_code);
@@ -147,13 +149,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
BIND(&maybe_use_sfi_code);
- CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
- isolate(), CompileLazy))));
- StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
-
- Label tailcall_code(this);
- Label baseline(this);
-
+ Label tailcall_code(this), baseline(this);
TVARIABLE(CodeT, code);
// Check if we have baseline code.
@@ -172,8 +168,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
function));
});
Goto(&tailcall_code);
+
BIND(&tailcall_code);
- // Jump to the selected code entry.
GenerateTailCallToJSCode(code.value(), function);
BIND(&compile_function);
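
Editor's note: the renamed TailCallRuntimeIfStateEquals calls above amount to a dispatch from tiering-state values to compile runtime functions. A TypeScript sketch of that mapping, with hypothetical numeric values (the real ones live in V8's TieringState enum):

// Editor's sketch of the tiering-state dispatch in MaybeTailCallOptimizedCodeSlot.
enum TieringState {
  kNone,
  kRequestTurbofan_Synchronous,
  kRequestTurbofan_Concurrent,
  kRequestMaglev_Synchronous,
  kRequestMaglev_Concurrent,
}

function runtimeFor(state: TieringState): string | null {
  switch (state) {
    case TieringState.kRequestTurbofan_Synchronous: return 'Runtime::kCompileTurbofan_Synchronous';
    case TieringState.kRequestTurbofan_Concurrent:  return 'Runtime::kCompileTurbofan_Concurrent';
    case TieringState.kRequestMaglev_Synchronous:   return 'Runtime::kCompileMaglev_Synchronous';
    case TieringState.kRequestMaglev_Concurrent:    return 'Runtime::kCompileMaglev_Concurrent';
    default: return null; // kNone: fall through to the optimized-code check
  }
}

console.log(runtimeFor(TieringState.kRequestMaglev_Concurrent)); // Runtime::kCompileMaglev_Concurrent
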
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
index 623811663e..b7dcbb71d9 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.h
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -21,10 +21,10 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
TNode<JSFunction> function);
- void TailCallRuntimeIfMarkerEquals(TNode<Uint32T> marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id,
- TNode<JSFunction> function);
+ void TailCallRuntimeIfStateEquals(TNode<Uint32T> state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id,
+ TNode<JSFunction> function);
void MaybeTailCallOptimizedCodeSlot(TNode<JSFunction> function,
TNode<FeedbackVector> feedback_vector);
diff --git a/deps/v8/src/builtins/builtins-shadow-realms.cc b/deps/v8/src/builtins/builtins-shadow-realms.cc
index b39f570ef8..08b3f3ec31 100644
--- a/deps/v8/src/builtins/builtins-shadow-realms.cc
+++ b/deps/v8/src/builtins/builtins-shadow-realms.cc
@@ -59,9 +59,9 @@ BUILTIN(ShadowRealmConstructor) {
namespace {
// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue
-MaybeHandle<Object> GetWrappedValue(Isolate* isolate, Handle<Object> value,
+MaybeHandle<Object> GetWrappedValue(Isolate* isolate,
Handle<NativeContext> creation_context,
- Handle<NativeContext> target_context) {
+ Handle<Object> value) {
// 1. If Type(value) is Object, then
if (!value->IsJSReceiver()) {
// 2. Return value.
@@ -69,6 +69,8 @@ MaybeHandle<Object> GetWrappedValue(Isolate* isolate, Handle<Object> value,
}
// 1a. If IsCallable(value) is false, throw a TypeError exception.
if (!value->IsCallable()) {
+    // The TypeError thrown is created with the creation Realm's TypeError
+    // constructor instead of the executing Realm's.
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewError(Handle<JSFunction>(creation_context->type_error_function(),
@@ -77,34 +79,8 @@ MaybeHandle<Object> GetWrappedValue(Isolate* isolate, Handle<Object> value,
{});
}
// 1b. Return ? WrappedFunctionCreate(callerRealm, value).
-
- // WrappedFunctionCreate
- // https://tc39.es/proposal-shadowrealm/#sec-wrappedfunctioncreate
-
- // The intermediate wrapped functions are not user-visible. And calling a
- // wrapped function won't cause a side effect in the creation realm.
- // Unwrap here to avoid nested unwrapping at the call site.
- if (value->IsJSWrappedFunction()) {
- Handle<JSWrappedFunction> target_wrapped =
- Handle<JSWrappedFunction>::cast(value);
- value = Handle<Object>(target_wrapped->wrapped_target_function(), isolate);
- }
-
- // 1. Let internalSlotsList be the internal slots listed in Table 2, plus
- // [[Prototype]] and [[Extensible]].
- // 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
- // 3. Set wrapped.[[Prototype]] to
- // callerRealm.[[Intrinsics]].[[%Function.prototype%]].
- // 4. Set wrapped.[[Call]] as described in 2.1.
- // 5. Set wrapped.[[WrappedTargetFunction]] to Target.
- // 6. Set wrapped.[[Realm]] to callerRealm.
- // 7. Let result be CopyNameAndLength(wrapped, Target, "wrapped").
- // 8. If result is an Abrupt Completion, throw a TypeError exception.
- Handle<JSWrappedFunction> wrapped =
- isolate->factory()->NewJSWrappedFunction(creation_context, value);
-
- // 9. Return wrapped.
- return wrapped;
+ return JSWrappedFunction::Create(isolate, creation_context,
+ Handle<JSReceiver>::cast(value));
}
} // namespace
@@ -213,6 +189,7 @@ BUILTIN(ShadowRealmPrototypeEvaluate) {
}
if (result.is_null()) {
+ DCHECK(isolate->has_pending_exception());
Handle<Object> pending_exception =
Handle<Object>(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
@@ -225,7 +202,9 @@ BUILTIN(ShadowRealmPrototypeEvaluate) {
*factory->NewError(isolate->syntax_error_function(), message));
}
// 21. If result.[[Type]] is not normal, throw a TypeError exception.
- // TODO(v8:11989): provide a non-observable inspection.
+ // TODO(v8:11989): provide a non-observable inspection on the
+ // pending_exception to the newly created TypeError.
+ // https://github.com/tc39/proposal-shadowrealm/issues/353
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCallShadowRealmFunctionThrown));
}
@@ -233,8 +212,7 @@ BUILTIN(ShadowRealmPrototypeEvaluate) {
Handle<Object> wrapped_result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, wrapped_result,
- GetWrappedValue(isolate, result.ToHandleChecked(), caller_context,
- eval_context));
+ GetWrappedValue(isolate, caller_context, result.ToHandleChecked()));
return *wrapped_result;
}
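
Editor's note: GetWrappedValue above now forwards callables to JSWrappedFunction::Create and throws the TypeError with the creation realm's constructor. Observable behaviour, sketched against the stage-3 ShadowRealm proposal (the global is declared here as an assumption, since it is not in the TS lib):

declare const ShadowRealm: new () => { evaluate(src: string): any };

const realm = new ShadowRealm();
console.log(realm.evaluate('40 + 2'));                // 42  (primitives pass through)
const wrappedAdd = realm.evaluate('(a, b) => a + b'); // callables come back wrapped
console.log(wrappedAdd(2, 3));                        // 5
try {
  realm.evaluate('({})');                             // non-callable objects are rejected
} catch (e) {
  console.log((e as Error).name);                     // "TypeError"
}
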
diff --git a/deps/v8/src/builtins/builtins-shadowrealm-gen.cc b/deps/v8/src/builtins/builtins-shadowrealm-gen.cc
index 03bc854c9c..f65f611683 100644
--- a/deps/v8/src/builtins/builtins-shadowrealm-gen.cc
+++ b/deps/v8/src/builtins/builtins-shadowrealm-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/descriptor-array.h"
namespace v8 {
namespace internal {
@@ -15,25 +16,44 @@ class ShadowRealmBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- TNode<JSObject> AllocateJSWrappedFunction(TNode<Context> context);
+ TNode<JSObject> AllocateJSWrappedFunction(TNode<Context> context,
+ TNode<Object> target);
+ void CheckAccessor(TNode<DescriptorArray> array, TNode<IntPtrT> index,
+ TNode<Name> name, Label* bailout);
};
TNode<JSObject> ShadowRealmBuiltinsAssembler::AllocateJSWrappedFunction(
- TNode<Context> context) {
+ TNode<Context> context, TNode<Object> target) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(
LoadContextElement(native_context, Context::WRAPPED_FUNCTION_MAP_INDEX));
- return AllocateJSObjectFromMap(map);
+ TNode<JSObject> wrapped = AllocateJSObjectFromMap(map);
+ StoreObjectFieldNoWriteBarrier(
+ wrapped, JSWrappedFunction::kWrappedTargetFunctionOffset, target);
+ StoreObjectFieldNoWriteBarrier(wrapped, JSWrappedFunction::kContextOffset,
+ context);
+ return wrapped;
+}
+
+void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode<DescriptorArray> array,
+ TNode<IntPtrT> index,
+ TNode<Name> name,
+ Label* bailout) {
+ TNode<Name> key = LoadKeyByDescriptorEntry(array, index);
+ GotoIfNot(TaggedEqual(key, name), bailout);
+ TNode<Object> value = LoadValueByDescriptorEntry(array, index);
+ GotoIfNot(IsAccessorInfo(CAST(value)), bailout);
}
// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue
TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto creation_context = Parameter<Context>(Descriptor::kCreationContext);
+ auto target_context = Parameter<Context>(Descriptor::kTargetContext);
auto value = Parameter<Object>(Descriptor::kValue);
Label if_primitive(this), if_callable(this), unwrap(this), wrap(this),
- bailout(this, Label::kDeferred);
+ slow_wrap(this, Label::kDeferred), bailout(this, Label::kDeferred);
// 2. Return value.
GotoIf(TaggedIsSmi(value), &if_primitive);
@@ -64,28 +84,68 @@ TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) {
Goto(&wrap);
BIND(&wrap);
+ // Disallow wrapping of slow-mode functions. We need to figure out
+ // whether the length and name property are in the original state.
+ TNode<Map> map = LoadMap(CAST(target.value()));
+ GotoIf(IsDictionaryMap(map), &slow_wrap);
+
+ // Check whether the length and name properties are still present as
+ // AccessorInfo objects. If so, their value can be recomputed even if
+ // the actual value on the object changes.
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ TNode<IntPtrT> number_of_own_descriptors = Signed(
+ DecodeWordFromWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bit_field3));
+ GotoIf(IntPtrLessThan(
+ number_of_own_descriptors,
+ IntPtrConstant(JSFunction::kMinDescriptorsForFastBindAndWrap)),
+ &slow_wrap);
+
+  // We don't need to check the exact accessor here because the only case in
+  // which a custom accessor arises is with function templates via the API,
+  // and in that case the object is in dictionary mode.
+ TNode<DescriptorArray> descriptors = LoadMapInstanceDescriptors(map);
+ CheckAccessor(
+ descriptors,
+ IntPtrConstant(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex),
+ LengthStringConstant(), &slow_wrap);
+ CheckAccessor(
+ descriptors,
+ IntPtrConstant(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex),
+ NameStringConstant(), &slow_wrap);
+
+ // Verify that prototype matches the function prototype of the target
+ // context.
+ TNode<Object> prototype = LoadMapPrototype(map);
+ TNode<Object> function_map =
+ LoadContextElement(target_context, Context::WRAPPED_FUNCTION_MAP_INDEX);
+ TNode<Object> function_prototype = LoadMapPrototype(CAST(function_map));
+ GotoIf(TaggedNotEqual(prototype, function_prototype), &slow_wrap);
+
// 1. Let internalSlotsList be the internal slots listed in Table 2, plus
// [[Prototype]] and [[Extensible]].
// 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
// 3. Set wrapped.[[Prototype]] to
// callerRealm.[[Intrinsics]].[[%Function.prototype%]].
// 4. Set wrapped.[[Call]] as described in 2.1.
- TNode<JSObject> wrapped = AllocateJSWrappedFunction(creation_context);
-
// 5. Set wrapped.[[WrappedTargetFunction]] to Target.
- StoreObjectFieldNoWriteBarrier(
- wrapped, JSWrappedFunction::kWrappedTargetFunctionOffset, target.value());
// 6. Set wrapped.[[Realm]] to callerRealm.
- StoreObjectFieldNoWriteBarrier(wrapped, JSWrappedFunction::kContextOffset,
- creation_context);
-
// 7. Let result be CopyNameAndLength(wrapped, Target, "wrapped").
// 8. If result is an Abrupt Completion, throw a TypeError exception.
- // TODO(v8:11989): https://github.com/tc39/proposal-shadowrealm/pull/348
+ // Installed with default accessors.
+ TNode<JSObject> wrapped =
+ AllocateJSWrappedFunction(creation_context, target.value());
// 9. Return wrapped.
Return(wrapped);
+ BIND(&slow_wrap);
+ {
+ Return(CallRuntime(Runtime::kShadowRealmWrappedFunctionCreate, context,
+ creation_context, target.value()));
+ }
+
BIND(&bailout);
ThrowTypeError(context, MessageTemplate::kNotCallable, value);
}
@@ -132,7 +192,7 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
// Create wrapped value in the target realm.
TNode<Object> wrapped_receiver =
CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context,
- target_context, receiver);
+ target_context, caller_context, receiver);
StoreFixedArrayElement(wrapped_args, 0, wrapped_receiver);
// 7. For each element arg of argumentsList, do
BuildFastLoop<IntPtrT>(
@@ -142,7 +202,7 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
// Create wrapped value in the target realm.
TNode<Object> wrapped_value =
CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context,
- target_context, args.AtIndex(index));
+ target_context, caller_context, args.AtIndex(index));
// 7b. Append wrappedValue to wrappedArgs.
StoreFixedArrayElement(
wrapped_args, IntPtrAdd(index, IntPtrConstant(1)), wrapped_value);
@@ -167,13 +227,15 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
// 10a. Return ? GetWrappedValue(callerRealm, result.[[Value]]).
TNode<Object> wrapped_result =
CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context,
- caller_context, result);
+ caller_context, target_context, result);
args.PopAndReturn(wrapped_result);
// 11. Else,
BIND(&call_exception);
// 11a. Throw a TypeError exception.
- // TODO(v8:11989): provide a non-observable inspection.
+ // TODO(v8:11989): provide a non-observable inspection on the
+ // pending_exception to the newly created TypeError.
+ // https://github.com/tc39/proposal-shadowrealm/issues/353
ThrowTypeError(context, MessageTemplate::kCallShadowRealmFunctionThrown,
var_exception.value());
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 4222cf9c79..37530e7aba 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -385,11 +385,15 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.exchange
TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
- auto maybe_array = Parameter<Object>(Descriptor::kArray);
- auto index = Parameter<Object>(Descriptor::kIndex);
+ auto maybe_array_or_shared_struct =
+ Parameter<Object>(Descriptor::kArrayOrSharedStruct);
+ auto index_or_field_name = Parameter<Object>(Descriptor::kIndexOrFieldName);
auto value = Parameter<Object>(Descriptor::kValue);
auto context = Parameter<Context>(Descriptor::kContext);
+ Label shared_struct(this);
+ GotoIf(IsJSSharedStruct(maybe_array_or_shared_struct), &shared_struct);
+
// Inlines AtomicReadModifyWrite
// https://tc39.es/ecma262/#sec-atomicreadmodifywrite
@@ -397,12 +401,14 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
Label detached(this);
TNode<Int32T> elements_kind;
TNode<RawPtrT> backing_store;
- TNode<JSArrayBuffer> array_buffer = ValidateIntegerTypedArray(
- maybe_array, context, &elements_kind, &backing_store, &detached);
- TNode<JSTypedArray> array = CAST(maybe_array);
+ TNode<JSArrayBuffer> array_buffer =
+ ValidateIntegerTypedArray(maybe_array_or_shared_struct, context,
+ &elements_kind, &backing_store, &detached);
+ TNode<JSTypedArray> array = CAST(maybe_array_or_shared_struct);
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
- TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
+ TNode<UintPtrT> index_word =
+ ValidateAtomicAccess(array, index_or_field_name, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
USE(array_buffer);
@@ -513,6 +519,13 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kDetachedOperation,
"Atomics.exchange");
}
+
+ BIND(&shared_struct);
+ {
+ Return(CallRuntime(Runtime::kAtomicsExchangeSharedStructField, context,
+ maybe_array_or_shared_struct, index_or_field_name,
+ value));
+ }
}
// https://tc39.es/ecma262/#sec-atomics.compareexchange
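
Editor's note: the AtomicsExchange builtin above gains a shared-struct path (dispatching to Runtime::kAtomicsExchangeSharedStructField) while keeping the typed-array path. The typed-array behaviour is standard and easy to sketch; the SharedStruct side is an experimental V8 feature behind a flag, so it is only noted in a comment:

// Standard path: exchange on an Int32Array over a SharedArrayBuffer.
const ta = new Int32Array(new SharedArrayBuffer(8));
ta[0] = 1;
console.log(Atomics.exchange(ta, 0, 42)); // 1   (previous value is returned)
console.log(ta[0]);                       // 42
// With the experimental shared-structs flag enabled, the same builtin also
// accepts a SharedStruct plus a field name instead of (typedArray, index) --
// hence the renamed descriptor parameters kArrayOrSharedStruct / kIndexOrFieldName.
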
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index ceee7b0b94..ee89b02e5a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -751,21 +751,6 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
Operation::kGreaterThanOrEqual);
}
-TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
- auto receiver = Parameter<String>(Descriptor::kReceiver);
- auto position = UncheckedParameter<IntPtrT>(Descriptor::kPosition);
-
- // TODO(sigurds) Figure out if passing length as argument pays off.
- TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
- // Load the character code at the {position} from the {receiver}.
- TNode<Int32T> code =
- LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
- // And return it as TaggedSigned value.
- // TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromInt32(code);
- Return(result);
-}
-
TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
auto receiver = Parameter<String>(Descriptor::kReceiver);
auto position = UncheckedParameter<IntPtrT>(Descriptor::kPosition);
diff --git a/deps/v8/src/builtins/builtins-struct.cc b/deps/v8/src/builtins/builtins-struct.cc
index 851af187a1..3aa9cd60fe 100644
--- a/deps/v8/src/builtins/builtins-struct.cc
+++ b/deps/v8/src/builtins/builtins-struct.cc
@@ -109,7 +109,7 @@ BUILTIN(SharedStructConstructor) {
Handle<Map> instance_map(instance->map(), isolate);
if (instance_map->HasOutOfObjectProperties()) {
int num_oob_fields =
- instance_map->NumberOfFields(ConcurrencyMode::kNotConcurrent) -
+ instance_map->NumberOfFields(ConcurrencyMode::kSynchronous) -
instance_map->GetInObjectProperties();
Handle<PropertyArray> property_array =
factory->NewPropertyArray(num_oob_fields, AllocationType::kSharedOld);
diff --git a/deps/v8/src/builtins/builtins-temporal.cc b/deps/v8/src/builtins/builtins-temporal.cc
index 91ff221ddd..ff6c6e1f2c 100644
--- a/deps/v8/src/builtins/builtins-temporal.cc
+++ b/deps/v8/src/builtins/builtins-temporal.cc
@@ -66,8 +66,6 @@ TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToString)
TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToJSON)
/* Temporal.PlaneTime */
-/* Temporal #sec-temporal.plaintime.from */
-TO_BE_IMPLEMENTED(TemporalPlainTimeFrom)
/* Temporal #sec-temporal.plaintime.compare */
TO_BE_IMPLEMENTED(TemporalPlainTimeCompare)
/* Temporal #sec-temporal.plaintime.prototype.add */
@@ -198,10 +196,6 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainDate)
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainTime)
/* Temporal #sec-temporal.zoneddatetime.prototype.toplaindatetime */
TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainDateTime)
-/* Temporal #sec-temporal.zoneddatetime.prototype.toplainyearmonth */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainYearMonth)
-/* Temporal #sec-temporal.zoneddatetime.prototype.toplainmonthday */
-TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainMonthDay)
/* Temporal.Duration */
/* Temporal #sec-temporal.duration.from */
@@ -210,10 +204,6 @@ TO_BE_IMPLEMENTED(TemporalDurationFrom)
TO_BE_IMPLEMENTED(TemporalDurationCompare)
/* Temporal #sec-temporal.duration.prototype.with */
TO_BE_IMPLEMENTED(TemporalDurationPrototypeWith)
-/* Temporal #sec-temporal.duration.prototype.negated */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeNegated)
-/* Temporal #sec-temporal.duration.prototype.abs */
-TO_BE_IMPLEMENTED(TemporalDurationPrototypeAbs)
/* Temporal #sec-temporal.duration.prototype.add */
TO_BE_IMPLEMENTED(TemporalDurationPrototypeAdd)
/* Temporal #sec-temporal.duration.prototype.subtract */
@@ -228,16 +218,6 @@ TO_BE_IMPLEMENTED(TemporalDurationPrototypeToString)
TO_BE_IMPLEMENTED(TemporalDurationPrototypeToJSON)
/* Temporal.Instant */
-/* Temporal #sec-temporal.instant.from */
-TO_BE_IMPLEMENTED(TemporalInstantFrom)
-/* Temporal #sec-temporal.instant.fromepochseconds */
-TO_BE_IMPLEMENTED(TemporalInstantFromEpochSeconds)
-/* Temporal #sec-temporal.instant.fromepochmilliseconds */
-TO_BE_IMPLEMENTED(TemporalInstantFromEpochMilliseconds)
-/* Temporal #sec-temporal.instant.fromepochmicroseconds */
-TO_BE_IMPLEMENTED(TemporalInstantFromEpochMicroseconds)
-/* Temporal #sec-temporal.instant.fromepochnanoseconds */
-TO_BE_IMPLEMENTED(TemporalInstantFromEpochNanoseconds)
/* Temporal #sec-temporal.instant.compare */
TO_BE_IMPLEMENTED(TemporalInstantCompare)
/* Temporal #sec-temporal.instant.prototype.add */
@@ -322,8 +302,6 @@ TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetPreviousTransition)
TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeToJSON)
/* Temporal.Calendar */
-/* Temporal #sec-temporal.calendar.prototype.datefromfields */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateFromFields)
/* Temporal #sec-temporal.calendar.prototype.yearmonthfromfields */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeYearMonthFromFields)
/* Temporal #sec-temporal.calendar.prototype.monthdayfromfields */
@@ -332,32 +310,14 @@ TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthDayFromFields)
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateAdd)
/* Temporal #sec-temporal.calendar.prototype.dateuntil */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateUntil)
-/* Temporal #sec-temporal.calendar.prototype.year */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeYear)
/* Temporal #sec-temporal.calendar.prototype.month */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonth)
/* Temporal #sec-temporal.calendar.prototype.monthcode */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthCode)
/* Temporal #sec-temporal.calendar.prototype.day */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDay)
-/* Temporal #sec-temporal.calendar.prototype.dayofweek */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDayOfWeek)
-/* Temporal #sec-temporal.calendar.prototype.dayofyear */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDayOfYear)
/* Temporal #sec-temporal.calendar.prototype.weekofyear */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeWeekOfYear)
-/* Temporal #sec-temporal.calendar.prototype.daysinweek */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInWeek)
-/* Temporal #sec-temporal.calendar.prototype.daysinmonth */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInMonth)
-/* Temporal #sec-temporal.calendar.prototype.daysinyear */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInYear)
-/* Temporal #sec-temporal.calendar.prototype.monthsinyear */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthsInYear)
-/* Temporal #sec-temporal.calendar.prototype.inleapyear */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeInLeapYear)
-/* Temporal #sec-temporal.calendar.prototype.mergefields */
-TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMergeFields)
/* Temporal #sec-temporal.calendar.prototype.tojson */
TO_BE_IMPLEMENTED(TemporalCalendarPrototypeToJSON)
@@ -440,6 +400,17 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToLocaleString)
JSTemporal##T ::METHOD(isolate, obj, args.atOrUndefined(isolate, 1))); \
}
+#define TEMPORAL_PROTOTYPE_METHOD2(T, METHOD, name) \
+ BUILTIN(Temporal##T##Prototype##METHOD) { \
+ HandleScope scope(isolate); \
+ const char* method = "Temporal." #T ".prototype." #name; \
+ CHECK_RECEIVER(JSTemporal##T, obj, method); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T ::METHOD(isolate, obj, args.atOrUndefined(isolate, 1), \
+ args.atOrUndefined(isolate, 2))); \
+ }
+
#define TEMPORAL_PROTOTYPE_METHOD3(T, METHOD, name) \
BUILTIN(Temporal##T##Prototype##METHOD) { \
HandleScope scope(isolate); \
@@ -481,6 +452,14 @@ TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToLocaleString)
return Smi::FromInt(obj->field()); \
}
+#define TEMPORAL_METHOD1(T, METHOD) \
+ BUILTIN(Temporal##T##METHOD) { \
+ HandleScope scope(isolate); \
+ RETURN_RESULT_OR_FAILURE( \
+ isolate, \
+ JSTemporal##T ::METHOD(isolate, args.atOrUndefined(isolate, 1))); \
+ }
+
#define TEMPORAL_GET(T, METHOD, field) \
BUILTIN(Temporal##T##Prototype##METHOD) { \
HandleScope scope(isolate); \
@@ -606,6 +585,7 @@ TEMPORAL_GET_SMI(PlainTime, Second, iso_second)
TEMPORAL_GET_SMI(PlainTime, Millisecond, iso_millisecond)
TEMPORAL_GET_SMI(PlainTime, Microsecond, iso_microsecond)
TEMPORAL_GET_SMI(PlainTime, Nanosecond, iso_nanosecond)
+TEMPORAL_METHOD2(PlainTime, From)
TEMPORAL_PROTOTYPE_METHOD0(PlainTime, GetISOFields, getISOFields)
TEMPORAL_VALUE_OF(PlainTime)
@@ -756,6 +736,8 @@ TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Nanosecond,
iso_nanosecond)
TEMPORAL_PROTOTYPE_METHOD1(ZonedDateTime, WithCalendar, withCalendar)
TEMPORAL_PROTOTYPE_METHOD1(ZonedDateTime, WithTimeZone, withTimeZone)
+TEMPORAL_PROTOTYPE_METHOD0(ZonedDateTime, ToPlainYearMonth, toPlainYearMonth)
+TEMPORAL_PROTOTYPE_METHOD0(ZonedDateTime, ToPlainMonthDay, toPlainMonthDay)
TEMPORAL_PROTOTYPE_METHOD0(ZonedDateTime, GetISOFields, getISOFields)
TEMPORAL_VALUE_OF(ZonedDateTime)
@@ -788,10 +770,17 @@ TEMPORAL_GET(Duration, Microseconds, microseconds)
TEMPORAL_GET(Duration, Nanoseconds, nanoseconds)
TEMPORAL_PROTOTYPE_METHOD0(Duration, Sign, sign)
TEMPORAL_PROTOTYPE_METHOD0(Duration, Blank, blank)
+TEMPORAL_PROTOTYPE_METHOD0(Duration, Negated, negated)
+TEMPORAL_PROTOTYPE_METHOD0(Duration, Abs, abs)
TEMPORAL_VALUE_OF(Duration)
// Instant
TEMPORAL_CONSTRUCTOR1(Instant)
+TEMPORAL_METHOD1(Instant, FromEpochSeconds)
+TEMPORAL_METHOD1(Instant, FromEpochMilliseconds)
+TEMPORAL_METHOD1(Instant, FromEpochMicroseconds)
+TEMPORAL_METHOD1(Instant, FromEpochNanoseconds)
+TEMPORAL_METHOD1(Instant, From)
TEMPORAL_VALUE_OF(Instant)
TEMPORAL_GET(Instant, EpochNanoseconds, nanoseconds)
TEMPORAL_GET_NUMBER_AFTER_DIVID(Instant, EpochSeconds, nanoseconds, 1000000000,
@@ -804,6 +793,16 @@ TEMPORAL_GET_BIGINT_AFTER_DIVID(Instant, EpochMicroseconds, nanoseconds, 1000,
// Calendar
TEMPORAL_CONSTRUCTOR1(Calendar)
TEMPORAL_ID_BY_TO_STRING(Calendar)
+TEMPORAL_PROTOTYPE_METHOD2(Calendar, DateFromFields, dateFromFields)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, DaysInMonth, daysInMonth)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, DaysInWeek, daysInWeek)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, DaysInYear, daysInYear)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, DayOfWeek, dayOfWeek)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, DayOfYear, dayOfYear)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, InLeapYear, inLeapYear)
+TEMPORAL_PROTOTYPE_METHOD2(Calendar, MergeFields, mergeFields)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, MonthsInYear, monthsInYear)
+TEMPORAL_PROTOTYPE_METHOD1(Calendar, Year, year)
TEMPORAL_TO_STRING(Calendar)
// #sec-temporal.calendar.from
BUILTIN(TemporalCalendarFrom) {
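The TEMPORAL_METHOD1 and TEMPORAL_PROTOTYPE_METHOD2 macros added in this hunk stamp out thin builtins that forward one or two JS arguments to the matching JSTemporal* C++ implementation. As a rough orientation, this is a hand-expanded sketch of TEMPORAL_METHOD1(Instant, From), based only on the macro body shown above; the expansion is illustrative and not itself part of the patch:

    // Approximate expansion of TEMPORAL_METHOD1(Instant, From).
    BUILTIN(TemporalInstantFrom) {
      HandleScope scope(isolate);
      RETURN_RESULT_OR_FAILURE(
          isolate,
          JSTemporalInstant::From(isolate, args.atOrUndefined(isolate, 1)));
    }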
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index c0ab3bc564..cb0ad9f22b 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -18,7 +18,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/objects-inl.h"
#include "src/objects/visitors.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index c53c970f9c..5cb6e0bc92 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -741,8 +741,8 @@ Cast<Zero|PromiseReaction>(implicit context: Context)(o: Object): Zero|
}
}
-Cast<JSFunction|JSBoundFunction>(implicit context: Context)(o: Object):
- JSFunction|JSBoundFunction labels CastError {
+Cast<JSFunction|JSBoundFunction|JSWrappedFunction>(implicit context: Context)(
+ o: Object): JSFunction|JSBoundFunction|JSWrappedFunction labels CastError {
typeswitch (o) {
case (o: JSFunction): {
return o;
@@ -750,6 +750,9 @@ Cast<JSFunction|JSBoundFunction>(implicit context: Context)(o: Object):
case (o: JSBoundFunction): {
return o;
}
+ case (o: JSWrappedFunction): {
+ return o;
+ }
case (Object): {
goto CastError;
}
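With JSWrappedFunction added to the union, Torque callers can route all three callable representations (plain, bound, and wrapped functions) through a single Cast<>. A hedged C++ analogue of the widened check, written with the conventional Is* predicates; the helper function itself is illustrative and not part of this diff:

    // Sketch: accept the same three object shapes the widened Cast<> accepts.
    bool IsBindableCallable(Object o) {
      return o.IsJSFunction() || o.IsJSBoundFunction() || o.IsJSWrappedFunction();
    }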
diff --git a/deps/v8/src/builtins/function.tq b/deps/v8/src/builtins/function.tq
index d9eb1740aa..ccc3c5bc81 100644
--- a/deps/v8/src/builtins/function.tq
+++ b/deps/v8/src/builtins/function.tq
@@ -22,8 +22,8 @@ const kLengthDescriptorIndex: constexpr int32
const kNameDescriptorIndex: constexpr int32
generates 'JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex'
;
-const kMinDescriptorsForFastBind:
- constexpr int31 generates 'JSFunction::kMinDescriptorsForFastBind';
+const kMinDescriptorsForFastBindAndWrap: constexpr int31
+ generates 'JSFunction::kMinDescriptorsForFastBindAndWrap';
macro CheckAccessor(implicit context: Context)(
array: DescriptorArray, index: constexpr int32,
@@ -44,7 +44,7 @@ FastFunctionPrototypeBind(
const argc: intptr = arguments.actual_count;
try {
typeswitch (receiver) {
- case (fn: JSFunction|JSBoundFunction): {
+ case (fn: JSFunction|JSBoundFunction|JSWrappedFunction): {
// Disallow binding of slow-mode functions. We need to figure out
// whether the length and name property are in the original state.
Comment('Disallow binding of slow-mode functions');
@@ -55,7 +55,7 @@ FastFunctionPrototypeBind(
// the actual value on the object changes.
if (fn.map.bit_field3.number_of_own_descriptors <
- kMinDescriptorsForFastBind) {
+ kMinDescriptorsForFastBindAndWrap) {
goto Slow;
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 2df39166c9..7f32853d1f 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -821,14 +821,14 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ cmp(actual_marker, static_cast<int>(expected_marker));
+ __ cmp(actual_state, static_cast<int>(expected_state));
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -883,30 +883,24 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
-static void MaybeOptimizeCode(MacroAssembler* masm,
- Register optimization_marker) {
+static void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- eax : actual argument count
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
- DCHECK(!AreAliased(edx, edi, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ int3();
- }
+ DCHECK(!AreAliased(edx, edi, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ int3();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -996,11 +990,11 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
+// is optimized code or a tiering state that needs to be processed.
// Registers optimization_state and feedback_vector must be aliased.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state,
- XMMRegister saved_feedback_vector, Label* has_optimized_code_or_marker) {
+ XMMRegister saved_feedback_vector, Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
Register feedback_vector = optimization_state;
@@ -1010,12 +1004,12 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
__ mov(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if there is optimized code or a optimization marker that needes to be
+  // Check if there is optimized code or a tiering state that needs to be
// processed.
- __ test(
- optimization_state,
- Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ j(not_zero, has_optimized_code_or_marker);
+ __ test(optimization_state,
+ Immediate(
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ j(not_zero, has_optimized_code_or_state);
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1025,16 +1019,16 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Label maybe_has_optimized_code;
// Check if optimized code is available
__ test(optimization_state,
- Immediate(FeedbackVector::kHasCompileOptimizedMarker));
+ Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
__ j(zero, &maybe_has_optimized_code);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, tiering_state);
__ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_marker;
- Register feedback_vector = optimization_marker;
+ Register optimized_code_entry = tiering_state;
+ Register feedback_vector = tiering_state;
__ movd(feedback_vector, saved_feedback_vector); // Restore feedback vector.
__ mov(
optimized_code_entry,
@@ -1042,6 +1036,20 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
}
+namespace {
+
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset the bytecode age and OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ mov(FieldOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+ Immediate(0));
+}
+
+} // namespace
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1087,10 +1095,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the optimization state from the feedback vector and re-use the
// register.
- Label has_optimized_code_or_marker;
+ Label has_optimized_code_or_state;
Register optimization_state = ecx;
- LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
+ &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
@@ -1130,15 +1138,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- Immediate(0));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
@@ -1262,7 +1262,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
// Restore actual argument count.
__ movd(eax, xmm0);
@@ -1290,9 +1290,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &install_baseline_code);
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, xmm1, &has_optimized_code_or_marker);
+ // Check the tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ movd(ecx, xmm2);
@@ -1715,11 +1715,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Load the optimization state from the feedback vector and re-use the
// register.
- Label has_optimized_code_or_marker;
+ Label has_optimized_code_or_state;
Register optimization_state = ecx;
- LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state,
- saved_feedback_vector,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ saved_feedback_vector,
+ &has_optimized_code_or_state);
// Load the feedback vector and increment the invocation count.
__ movd(feedback_vector, saved_feedback_vector);
@@ -1750,15 +1750,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecode_array = scratch;
__ movd(bytecode_array, saved_bytecode_array);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ mov_w(
- FieldOperand(bytecode_array, BytecodeArray::kOsrLoopNestingLevelOffset),
- Immediate(0));
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
@@ -1791,7 +1783,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the return address and bytecode array, rebalancing the return stack
@@ -2813,7 +2805,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
Label skip;
@@ -4009,7 +4001,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ neg(edx);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
+ __ PrepareCallCFunction(5, eax);
__ mov(eax, Immediate(0));
Label context_check;
__ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -4019,15 +4011,13 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
- __ mov(Operand(esp, 2 * kSystemPointerSize),
- Immediate(Deoptimizer::kFixedExitSizeMarker)); // Bailout id.
- __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
- __ Move(Operand(esp, 5 * kSystemPointerSize),
+ __ mov(Operand(esp, 2 * kSystemPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 3 * kSystemPointerSize), edx); // Fp-to-sp delta.
+ __ Move(Operand(esp, 4 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve deoptimizer object in register eax and get the input
@@ -4159,10 +4149,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -4288,12 +4274,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
- // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
- // Sparkplug here.
- __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- Immediate(0));
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+ // disarm Sparkplug here.
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj);
} else {
__ jmp(code_obj);
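The recurring rename in this and the following ports replaces the OptimizationMarker terminology with TieringState: the feedback-vector flags are tested against kHasOptimizedCodeOrTieringStateIsAnyRequestMask, and a pending request dispatches to Runtime::kCompileTurbofan_Synchronous or Runtime::kCompileTurbofan_Concurrent. Stripped of the register juggling, the flag test the assembly performs amounts to the following sketch (the free function and parameter names are illustrative; the mask name is the one used in the diff):

    #include <cstdint>

    // Sketch of the decision made by LoadTieringStateAndJumpIfNeedsProcessing:
    // `flags` is the 32-bit FeedbackVector flags word, `mask` stands in for
    // FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask. Branch
    // to the slow path if either optimized code is cached or any tiering
    // request is pending.
    static bool NeedsTieringProcessing(uint32_t flags, uint32_t mask) {
      return (flags & mask) != 0;
    }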
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
index 10849667fd..b94128f409 100644
--- a/deps/v8/src/builtins/loong64/builtins-loong64.cc
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -847,14 +847,14 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, actual_marker,
- Operand(static_cast<int>(expected_marker)));
+ __ Branch(&no_match, ne, actual_state,
+ Operand(static_cast<int>(expected_state)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -902,29 +902,24 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- a0 : actual argument count
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1006,10 +1001,10 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
Register scratch = t2;
// TODO(liuyu): Remove CHECK
@@ -1019,8 +1014,8 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1033,23 +1028,36 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ And(scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedMarker));
+ Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
}
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
- __ Ld_d(optimization_marker,
+ __ Ld_d(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset code age and the OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ St_w(zero_reg,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+} // namespace
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
@@ -1072,17 +1080,17 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+  // Check for a tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = no_reg;
{
UseScratchRegisterScope temps(masm);
optimization_state = temps.Acquire();
- // optimization_state will be used only in |has_optimized_code_or_marker|
+ // optimization_state will be used only in |has_optimized_code_or_state|
// and outside it can be reused.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
}
// Increment invocation count for the function.
{
@@ -1115,20 +1123,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
- Register bytecodeArray = descriptor.GetRegisterParameter(
+ Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ St_h(zero_reg,
- FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrLoopNestingLevelOffset));
-
- __ Push(argc, bytecodeArray);
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+ __ Push(argc, bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
@@ -1172,7 +1170,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429): Document this frame setup better.
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
UseScratchRegisterScope temps(masm);
@@ -1250,17 +1248,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
+  // is optimized code or a tiering state, call that instead.
Register optimization_state = a4;
__ Ld_w(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty or has a optimization marker.
- Label has_optimized_code_or_marker;
+ // Check if the optimized code slot is not empty or has a tiering state.
+ Label has_optimized_code_or_state;
__ andi(t0, optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
- __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
+ __ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1279,14 +1277,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1407,7 +1398,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
@@ -1426,10 +1417,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+  // Check for a tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ Move(a2, kInterpreterBytecodeArrayRegister);
@@ -1809,7 +1800,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
@@ -3405,17 +3396,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
- __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
- // Get the address of the location in the code object (a3) (return
+ // Get the address of the location in the code object (a2) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
- // register a4.
- __ mov(a3, ra);
- __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize));
+ // register a3.
+ __ mov(a2, ra);
+ __ Add_d(a3, sp, Operand(kSavedRegistersAreaSize));
- __ sub_d(a4, fp, a4);
+ __ sub_d(a3, fp, a3);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, a5);
+ __ PrepareCallCFunction(5, a4);
// Pass six arguments, according to n64 ABI.
__ mov(a0, zero_reg);
Label context_check;
@@ -3424,15 +3414,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- // a4: already has fp-to-sp delta.
- __ li(a5, ExternalReference::isolate_address(isolate));
+ // a2: code address or 0 already loaded.
+ // a3: already has fp-to-sp delta.
+ __ li(a4, ExternalReference::isolate_address(isolate));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register a0 and get the input
@@ -3561,10 +3550,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3688,15 +3673,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
// TODO(liuyu): Remove Ld as arm64 after register reallocation.
__ Ld_d(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ St_h(zero_reg,
- FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
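Each of these ports also drops the Deoptimizer::kFixedExitSizeMarker ("bailout id") argument, so Deoptimizer::New is now reached through a five-argument C call instead of six: function, deopt kind, code address (or 0), fp-to-sp delta, and the isolate. The exact C++ signature is not shown in this diff; the sketch below only records the argument order implied by the register and stack assignments above, with assumed field names:

    #include <cstdint>

    // Illustrative bundle of the five arguments now passed to
    // Deoptimizer::New, in the order set up by the ports above.
    struct DeoptimizerNewArgs {
      void* raw_function;     // a0 / esp slot 0: the JSFunction, or 0.
      int deopt_kind;         // a1 / esp slot 1: static_cast<int>(deopt_kind).
      void* from;             // a2 / esp slot 2: code address or 0.
      intptr_t fp_to_sp;      // a3 / esp slot 3: fp-to-sp delta.
      void* isolate_address;  // a4 / esp slot 4: isolate address.
    };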
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index a907e0cedf..379af4b264 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -842,15 +842,15 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ Branch(&no_match, ne, actual_marker,
- Operand(static_cast<int>(expected_marker)));
+ __ Branch(&no_match, ne, actual_state,
+ Operand(static_cast<int>(expected_state)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -900,32 +900,26 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- a0 : actual argument count
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a int32 containing a non-zero optimization
+  // -- tiering_state : an int32 containing a non-zero optimization
// marker.
// -----------------------------------
ASM_CODE_COMMENT(masm);
- DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized /
- // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
- // here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1008,18 +1002,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
Register scratch = t6;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1032,23 +1026,36 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ And(scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedMarker));
+ Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
}
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
- __ Lw(optimization_marker,
+ __ Lw(tiering_state,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset code age and the OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ sw(zero_reg,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+} // namespace
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
@@ -1069,17 +1076,17 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+  // Check for a tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = no_reg;
{
UseScratchRegisterScope temps(masm);
optimization_state = temps.Acquire();
- // optimization_state will be used only in |has_optimized_code_or_marker|
+ // optimization_state will be used only in |has_optimized_code_or_state|
// and outside it can be reused.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
}
// Increment invocation count for the function.
{
@@ -1112,19 +1119,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
- Register bytecodeArray = descriptor.GetRegisterParameter(
+ Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrLoopNestingLevelOffset));
-
- __ Push(argc, bytecodeArray);
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+ __ Push(argc, bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
@@ -1168,7 +1166,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429): Document this frame setup better.
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
UseScratchRegisterScope temps(masm);
@@ -1244,17 +1242,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
+  // is optimized code or a tiering state, call that instead.
Register optimization_state = t0;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty or has a optimization marker.
- Label has_optimized_code_or_marker;
+ // Check if the optimized code slot is not empty or has a tiering state.
+ Label has_optimized_code_or_state;
__ andi(t1, optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
- __ Branch(&has_optimized_code_or_marker, ne, t1, Operand(zero_reg));
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
+ __ Branch(&has_optimized_code_or_state, ne, t1, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1273,14 +1271,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1399,7 +1390,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&is_baseline);
@@ -1417,10 +1408,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lhu(t4, FieldMemOperand(t4, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, t4, Operand(FEEDBACK_VECTOR_TYPE));
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+  // Check for a tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ Move(a2, kInterpreterBytecodeArrayRegister);
@@ -1801,7 +1792,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
@@ -3852,16 +3843,15 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
static constexpr int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
- __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
- // Get the address of the location in the code object (a3) (return
+ // Get the address of the location in the code object (a2) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
- // register t0.
- __ mov(a3, ra);
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
- __ Subu(t0, fp, t0);
+ // register a3.
+ __ mov(a2, ra);
+ __ Addu(a3, sp, Operand(kSavedRegistersAreaSize));
+ __ Subu(a3, fp, a3);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, t1);
+ __ PrepareCallCFunction(5, t0);
// Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
__ mov(a0, zero_reg);
Label context_check;
@@ -3870,15 +3860,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, ExternalReference::isolate_address(isolate));
- __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
+ // a2: code address or 0 already loaded.
+ // a3: Fp-to-sp delta already loaded.
+ __ li(t0, ExternalReference::isolate_address(isolate));
+ __ sw(t0, CFunctionArgumentOperand(5)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register v0 and get the input
@@ -4009,10 +3998,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -4136,14 +4121,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
// TODO(liuyu): Remove Ld as arm64 after register reallocation.
__ Lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index ea574acfd8..f1eb2e1847 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -852,14 +852,14 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, actual_marker,
- Operand(static_cast<int>(expected_marker)));
+ __ Branch(&no_match, ne, actual_state,
+ Operand(static_cast<int>(expected_state)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -911,30 +911,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- a0 : actual argument count
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a int32 containing a non-zero optimization
+  // -- tiering_state : an int32 containing a non-zero optimization
// marker.
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1016,18 +1011,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
Register scratch = t2;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1040,22 +1035,35 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ And(scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedMarker));
+ Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
}
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
- __ Ld(optimization_marker,
+ __ Ld(tiering_state,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset code age and the OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Sw(zero_reg,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+} // namespace
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
@@ -1076,17 +1084,17 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+  // Check for a tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = no_reg;
{
UseScratchRegisterScope temps(masm);
optimization_state = temps.Acquire();
- // optimization_state will be used only in |has_optimized_code_or_marker|
+ // optimization_state will be used only in |has_optimized_code_or_state|
// and outside it can be reused.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
}
// Increment invocation count for the function.
{
@@ -1119,19 +1127,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
- Register bytecodeArray = descriptor.GetRegisterParameter(
+ Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrLoopNestingLevelOffset));
-
- __ Push(argc, bytecodeArray);
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+ __ Push(argc, bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
@@ -1175,7 +1174,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429): Document this frame setup better.
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
UseScratchRegisterScope temps(masm);
@@ -1251,17 +1250,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
+  // is optimized code or a tiering state, call that instead.
Register optimization_state = a4;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty or has a optimization marker.
- Label has_optimized_code_or_marker;
+ // Check if the optimized code slot is not empty or has a tiering state.
+ Label has_optimized_code_or_state;
__ andi(t0, optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
- __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
+ __ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1280,14 +1279,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1407,7 +1399,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&is_baseline);
@@ -1425,10 +1417,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+  // Check for a tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ Move(a2, kInterpreterBytecodeArrayRegister);
@@ -1805,7 +1797,7 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
@@ -3430,17 +3422,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
- __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
- // Get the address of the location in the code object (a3) (return
+ // Get the address of the location in the code object (a2) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
- // register a4.
- __ mov(a3, ra);
- __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
+ // register a3.
+ __ mov(a2, ra);
+ __ Daddu(a3, sp, Operand(kSavedRegistersAreaSize));
- __ Dsubu(a4, fp, a4);
+ __ Dsubu(a3, fp, a3);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, a5);
+ __ PrepareCallCFunction(5, a4);
// Pass six arguments, according to n64 ABI.
__ mov(a0, zero_reg);
Label context_check;
@@ -3449,15 +3440,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- // a4: already has fp-to-sp delta.
- __ li(a5, ExternalReference::isolate_address(isolate));
+ // a2: code address or 0 already loaded.
+ // a3: already has fp-to-sp delta.
+ __ li(a4, ExternalReference::isolate_address(isolate));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register v0 and get the input
@@ -3587,10 +3577,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3713,14 +3699,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
// TODO(liuyu): Remove Ld as arm64 after register reallocation.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index cb43a1ea2a..94a723667b 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -18,20 +18,20 @@ transitioning macro ObjectFromEntriesFastCase(implicit context: Context)(
const pair: KeyValuePair =
collections::LoadKeyValuePairNoSideEffects(value)
otherwise IfSlow;
- // StorePropertyInLiteral only handles Names and Numbers. Bail out if
+ // CreateDataProperty only handles Names and Numbers. Bail out if
// the key is not one of those types. Note that JSReceivers should
// always bail to the slow path, as calling Symbol.toPrimitive,
// toString, or valueOf could invalidate assumptions about the
// iterable.
typeswitch (pair.key) {
case (Name): {
- SetPropertyInLiteral(result, pair.key, pair.value);
+ CreateDataProperty(result, pair.key, pair.value);
}
case (Number): {
- SetPropertyInLiteral(result, pair.key, pair.value);
+ CreateDataProperty(result, pair.key, pair.value);
}
case (oddball: Oddball): {
- SetPropertyInLiteral(result, oddball.to_string, pair.value);
+ CreateDataProperty(result, oddball.to_string, pair.value);
}
case (JSAny): {
goto IfSlow;
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 02421e5c21..64b45555a1 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -32,6 +32,224 @@ namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
+namespace {
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)), r0);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
+ USE(GetSharedFunctionInfoBytecodeOrBaseline);
+ ASM_CODE_COMMENT(masm);
+ Label done;
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ b(ne, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ beq(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ beq(is_baseline);
+ }
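+ // Not baseline code. If the function data is an InterpreterData, unwrap
+ // the BytecodeArray it holds.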
+ __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
+ __ bne(&done);
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ intptr_t offset) {
+ __ AddS64(ip, entry_address, Operand(offset), r0);
+ __ mtlr(ip);
+
+ // "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
+ Register scratch) {
+ // Reset the bytecode age and OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ mov(scratch, Operand(0));
+ __ StoreU32(scratch,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+ r0);
+}
+
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = r4;
+ __ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset),
+ r0);
+
+ // Get the Code object from the shared function info.
+ Register code_obj = r9;
+ __ LoadTaggedPointerField(
+ code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
+ r0);
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+ __ b(eq, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+ }
+
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r6);
+ }
+
+ // Load the feedback vector.
+ Register feedback_vector = r5;
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
+ r0);
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ CompareObjectType(feedback_vector, r6, r6, FEEDBACK_VECTOR_TYPE);
+ __ b(ne, &install_baseline_code);
+
+ // Save BytecodeOffset from the stack frame.
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ // Replace BytecodeOffset with the feedback vector.
+ __ StoreU64(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+ Register get_baseline_pc = r6;
+ __ Move(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ CmpS64(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset),
+ r0);
+ __ b(eq, &function_entry_bytecode);
+ }
+
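+ // Strip the BytecodeArray header bias so the offset is relative to the
+ // first bytecode.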
+ __ SubS64(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
+ {
+ Register arg_reg_1 = r3;
+ Register arg_reg_2 = r4;
+ Register arg_reg_3 = r5;
+ __ mr(arg_reg_1, code_obj);
+ __ mr(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ mr(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(4, 0, ip);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
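+ // kReturnRegister0 now holds the pc offset into the baseline code; add it
+ // to the Code object.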
+ __ AddS64(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ Register scratch = ip;
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
+ scratch);
+ Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+ // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+ // address of
+ // the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+ if (next_bytecode) {
+ __ Move(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ b(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ // Retry from the start after installing baseline code.
+ __ b(&start);
+}
+
+} // namespace
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
@@ -167,6 +385,57 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
}
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+ ASM_CODE_COMMENT(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
+ }
+
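+ // The runtime call leaves its result (the Code object, or Smi zero) in r3.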
+ // If the code object is null, just return to the caller.
+ Label skip;
+ __ CmpSmiLiteral(r3, Smi::zero(), r0);
+ __ bne(&skip);
+ __ Ret();
+
+ __ bind(&skip);
+
+ if (is_interpreter) {
+ // Drop the handler frame that is sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
+ r0);
+
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
+ }
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ SmiUntag(r4,
+ FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)),
+ LeaveRC, r0);
+
+ // Compute the target address = code start + osr_offset
+ __ add(r0, r3, r4);
+
+ // And "return" to the OSR entry point of the function.
+ __ mtlr(r0);
+ __ blr();
+ }
+}
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -854,13 +1123,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
Label no_match;
- __ cmpi(actual_marker, Operand(static_cast<int>(expected_marker)));
+ __ cmpi(actual_state, Operand(static_cast<int>(expected_state)));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -913,30 +1182,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- r3 : actual argument count
// -- r6 : new target (preserved for callee if needed, and caller)
// -- r4 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a int32 containing a non-zero optimization
+ // -- tiering_state : an int32 containing a non-zero optimization
// marker.
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ DCHECK(!AreAliased(feedback_vector, r4, r6, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1025,24 +1289,177 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
- r0);
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kTieringStateIsAnyRequestMask, r0);
__ beq(&maybe_has_optimized_code, cr0);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
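+ // A tiering request is pending: decode it and tail-call the matching
+ // compile runtime function.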
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ LoadAnyTaggedField(
- optimization_marker,
+ tiering_state,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset),
r0);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_state) {
+ ASM_CODE_COMMENT(masm);
+ USE(LoadTieringStateAndJumpIfNeedsProcessing);
+ DCHECK(!AreAliased(optimization_state, feedback_vector));
+ __ LoadU32(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ CHECK(is_uint16(
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ mov(
+ r0,
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ AndU32(r0, optimization_state, r0, SetRC);
+ __ bne(has_optimized_code_or_state);
+}
+
+#if ENABLE_SPARKPLUG
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ auto descriptor =
+ Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = ip;
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
+ r0);
+
+ if (FLAG_debug_code) {
+ Register scratch = r11;
+ __ CompareObjectType(feedback_vector, scratch, scratch,
+ FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+
+ // Check for a tiering state.
+ Label has_optimized_code_or_state;
+ Register optimization_state = r11;
+ {
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
+ }
+
+ // Increment invocation count for the function.
+ {
+ Register invocation_count = r13;
+ __ LoadU64(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset),
+ r0);
+ __ AddS64(invocation_count, invocation_count, Operand(1));
+ __ StoreU64(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset),
+ r0);
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
+ // Normally the first thing we'd do here is Push(lr, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+ // value lr before the call to this BaselineOutOfLinePrologue builtin.
+
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode array both for resetting the code age/OSR state
+ // and for pushing onto the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+ ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r13);
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ Register scratch = r13;
+ __ CompareObjectType(feedback_vector, scratch, scratch,
+ FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+ __ Push(feedback_vector);
+ }
+
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+
+ Register sp_minus_frame_size = r13;
+ Register interrupt_limit = r0;
+ __ SubS64(sp_minus_frame_size, sp, frame_size);
+ __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
+ __ CmpU64(sp_minus_frame_size, interrupt_limit);
+ __ blt(&call_stack_guard);
+ }
+
+ // Do "fast" return to the caller pc in lr.
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_state);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+
+ // Drop the frame created by the baseline call.
+ __ Pop(r0, fp);
+ __ mtlr(r0);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Ret();
+}
+#endif
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1104,12 +1521,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
r0);
- // Check if the optimized code slot is not empty or has a optimization marker.
- Label has_optimized_code_or_marker;
- __ TestBitMask(optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
- r0);
- __ bne(&has_optimized_code_or_marker, cr0);
+ // Check if the optimized code slot is not empty or has a tiering state.
+ Label has_optimized_code_or_state;
+ __ TestBitMask(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask, r0);
+ __ bne(&has_optimized_code_or_state, cr0);
Label not_optimized;
__ bind(&not_optimized);
@@ -1134,17 +1551,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ li(r8, Operand(0));
- __ StoreU16(r8,
- FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- r0);
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r8);
// Load initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1269,7 +1676,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
@@ -1637,52 +2044,16 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
- }
-
- // If the code object is null, just return to the caller.
- Label skip;
- __ CmpSmiLiteral(r3, Smi::zero(), r0);
- __ bne(&skip);
- __ Ret();
-
- __ bind(&skip);
-
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
-
- // Load deoptimization data from the code object.
- // <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
- r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
- r0);
-
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
-
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
- }
-
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ SmiUntag(r4,
- FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)),
- LeaveRC, r0);
-
- // Compute the target address = code start + osr_offset
- __ add(r0, r3, r4);
+ return OnStackReplacement(masm, true);
+}
- // And "return" to the OSR entry point of the function.
- __ mtlr(r0);
- __ blr();
- }
+#if ENABLE_SPARKPLUG
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ LoadU64(kContextRegister,
+ MemOperand(fp, BaselineFrameConstants::kContextOffset), r0);
+ return OnStackReplacement(masm, false);
}
+#endif
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -3277,17 +3648,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
- __ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r7.
- __ mflr(r6);
- __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
- __ sub(r7, fp, r7);
+ __ mflr(r5);
+ __ addi(r6, sp, Operand(kSavedRegistersAreaSize));
+ __ sub(r6, fp, r6);
// Allocate a new deoptimizer object.
// Pass six arguments in r3 to r8.
- __ PrepareCallCFunction(6, r8);
+ __ PrepareCallCFunction(5, r8);
__ li(r3, Operand::Zero());
Label context_check;
__ LoadU64(r4,
@@ -3296,14 +3666,13 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(r4, Operand(static_cast<int>(deopt_kind)));
- // r5: bailout id already loaded.
- // r6: code address or 0 already loaded.
- // r7: Fp-to-sp delta.
- __ Move(r8, ExternalReference::isolate_address(isolate));
+ // r5: code address or 0 already loaded.
+ // r6: Fp-to-sp delta already loaded.
+ __ Move(r7, ExternalReference::isolate_address(isolate));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register r3 and get the input
@@ -3466,10 +3835,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3477,19 +3842,19 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
- __ bkpt(0);
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
- __ bkpt(0);
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2800112.
- __ bkpt(0);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
#undef __
diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq
index eec333f4ce..b502eabf05 100644
--- a/deps/v8/src/builtins/promise-constructor.tq
+++ b/deps/v8/src/builtins/promise-constructor.tq
@@ -50,7 +50,7 @@ PromiseConstructor(
newTarget: JSAny)(executor: JSAny): JSAny {
// 1. If NewTarget is undefined, throw a TypeError exception.
if (newTarget == Undefined) {
- ThrowTypeError(MessageTemplate::kNotAPromise, newTarget);
+ ThrowTypeError(MessageTemplate::kPromiseNewTargetUndefined);
}
// 2. If IsCallable(executor) is false, throw a TypeError exception.
@@ -60,11 +60,11 @@ PromiseConstructor(
const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
- // Silently fail if the stack looks fishy.
+ // Throw a 'no access' TypeError if the stack looks fishy.
if (HasAccessCheckFailed(context, promiseFun, executor)) {
IncrementUseCounter(
context, SmiConstant(kPromiseConstructorReturnedUndefined));
- return Undefined;
+ runtime::ThrowNoAccess();
}
let result: JSPromise;
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index df3010669e..199fc31319 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -9,6 +9,8 @@ namespace runtime {
extern transitioning runtime
AllowDynamicFunction(implicit context: Context)(JSAny): JSAny;
+extern transitioning runtime ThrowNoAccess(implicit context: Context)(): never;
+
extern transitioning runtime
ReportMessageFromMicrotask(implicit context: Context)(JSAny): JSAny;
}
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index fd0976e1c1..d6fb8d279a 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -897,15 +897,15 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
MacroAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ Branch(&no_match, ne, actual_marker,
- Operand(static_cast<int>(expected_marker)), Label::Distance::kNear);
+ __ Branch(&no_match, ne, actual_state,
+ Operand(static_cast<int>(expected_state)), Label::Distance::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -958,34 +958,29 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- a0 : actual argument count
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a int32 containing a non-zero optimization
+ // -- tiering_state : an int32 containing a non-zero optimization
// marker.
// -----------------------------------
ASM_CODE_COMMENT(masm);
- DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
+ DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1071,10 +1066,10 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector));
UseScratchRegisterScope temps(masm);
@@ -1083,8 +1078,8 @@ static void LoadOptimizationStateAndJumpIfNeedsProcessing(
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ And(
scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1100,24 +1095,37 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ And(scratch, optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedMarker));
+ Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
Label::Distance::kNear);
}
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ LoadAnyTaggedField(
- optimization_marker,
+ tiering_state,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.Acquire(),
temps.Acquire());
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset code age and the OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Sw(zero_reg,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+} // namespace
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
@@ -1139,11 +1147,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Operand(FEEDBACK_VECTOR_TYPE));
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+ // Check for a tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = temps.Acquire();
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
// Increment invocation count for the function.
{
@@ -1177,19 +1185,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
- Register bytecodeArray = descriptor.GetRegisterParameter(
+ Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrLoopNestingLevelOffset));
-
- __ Push(argc, bytecodeArray);
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+ __ Push(argc, bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
@@ -1233,7 +1232,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429): Document this frame setup better.
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
@@ -1315,17 +1314,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label::Distance::kNear);
// Read off the optimization state in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
+ // is optimized code or a tiering state, call that instead.
Register optimization_state = a4;
__ Lw(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty or has a optimization marker.
- Label has_optimized_code_or_marker;
+ // Check if the optimized code slot is not empty or has a tiering state.
+ Label has_optimized_code_or_state;
__ And(scratch, optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
- __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
+ __ Branch(&has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1344,14 +1343,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1474,7 +1466,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&is_baseline);
@@ -1495,10 +1487,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&install_baseline_code, ne, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ // Check for a tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ Move(a2, kInterpreterBytecodeArrayRegister);
@@ -1879,7 +1871,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
@@ -3516,18 +3508,17 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
- __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register a4.
- __ Move(a3, ra);
- __ Add64(a4, sp, Operand(kSavedRegistersAreaSize));
+ __ Move(a2, ra);
+ __ Add64(a3, sp, Operand(kSavedRegistersAreaSize));
- __ Sub64(a4, fp, a4);
+ __ Sub64(a3, fp, a3);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, a5);
- // Pass six arguments, according to n64 ABI.
+ __ PrepareCallCFunction(5, a4);
+ // Pass five arguments, according to the RISC-V calling convention.
__ Move(a0, zero_reg);
Label context_check;
__ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -3535,15 +3526,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(static_cast<int64_t>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- // a4: already has fp-to-sp delta.
- __ li(a5, ExternalReference::isolate_address(isolate));
+ // a2: code object address
+ // a3: fp-to-sp delta
+ __ li(a4, ExternalReference::isolate_address(isolate));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register a0 and get the input
@@ -3674,10 +3664,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -3807,13 +3793,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 60c3f60e66..afaae01b2a 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -83,6 +83,18 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
__ Ret();
}
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
+ Register scratch) {
+ // Reset the bytecode age and OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ mov(r0, Operand(0));
+ __ StoreU32(r0,
+ FieldMemOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+ scratch);
+}
+
// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
@@ -204,11 +216,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
- Register scratch = r1;
- __ mov(scratch, Operand(0));
- __ StoreU16(scratch,
- FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset));
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+ // disarm Sparkplug here.
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -240,22 +250,27 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ b(&start);
}
-void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
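+// Identifies which tier triggered OSR; interpreter entries sit behind an
+// extra handler frame that must be dropped before jumping to optimized code.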
+enum class OsrSourceTier {
+ kInterpreter,
+ kBaseline,
+};
+
+void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
// If the code object is null, just return to the caller.
- Label skip;
+ Label jump_to_returned_code;
__ CmpSmiLiteral(r2, Smi::zero(), r0);
- __ bne(&skip);
+ __ bne(&jump_to_returned_code);
__ Ret();
- __ bind(&skip);
+ __ bind(&jump_to_returned_code);
- if (is_interpreter) {
+ if (source == OsrSourceTier::kInterpreter) {
// Drop the handler frame that is be sitting on top of the actual
// JavaScript frame. This is the case then OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
@@ -1144,13 +1159,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
Label no_match;
- __ CmpS64(actual_marker, Operand(static_cast<int>(expected_marker)));
+ __ CmpS64(actual_state, Operand(static_cast<int>(expected_state)));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -1200,30 +1215,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- r2 : actual argument count
// -- r5 : new target (preserved for callee if needed, and caller)
// -- r3 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a int32 containing a non-zero optimization
+ // -- tiering_state : an int32 containing a non-zero optimization
// marker.
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ stop();
- }
+ DCHECK(!AreAliased(feedback_vector, r3, r5, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ stop();
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1312,39 +1322,39 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
- __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
- r0);
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kTieringStateIsAnyRequestMask, r0);
__ beq(&maybe_has_optimized_code);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ LoadAnyTaggedField(
- optimization_marker,
+ tiering_state,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
- USE(LoadOptimizationStateAndJumpIfNeedsProcessing);
+ USE(LoadTieringStateAndJumpIfNeedsProcessing);
DCHECK(!AreAliased(optimization_state, feedback_vector));
__ LoadU32(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- CHECK(
- is_uint16(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ CHECK(is_uint16(
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
__ tmll(
optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ b(Condition(7), has_optimized_code_or_marker);
+ Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ b(Condition(7), has_optimized_code_or_state);
}
#if ENABLE_SPARKPLUG
@@ -1373,13 +1383,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+ // Check for a tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = r9;
{
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
}
// Increment invocation count for the function.
@@ -1415,20 +1425,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecodeArray = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by writing
- // a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- {
- Register scratch = r0;
- __ mov(scratch, Operand(0));
- __ StoreU16(scratch,
- FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrLoopNestingLevelOffset));
- }
+ ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r1);
__ Push(argc, bytecodeArray);
@@ -1466,7 +1463,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
@@ -1556,12 +1553,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadS32(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty or has a optimization marker.
- Label has_optimized_code_or_marker;
- __ TestBitMask(optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
- r0);
- __ bne(&has_optimized_code_or_marker);
+ // Check if the optimized code slot is not empty or has a tiering state.
+ Label has_optimized_code_or_state;
+ __ TestBitMask(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask, r0);
+ __ bne(&has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
@@ -1580,17 +1577,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ mov(r1, Operand(0));
- __ StoreU16(r1,
- FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- r0);
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1716,7 +1703,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
@@ -1738,10 +1725,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &install_baseline_code);
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ // Check for a tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ mov(r4, kInterpreterBytecodeArrayRegister);
@@ -2917,7 +2904,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
fp_regs.Count());
__ MultiPush(gp_regs);
- __ MultiPushF64OrV128(fp_regs);
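+ // ip is passed as a scratch register for the FP/SIMD register push.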
+ __ MultiPushF64OrV128(fp_regs, ip);
// Pass instance and function index as explicit arguments to the runtime
// function.
@@ -2930,7 +2917,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ mov(ip, r2);
// Restore registers.
- __ MultiPopF64OrV128(fp_regs);
+ __ MultiPopF64OrV128(fp_regs, ip);
__ MultiPop(gp_regs);
}
// Finally, jump to the entrypoint.
@@ -2945,7 +2932,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
// Save all parameter registers. They might hold live values, we restore
// them after the runtime call.
__ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
- __ MultiPushF64OrV128(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPushF64OrV128(WasmDebugBreakFrameConstants::kPushedFpRegs, ip);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
@@ -2953,7 +2940,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
__ CallRuntime(Runtime::kWasmDebugBreak, 0);
// Restore registers.
- __ MultiPopF64OrV128(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPopF64OrV128(WasmDebugBreakFrameConstants::kPushedFpRegs, ip);
__ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
}
__ Ret();
@@ -3656,19 +3643,18 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
static constexpr int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
- __ mov(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
// Cleanse the Return address for 31-bit
__ CleanseP(r14);
// Get the address of the location in the code object (r5)(return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r6.
- __ mov(r5, r14);
- __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
- __ SubS64(r6, fp, r6);
+ __ mov(r4, r14);
+ __ la(r5, MemOperand(sp, kSavedRegistersAreaSize));
+ __ SubS64(r5, fp, r5);
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
- __ PrepareCallCFunction(6, r7);
+ __ PrepareCallCFunction(5, r7);
__ mov(r2, Operand::Zero());
Label context_check;
__ LoadU64(r3,
@@ -3677,18 +3663,17 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ LoadU64(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(r3, Operand(static_cast<int>(deopt_kind)));
- // r4: bailout id already loaded.
- // r5: code address or 0 already loaded.
- // r6: Fp-to-sp delta.
+ // r4: code address or 0 already loaded.
+ // r5: Fp-to-sp delta already loaded.
// Parm6: isolate is passed on the stack.
- __ Move(r7, ExternalReference::isolate_address(isolate));
- __ StoreU64(r7,
+ __ Move(r6, ExternalReference::isolate_address(isolate));
+ __ StoreU64(r6,
MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve "deoptimizer" object in register r2 and get the input
@@ -3847,23 +3832,19 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- return OnStackReplacement(masm, true);
+ OnStackReplacement(masm, OsrSourceTier::kInterpreter);
}
#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
__ LoadU64(kContextRegister,
MemOperand(fp, BaselineFrameConstants::kContextOffset));
- return OnStackReplacement(masm, false);
+ OnStackReplacement(masm, OsrSourceTier::kBaseline);
}
#endif
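
The deoptimizer hunk above drops Deoptimizer::kFixedExitSizeMarker, which shifts the remaining arguments down one register and reduces the C call from six arguments to five. A minimal sketch of the layout implied by the hunk follows; the declaration is a placeholder, not V8's actual Deoptimizer::New signature.

#include <cstdint>

// Hypothetical stand-in for the five-argument deoptimizer entry point set up
// above. Register notes follow the s390 hunk: r2..r5 carry the first four
// arguments and the isolate pointer is stored to the stack frame's extra
// parameter slot.
extern "C" void* NewDeoptimizerStub(void* function_or_zero,      // r2
                                    int deoptimize_kind,         // r3
                                    void* code_address_or_zero,  // r4
                                    intptr_t fp_to_sp_delta,     // r5
                                    void* isolate);              // stack slot
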
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 2d43be0a0f..7bbe48d2e4 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -77,8 +77,8 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
transitioning macro TypedArrayInitialize(implicit context: Context)(
initialize: constexpr bool, map: Map, length: uintptr,
- elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): JSTypedArray labels IfRangeError {
+ elementsInfo: typed_array::TypedArrayElementsInfo):
+ JSTypedArray labels IfRangeError {
const byteLength = elementsInfo.CalculateByteLength(length)
otherwise IfRangeError;
const byteLengthNum = Convert<Number>(byteLength);
@@ -86,11 +86,6 @@ transitioning macro TypedArrayInitialize(implicit context: Context)(
const byteOffset: uintptr = 0;
try {
- if (bufferConstructor != defaultConstructor) {
- goto AttachOffHeapBuffer(ConstructWithTarget(
- defaultConstructor, bufferConstructor, byteLengthNum));
- }
-
if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap;
const buffer = AllocateEmptyOnHeapBuffer();
@@ -131,10 +126,8 @@ transitioning macro ConstructByLength(implicit context: Context)(
elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
try {
const length: uintptr = ToIndex(lengthObj) otherwise RangeError;
- const defaultConstructor: Constructor = GetArrayBufferFunction();
const initialize: constexpr bool = true;
- return TypedArrayInitialize(
- initialize, map, length, elementsInfo, defaultConstructor)
+ return TypedArrayInitialize(initialize, map, length, elementsInfo)
otherwise RangeError;
} label RangeError deferred {
ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, lengthObj);
@@ -145,12 +138,11 @@ transitioning macro ConstructByLength(implicit context: Context)(
// ES #sec-typedarray-object
transitioning macro ConstructByArrayLike(implicit context: Context)(
map: Map, arrayLike: HeapObject, length: uintptr,
- elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): JSTypedArray {
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
try {
const initialize: constexpr bool = false;
- const typedArray = TypedArrayInitialize(
- initialize, map, length, elementsInfo, bufferConstructor)
+ const typedArray =
+ TypedArrayInitialize(initialize, map, length, elementsInfo)
otherwise RangeError;
try {
@@ -198,21 +190,18 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
// ES #sec-typedarray-object
transitioning macro ConstructByIterable(implicit context: Context)(
iterable: JSReceiver, iteratorFn: Callable): never
- labels IfConstructByArrayLike(JSArray, uintptr, JSReceiver) {
+ labels IfConstructByArrayLike(JSArray, uintptr) {
const array: JSArray =
IterableToListMayPreserveHoles(context, iterable, iteratorFn);
// Max JSArray length is a valid JSTypedArray length so we just use it.
- goto IfConstructByArrayLike(
- array, array.length_uintptr, GetArrayBufferFunction());
+ goto IfConstructByArrayLike(array, array.length_uintptr);
}
// 22.2.4.3 TypedArray ( typedArray )
// ES #sec-typedarray-typedarray
transitioning macro ConstructByTypedArray(implicit context: Context)(
srcTypedArray: JSTypedArray): never
- labels IfConstructByArrayLike(JSTypedArray, uintptr, JSReceiver) {
- let bufferConstructor: JSReceiver = GetArrayBufferFunction();
- const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
+ labels IfConstructByArrayLike(JSTypedArray, uintptr) {
let length: uintptr;
try {
// TODO(petermarshall): Throw on detached typedArray.
@@ -222,20 +211,7 @@ transitioning macro ConstructByTypedArray(implicit context: Context)(
length = 0;
}
- // The spec requires that constructing a typed array using a SAB-backed
- // typed array use the ArrayBuffer constructor, not the species constructor.
- // See https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
- if (!IsSharedArrayBuffer(srcBuffer)) {
- bufferConstructor = SpeciesConstructor(srcBuffer, bufferConstructor);
- try {
- // TODO(petermarshall): Throw on detached typedArray.
- length = LoadJSTypedArrayLengthAndCheckDetached(srcTypedArray)
- otherwise DetachedOrOutOfBounds;
- } label DetachedOrOutOfBounds {
- length = 0;
- }
- }
- goto IfConstructByArrayLike(srcTypedArray, length, bufferConstructor);
+ goto IfConstructByArrayLike(srcTypedArray, length);
}
// 22.2.4.5 TypedArray ( buffer, byteOffset, length )
@@ -369,7 +345,7 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)(
transitioning macro ConstructByJSReceiver(implicit context: Context)(
obj: JSReceiver): never
- labels IfConstructByArrayLike(JSReceiver, uintptr, JSReceiver) {
+ labels IfConstructByArrayLike(JSReceiver, uintptr) {
try {
// TODO(v8:8906): Use iterator::GetIteratorMethod() once it supports
// labels.
@@ -385,7 +361,7 @@ transitioning macro ConstructByJSReceiver(implicit context: Context)(
// anyway.
const length: uintptr = ChangeSafeIntegerNumberToUintPtr(lengthNumber)
otherwise goto IfInvalidLength(lengthNumber);
- goto IfConstructByArrayLike(obj, length, GetArrayBufferFunction());
+ goto IfConstructByArrayLike(obj, length);
} label IfInvalidLength(length: Number) {
ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
} label IfIteratorNotCallable(_value: JSAny) deferred {
@@ -428,14 +404,12 @@ transitioning builtin CreateTypedArray(
const elementsInfo = GetTypedArrayElementsInfo(map);
return ConstructByLength(map, length, elementsInfo);
- } label IfConstructByArrayLike(
- arrayLike: JSReceiver, length: uintptr, bufferConstructor: JSReceiver) {
+ } label IfConstructByArrayLike(arrayLike: JSReceiver, length: uintptr) {
const map = GetDerivedMap(target, newTarget);
// 5. Let elementSize be the Number value of the Element Size value in Table
// 56 for constructorName.
const elementsInfo = GetTypedArrayElementsInfo(map);
- return ConstructByArrayLike(
- map, arrayLike, length, elementsInfo, bufferConstructor);
+ return ConstructByArrayLike(map, arrayLike, length, elementsInfo);
}
}
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index aa9966bade..60e62865eb 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -114,18 +114,16 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
target: JSTypedArray, targetLength: uintptr, arrayArg: JSAny,
targetOffset: uintptr,
targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds {
- // Steps 3-7 are not observable, do them later.
-
- // 8. Let src be ? ToObject(source).
+ // 4. Let src be ? ToObject(source).
const src: JSReceiver = ToObject_Inline(context, arrayArg);
- // 9. Let srcLength be ? LengthOfArrayLike(src).
+ // 5. Let srcLength be ? LengthOfArrayLike(src).
const srcLengthNum: Number = GetLengthProperty(src);
- // 10. If targetOffset is +∞, throw a RangeError exception.
+ // 6. If targetOffset is +∞, throw a RangeError exception.
if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
- // 11. If srcLength + targetOffset > targetLength, throw a RangeError
+ // 7. If srcLength + targetOffset > targetLength, throw a RangeError
// exception.
const srcLength = ChangeSafeIntegerNumberToUintPtr(srcLengthNum)
otherwise IfOffsetOutOfBounds;
@@ -136,12 +134,6 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
// to do with the empty source array.
if (srcLength == 0) return;
- // 4. Let targetName be the String value of target.[[TypedArrayName]].
- // 5. Let targetElementSize be the Element Size value specified in
- // Table 69 for targetName.
- // 6. Let targetType be the Element Type value in Table 69 for
- // targetName.
-
try {
// BigInt typed arrays are not handled by
// CopyFastNumberJSArrayElementsToTypedArray.
diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq
index 868af426b5..37760ccb5c 100644
--- a/deps/v8/src/builtins/typed-array-sort.tq
+++ b/deps/v8/src/builtins/typed-array-sort.tq
@@ -15,25 +15,10 @@ transitioning macro CallCompare(
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
const v: Number = ToNumber_Inline(Call(context, comparefn, Undefined, a, b));
- // b. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // c. Let getBufferByteLength be
- // MakeIdempotentArrayBufferByteLengthGetter(SeqCst).
- // d. If IsIntegerIndexedObjectOutOfBounds(obj, getBufferByteLength) is true,
- // throw a TypeError exception.
- // TODO(v8:11111): Update this, depending on how
- // https://github.com/tc39/ecma262/pull/2646#issuecomment-1067456576 gets
- // resolved.
- try {
- LoadJSTypedArrayLengthAndCheckDetached(array)
- otherwise DetachedOrOutOfBounds;
- } label DetachedOrOutOfBounds {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSort);
- }
-
- // e. If v is NaN, return +0.
+ // b. If v is NaN, return +0.
if (NumberIsNaN(v)) return 0;
- // f. return v.
+ // c. return v.
return v;
}
@@ -149,17 +134,17 @@ transitioning javascript builtin TypedArrayPrototypeSort(
TypedArrayMergeSort(work2, 0, len, work1, array, comparefn);
- // Reload the length; it's possible the backing ArrayBuffer has been resized.
- // It cannot be OOB here though, since we've checked it as part of the
- // comparison function.
-
- // TODO(v8:11111): Update this, depending on how
- // https://github.com/tc39/ecma262/pull/2646#issuecomment-1067456576 gets
- // resolved.
- const newLen =
- LoadJSTypedArrayLengthAndCheckDetached(array) otherwise unreachable;
- if (newLen < len) {
- len = newLen;
+ // Reload the length; it's possible the backing ArrayBuffer has been resized
+ // to be OOB or detached, in which case treat it as length 0.
+
+ try {
+ const newLen = LoadJSTypedArrayLengthAndCheckDetached(array)
+ otherwise DetachedOrOutOfBounds;
+ if (newLen < len) {
+ len = newLen;
+ }
+ } label DetachedOrOutOfBounds {
+ len = 0;
}
// work1 contains the sorted numbers. Write them back.
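
The sort hunk above reloads the typed array's length after the user-supplied comparison has run, since the backing ArrayBuffer may have been resized or detached in the meantime. A minimal sketch of that clamp, assuming a detached or out-of-bounds array reports no length (the helper name is illustrative, not a V8 or Torque API):

#include <algorithm>
#include <cstddef>
#include <optional>

// Clamp the number of sorted elements to write back: a shrunk buffer caps the
// count, while a detached or out-of-bounds view writes nothing.
size_t WriteBackLength(std::optional<size_t> reloaded_length, size_t len) {
  if (!reloaded_length.has_value()) return 0;  // detached or OOB
  return std::min(len, *reloaded_length);      // buffer may have shrunk
}
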
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index 0c0bfea7e3..b8b8ebe6eb 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -491,8 +491,8 @@ macro GetTargetAndInstance(funcref: WasmInternalFunction): TargetAndInstance {
// Vector format:
// Two slots per call_ref instruction. These slots' values can be:
-// - uninitialized: (undefined, <unused>). Note: we use {undefined} as the
-// sentinel as an optimization, as it's the default value for FixedArrays.
+// - uninitialized: (0, <unused>). Note: we use {0} as the sentinel because
+// it also works as the default for vector slots used as counts.
// - monomorphic: (funcref, count (smi)). The second slot is a counter for how
// often the funcref in the first slot has been seen.
// - polymorphic: (fixed_array, <unused>). In this case, the array
@@ -526,7 +526,8 @@ builtin CallRefIC(
// All other cases are some sort of miss and must compute the target/
// instance. They all fall through to returning the computed data.
const result = GetTargetAndInstance(funcref);
- if (TaggedEqual(value, Undefined)) {
+ if (TaggedEqual(value, SmiConstant(0))) {
+ // Was uninitialized.
vector.objects[index] = funcref;
vector.objects[index + 1] = SmiConstant(1);
} else if (Is<FixedArray>(value)) {
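
The vector-format comment above describes three states for a call_ref feedback entry, with 0 doing double duty as the uninitialized sentinel and a zero count. A rough C++ model of those two slots is sketched below; the field and method names are invented for illustration, and only the state transitions mirror the Torque comment.

#include <cstdint>

struct CallRefFeedbackSlots {
  uintptr_t value = 0;  // slot 0: 0 (uninitialized), a funcref, or an array handle
  uintptr_t count = 0;  // slot 1: hit counter, meaningful only while monomorphic

  void RecordHit(uintptr_t funcref) {
    if (value == 0) {              // uninitialized: 0 is the sentinel
      value = funcref;
      count = 1;                   // first observation
    } else if (value == funcref) {
      ++count;                     // same monomorphic target seen again
    }
    // The transition to the polymorphic (fixed-array) state is omitted here.
  }
};
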
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 342660d21d..a59143273c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -927,51 +927,45 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
TurboAssembler::kCountIncludesReceiver);
}
-// Tail-call |function_id| if |actual_marker| == |expected_marker|
-static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register actual_marker,
- OptimizationMarker expected_marker,
- Runtime::FunctionId function_id) {
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+ Register actual_state,
+ TieringState expected_state,
+ Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ Cmp(actual_marker, static_cast<int>(expected_marker));
+ __ Cmp(actual_state, static_cast<int>(expected_state));
__ j(not_equal, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
- Register optimization_marker) {
+ Register tiering_state) {
// ----------- S t a t e -------------
// -- rax : actual argument count
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
- DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
-
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileMaglev_NotConcurrent,
- Runtime::kCompileMaglev_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileMaglev_Concurrent,
- Runtime::kCompileMaglev_Concurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_NotConcurrent,
- Runtime::kCompileTurbofan_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
- OptimizationMarker::kCompileTurbofan_Concurrent,
- Runtime::kCompileTurbofan_Concurrent);
-
- // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
- // InOptimizationQueue and None shouldn't reach here.
- if (FLAG_debug_code) {
- __ int3();
- }
+ DCHECK(!AreAliased(feedback_vector, rdx, rdi, tiering_state));
+
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestMaglev_Synchronous,
+ Runtime::kCompileMaglev_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestMaglev_Concurrent,
+ Runtime::kCompileMaglev_Concurrent);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Synchronous,
+ Runtime::kCompileTurbofan_Synchronous);
+ TailCallRuntimeIfStateEquals(masm, tiering_state,
+ TieringState::kRequestTurbofan_Concurrent,
+ Runtime::kCompileTurbofan_Concurrent);
+
+ __ int3();
}
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
@@ -1112,17 +1106,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
}
// Read off the optimization state in the feedback vector and check if there
-// is optimized code or a optimization marker that needs to be processed.
-static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+// is optimized code or a tiering state that needs to be processed.
+static void LoadTieringStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
- Label* has_optimized_code_or_marker) {
+ Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(masm);
__ movl(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ testl(
optimization_state,
- Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ j(not_zero, has_optimized_code_or_marker);
+ Immediate(
+ FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+ __ j(not_zero, has_optimized_code_or_state);
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
@@ -1132,12 +1127,12 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code;
__ testl(optimization_state,
- Immediate(FeedbackVector::kHasCompileOptimizedMarker));
+ Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
__ j(zero, &maybe_has_optimized_code);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ Register tiering_state = optimization_state;
+ __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+ MaybeOptimizeCode(masm, feedback_vector, tiering_state);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
@@ -1149,6 +1144,20 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
jump_mode);
}
+namespace {
+
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+ Register bytecode_array) {
+ // Reset the bytecode age and OSR state (optimized to a single write).
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ movl(FieldOperand(bytecode_array,
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+ Immediate(0));
+}
+
+} // namespace
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1200,11 +1209,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
+ // Check the tiering state.
+ Label has_optimized_code_or_state;
Register optimization_state = rcx;
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+ LoadTieringStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
Label not_optimized;
__ bind(&not_optimized);
@@ -1224,15 +1233,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kJavaScriptCallTargetRegister); // Callee's JS function.
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
- // 8-bit fields next to each other, so we could just optimize by writing a
- // 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- Immediate(0));
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load initial bytecode offset.
__ Move(kInterpreterBytecodeOffsetRegister,
@@ -1356,7 +1357,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector, closure);
@@ -1376,10 +1377,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &install_baseline_code);
- // Check for an optimization marker.
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector,
- &has_optimized_code_or_marker);
+ // Check the tiering state.
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
+ feedback_vector,
+ &has_optimized_code_or_state);
// Load the baseline code into the closure.
__ Move(rcx, kInterpreterBytecodeArrayRegister);
@@ -1704,10 +1705,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ Assert(equal, AbortReason::kExpectedFeedbackVector);
}
- // Check for an optimization marker.
- Label has_optimized_code_or_marker;
- LoadOptimizationStateAndJumpIfNeedsProcessing(
- masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+ // Check the tiering state.
+ Label has_optimized_code_or_state;
+ LoadTieringStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
// Increment invocation count for the function.
__ incl(
@@ -1738,16 +1739,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// onto the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-
- // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
- // are 8-bit fields next to each other, so we could just optimize by
- // writing a 16-bit. These static asserts guard our assumption is valid.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
- STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ movw(FieldOperand(bytecode_array,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- Immediate(0));
+ ResetBytecodeAgeAndOsrState(masm, bytecode_array);
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
@@ -1785,7 +1777,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
- __ bind(&has_optimized_code_or_marker);
+ __ bind(&has_optimized_code_or_state);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the return address, rebalancing the return stack buffer by using
@@ -2732,21 +2724,26 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
__ ret(0);
}
-void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+enum class OsrSourceTier {
+ kInterpreter,
+ kBaseline,
+};
+
+void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ __ CallRuntime(Runtime::kCompileOptimizedOSR);
}
- Label skip;
+ Label jump_to_returned_code;
// If the code object is null, just return to the caller.
__ testq(rax, rax);
- __ j(not_equal, &skip, Label::kNear);
+ __ j(not_equal, &jump_to_returned_code, Label::kNear);
__ ret(0);
- __ bind(&skip);
+ __ bind(&jump_to_returned_code);
- if (is_interpreter) {
+ if (source == OsrSourceTier::kInterpreter) {
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ leave();
@@ -2774,13 +2771,13 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
} // namespace
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- return OnStackReplacement(masm, true);
+ OnStackReplacement(masm, OsrSourceTier::kInterpreter);
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
__ movq(kContextRegister,
MemOperand(rbp, BaselineFrameConstants::kContextOffset));
- return OnStackReplacement(masm, false);
+ OnStackReplacement(masm, OsrSourceTier::kBaseline);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3415,6 +3412,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// reference parameters at the end of the integer parameters section.
Label ref_params_done;
// We check if we have seen a reference in the first parameter loop.
+ Register ref_param_count = param_count;
+ __ movq(ref_param_count, Immediate(0));
__ cmpq(MemOperand(rbp, kHasRefTypesOffset), Immediate(0));
__ j(equal, &ref_params_done);
// We re-calculate the beginning of the value-types array and the beginning of
@@ -3453,6 +3452,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// Place the param into the proper slot in Integer section.
__ bind(&move_ref_to_slot);
+ __ addq(ref_param_count, Immediate(1));
__ movq(MemOperand(current_int_param_slot, 0), param);
__ subq(current_int_param_slot, Immediate(kSystemPointerSize));
@@ -3466,6 +3466,9 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
__ j(not_equal, &ref_loop_through_params);
__ bind(&ref_params_done);
+ __ movq(valuetype, ref_param_count);
+ ref_param_count = valuetype;
+ valuetype = no_reg;
// -------------------------------------------
// Move the parameters into the proper param registers.
// -------------------------------------------
@@ -3542,6 +3545,22 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// params that didn't fit into param registers are pushed again.
Label loop_through_valuetypes;
+ Label loop_place_ref_params;
+ __ bind(&loop_place_ref_params);
+ __ testq(ref_param_count, ref_param_count);
+ __ j(zero, &loop_through_valuetypes);
+
+ __ cmpq(start_int_section, current_int_param_slot);
+ // If no int or ref param remains, directly iterate the valuetypes.
+ __ j(less_equal, &loop_through_valuetypes);
+
+ __ pushq(MemOperand(current_int_param_slot, 0));
+ __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
+ __ subq(ref_param_count, Immediate(1));
+ __ jmp(&loop_place_ref_params);
+
+ valuetype = ref_param_count;
+ ref_param_count = no_reg;
__ bind(&loop_through_valuetypes);
// We iterated through the valuetypes array, we are one field over the end in
@@ -3577,6 +3596,10 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
__ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
__ j(equal, &place_float_param);
+ // Ref params have already been pushed, so go through directly.
+ __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
+ __ jmp(&loop_through_valuetypes);
+
// All other types are reference types. We can just fall through to place them
// in the integer section.
@@ -4041,6 +4064,7 @@ void Builtins::Generate_WasmResume(MacroAssembler* masm) {
__ EnterFrame(StackFrame::STACK_SWITCH);
Register param_count = rax;
+ __ decq(param_count); // Exclude receiver.
Register closure = kJSFunctionRegister; // rdi
// These slots are not used in this builtin. But when we return from the
@@ -4817,22 +4841,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
rbp);
- // We use this to keep the value of the fifth argument temporarily.
- // Unfortunately we can't store it directly in r8 (used for passing
- // this on linux), since it is another parameter passing register on windows.
- Register arg5 = r15;
-
- __ Move(arg_reg_3, Deoptimizer::kFixedExitSizeMarker);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4, Operand(rsp, kCurrentOffsetToReturnAddress));
+ __ movq(arg_reg_3, Operand(rsp, kCurrentOffsetToReturnAddress));
// Load the fp-to-sp-delta.
- __ leaq(arg5, Operand(rsp, kCurrentOffsetToParentSP));
- __ subq(arg5, rbp);
- __ negq(arg5);
+ __ leaq(arg_reg_4, Operand(rsp, kCurrentOffsetToParentSP));
+ __ subq(arg_reg_4, rbp);
+ __ negq(arg_reg_4);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6);
+ __ PrepareCallCFunction(5);
__ Move(rax, 0);
Label context_check;
__ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -4844,19 +4862,19 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
- // has created space for this). On linux pass the arguments in r8 and r9.
+ // has created space for this). On linux pass the arguments in r8.
#ifdef V8_TARGET_OS_WIN
- __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
+ Register arg5 = r15;
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
- __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
#else
- __ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
+ // r8 is arg_reg_5 on Linux
+ __ LoadAddress(r8, ExternalReference::isolate_address(isolate));
#endif
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
@@ -4985,10 +5003,6 @@ void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
-void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
- Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
-}
-
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
@@ -5113,12 +5127,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ popq(kInterpreterAccumulatorRegister);
if (is_osr) {
- // Reset the OSR loop nesting depth to disarm back edges.
- // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
- // Sparkplug here.
- __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrLoopNestingLevelOffset),
- Immediate(0));
+ // TODO(pthier): Separate Sparkplug and Turbofan OSR states.
+ ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj);
} else {
__ jmp(code_obj);
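
ResetBytecodeAgeAndOsrState above relies on the OSR state and bytecode age being two adjacent 16-bit fields, so both can be cleared with a single aligned 32-bit store. A minimal sketch of that idea, using an invented struct rather than V8's real BytecodeArray layout:

#include <cstdint>
#include <cstring>

struct BytecodeHeaderFields {
  uint16_t osr_urgency_and_install_target;  // stand-in for the OSR state field
  uint16_t bytecode_age;                    // kNoAgeBytecodeAge is 0
};
static_assert(sizeof(BytecodeHeaderFields) == sizeof(uint32_t),
              "both fields must be contiguous and cover exactly 32 bits");

inline void ResetAgeAndOsrState(BytecodeHeaderFields* fields) {
  const uint32_t zero = 0;
  std::memcpy(fields, &zero, sizeof(zero));  // one 32-bit write clears both
}
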
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 0c7df90dbf..bae50a2a5c 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2648,9 +2648,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
// The above code must not emit constants either.
DCHECK(!has_pending_constants());
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 48b8a5f06a..803afc367d 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -1041,13 +1041,13 @@ void TurboAssembler::InitializeRootRegister() {
#endif
}
-void MacroAssembler::SmiTag(Register dst, Register src) {
+void TurboAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Lsl(dst, src, kSmiShift);
}
-void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+void TurboAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
void TurboAssembler::SmiUntag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
@@ -1241,58 +1241,6 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
#endif
}
-template <TurboAssembler::StoreLRMode lr_mode>
-void TurboAssembler::PushCPURegList(CPURegList registers) {
- DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr));
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) {
- Pacibsp();
- }
-#endif
-
- int size = registers.RegisterSizeInBytes();
- DCHECK_EQ(0, (size * registers.Count()) % 16);
-
- // Push up to four registers at a time.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& src0 = registers.PopHighestIndex();
- const CPURegister& src1 = registers.PopHighestIndex();
- const CPURegister& src2 = registers.PopHighestIndex();
- const CPURegister& src3 = registers.PopHighestIndex();
- int count = count_before - registers.Count();
- PushHelper(count, size, src0, src1, src2, src3);
- }
-}
-
-template <TurboAssembler::LoadLRMode lr_mode>
-void TurboAssembler::PopCPURegList(CPURegList registers) {
- int size = registers.RegisterSizeInBytes();
- DCHECK_EQ(0, (size * registers.Count()) % 16);
-
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- bool contains_lr = registers.IncludesAliasOf(lr);
- DCHECK_IMPLIES((lr_mode == kDontLoadLR), !contains_lr);
-#endif
-
- // Pop up to four registers at a time.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& dst0 = registers.PopLowestIndex();
- const CPURegister& dst1 = registers.PopLowestIndex();
- const CPURegister& dst2 = registers.PopLowestIndex();
- const CPURegister& dst3 = registers.PopLowestIndex();
- int count = count_before - registers.Count();
- PopHelper(count, size, dst0, dst1, dst2, dst3);
- }
-
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- if (lr_mode == kAuthLR && contains_lr) {
- Autibsp();
- }
-#endif
-}
-
void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 191eb4bd20..552425edd4 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -56,6 +56,46 @@ constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits;
} // namespace
+void TurboAssembler::PushCPURegList(CPURegList registers) {
+ // If LR was stored here, we would need to sign it if
+ // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
+ DCHECK(!registers.IncludesAliasOf(lr));
+
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // Push up to four registers at a time.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+void TurboAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // If LR was loaded here, we would need to authenticate it if
+ // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
+ DCHECK(!registers.IncludesAliasOf(lr));
+
+ // Pop up to four registers at a time.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
auto list = kCallerSaved;
@@ -79,7 +119,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
list.Remove(exclusion);
list.Align();
- PushCPURegList<kDontStoreLR>(list);
+ PushCPURegList(list);
int bytes = list.TotalSizeInBytes();
@@ -106,7 +146,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
list.Remove(exclusion);
list.Align();
- PopCPURegList<kDontLoadLR>(list);
+ PopCPURegList(list);
bytes += list.TotalSizeInBytes();
return bytes;
@@ -2219,9 +2259,8 @@ void TurboAssembler::CallForDeoptimization(
BlockPoolsScope scope(this);
bl(jump_deoptimization_entry_label);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
@@ -3575,7 +3614,7 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
// Preserve all caller-saved registers as well as NZCV.
// PushCPURegList asserts that the size of each list is a multiple of 16
// bytes.
- PushCPURegList<kDontStoreLR>(saved_registers);
+ PushCPURegList(saved_registers);
PushCPURegList(kCallerSavedV);
// We can use caller-saved registers as scratch values (except for argN).
@@ -3628,7 +3667,7 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
}
PopCPURegList(kCallerSavedV);
- PopCPURegList<kDontLoadLR>(saved_registers);
+ PopCPURegList(saved_registers);
TmpList()->set_bits(old_tmp_list);
FPTmpList()->set_bits(old_fp_tmp_list);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 022d84cb60..ab56bba202 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -557,6 +557,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+
inline void SmiToInt32(Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
@@ -862,15 +865,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// kSRegSizeInBits are supported.
//
// Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
- //
- // The methods take an optional LoadLRMode or StoreLRMode template argument.
- // When control flow integrity measures are enabled and the link register is
- // included in 'registers', passing kSignLR to PushCPURegList will sign the
- // link register before pushing the list, and passing kAuthLR to
- // PopCPURegList will authenticate it after popping the list.
- template <StoreLRMode lr_mode = kDontStoreLR>
void PushCPURegList(CPURegList registers);
- template <LoadLRMode lr_mode = kDontLoadLR>
void PopCPURegList(CPURegList registers);
// Calculate how much stack space (in bytes) are required to store caller
@@ -1752,31 +1747,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
tbx(vd, vn, vn2, vn3, vn4, vm);
}
- // For the 'lr_mode' template argument of the following methods, see
- // PushCPURegList/PopCPURegList.
- template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushSizeRegList(RegList registers, unsigned reg_size) {
- PushCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PushCPURegList(CPURegList(reg_size, registers));
}
- template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushSizeRegList(DoubleRegList registers, unsigned reg_size) {
- PushCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PushCPURegList(CPURegList(reg_size, registers));
}
- template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopSizeRegList(RegList registers, unsigned reg_size) {
- PopCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PopCPURegList(CPURegList(reg_size, registers));
}
- template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopSizeRegList(DoubleRegList registers, unsigned reg_size) {
- PopCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PopCPURegList(CPURegList(reg_size, registers));
}
- template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushXRegList(RegList regs) {
- PushSizeRegList<lr_mode>(regs, kXRegSizeInBits);
+ PushSizeRegList(regs, kXRegSizeInBits);
}
- template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopXRegList(RegList regs) {
- PopSizeRegList<lr_mode>(regs, kXRegSizeInBits);
+ PopSizeRegList(regs, kXRegSizeInBits);
}
inline void PushWRegList(RegList regs) {
PushSizeRegList(regs, kWRegSizeInBits);
@@ -1855,9 +1842,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---- SMI and Number Utilities ----
- inline void SmiTag(Register dst, Register src);
- inline void SmiTag(Register smi);
-
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/deps/v8/src/codegen/arm64/reglist-arm64.h b/deps/v8/src/codegen/arm64/reglist-arm64.h
index 9f29589098..fdc14391c8 100644
--- a/deps/v8/src/codegen/arm64/reglist-arm64.h
+++ b/deps/v8/src/codegen/arm64/reglist-arm64.h
@@ -26,12 +26,8 @@ class V8_EXPORT_PRIVATE CPURegList {
public:
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
- : list_(base::fold(
- [](uint64_t acc, CPURegister v) {
- if (!v.is_valid()) return acc;
- return acc | (uint64_t{1} << v.code());
- },
- 0, reg0, regs...)),
+ : list_(((uint64_t{1} << reg0.code()) | ... |
+ (regs.is_valid() ? uint64_t{1} << regs.code() : 0))),
size_(reg0.SizeInBits()),
type_(reg0.type()) {
DCHECK(AreSameSizeAndType(reg0, regs...));
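
The CPURegList constructor above replaces the base::fold helper with a C++17 binary fold expression that ORs one bit per register code, skipping invalid registers in the trailing pack. A self-contained sketch of the same pattern (Reg and BitList are invented for illustration):

#include <cstdint>
#include <cstdio>

struct Reg {
  int code;
  bool valid;
};

template <typename... Regs>
constexpr uint64_t BitList(Reg first, Regs... rest) {
  // Binary left fold: start from the first register's bit, then OR in each
  // remaining register's bit if that register is valid.
  return ((uint64_t{1} << first.code) | ... |
          (rest.valid ? uint64_t{1} << rest.code : 0));
}

int main() {
  constexpr uint64_t bits = BitList(Reg{0, true}, Reg{3, true}, Reg{5, false});
  std::printf("%llx\n", static_cast<unsigned long long>(bits));  // prints 9
  return 0;
}
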
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index cf01b360d6..c99730d1c7 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -44,6 +44,7 @@ namespace internal {
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
+ V(kOperandIsCleared, "Operand is cleared") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAConstructor, "Operand is not a constructor") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 364bfa029d..e6ff700927 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -8296,6 +8296,13 @@ TNode<Uint32T> CodeStubAssembler::LoadDetailsByDescriptorEntry(
}
TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
+ TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
+ return LoadDescriptorArrayElement<Object>(
+ container, DescriptorEntryToIndex(descriptor_entry),
+ DescriptorArray::ToValueIndex(0) * kTaggedSize);
+}
+
+TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
TNode<DescriptorArray> container, int descriptor_entry) {
return LoadDescriptorArrayElement<Object>(
container, IntPtrConstant(0),
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 55485d004a..bccdc34b74 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -3229,10 +3229,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CallBuiltin(Builtin::kSetProperty, context, receiver, key, value);
}
- TNode<Object> SetPropertyInLiteral(TNode<Context> context,
- TNode<JSObject> receiver,
- TNode<Object> key, TNode<Object> value) {
- return CallBuiltin(Builtin::kSetPropertyInLiteral, context, receiver, key,
+ TNode<Object> CreateDataProperty(TNode<Context> context,
+ TNode<JSObject> receiver, TNode<Object> key,
+ TNode<Object> value) {
+ return CallBuiltin(Builtin::kCreateDataProperty, context, receiver, key,
value);
}
@@ -3996,6 +3996,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> LoadDetailsByDescriptorEntry(
TNode<DescriptorArray> descriptors, int descriptor);
TNode<Object> LoadValueByDescriptorEntry(TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> descriptor);
+ TNode<Object> LoadValueByDescriptorEntry(TNode<DescriptorArray> descriptors,
int descriptor);
TNode<MaybeObject> LoadFieldTypeByDescriptorEntry(
TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index df237d44bd..a0d2e45ffc 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -48,7 +48,6 @@
#include "src/logging/counters-scopes.h"
#include "src/logging/log-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
-#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/map.h"
@@ -66,6 +65,7 @@
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/maglev/maglev.h"
#endif // V8_ENABLE_MAGLEV
@@ -74,6 +74,31 @@ namespace internal {
namespace {
+constexpr bool IsOSR(BytecodeOffset osr_offset) { return !osr_offset.IsNone(); }
+
+void SetTieringState(JSFunction function, BytecodeOffset osr_offset,
+ TieringState value) {
+ if (IsOSR(osr_offset)) {
+ function.set_osr_tiering_state(value);
+ } else {
+ function.set_tiering_state(value);
+ }
+}
+
+void ResetTieringState(JSFunction function, BytecodeOffset osr_offset) {
+ if (function.has_feedback_vector()) {
+ SetTieringState(function, osr_offset, TieringState::kNone);
+ }
+}
+
+void ResetProfilerTicks(JSFunction function, BytecodeOffset osr_offset) {
+ if (!IsOSR(osr_offset)) {
+ // Reset profiler ticks, the function is no longer considered hot.
+ // TODO(v8:7700): Update for Maglev tiering.
+ function.feedback_vector().set_profiler_ticks(0);
+ }
+}
+
class CompilerTracer : public AllStatic {
public:
static void TracePrepareJob(Isolate* isolate, OptimizedCompilationInfo* info,
@@ -95,6 +120,28 @@ class CompilerTracer : public AllStatic {
PrintTraceSuffix(scope);
}
+ static void TraceOptimizeOSR(Isolate* isolate, Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ ConcurrencyMode mode) {
+ if (!FLAG_trace_osr) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - started. function: %s, osr offset: %d, mode: %s]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt(), ToString(mode));
+ }
+
+ static void TraceOptimizeOSRUnavailable(Isolate* isolate,
+ Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ ConcurrencyMode mode) {
+ if (!FLAG_trace_osr) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - unavailable (failed or in progress). function: %s, osr "
+ "offset: %d, mode: %s]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt(), ToString(mode));
+ }
+
static void TraceCompilationStats(Isolate* isolate,
OptimizedCompilationInfo* info,
double ms_creategraph, double ms_optimize,
@@ -142,7 +189,7 @@ class CompilerTracer : public AllStatic {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintTracePrefix(scope, "found optimized code for", function, code_kind);
- if (!osr_offset.IsNone()) {
+ if (IsOSR(osr_offset)) {
PrintF(scope.file(), " at OSR bytecode offset %d", osr_offset.ToInt());
}
PrintTraceSuffix(scope);
@@ -288,12 +335,21 @@ struct ScopedTimer {
namespace {
-ScriptOriginOptions OriginOptionsForEval(Object script) {
- if (!script.IsScript()) return ScriptOriginOptions();
-
- const auto outer_origin_options = Script::cast(script).origin_options();
- return ScriptOriginOptions(outer_origin_options.IsSharedCrossOrigin(),
- outer_origin_options.IsOpaque());
+ScriptOriginOptions OriginOptionsForEval(
+ Object script, ParsingWhileDebugging parsing_while_debugging) {
+ bool is_shared_cross_origin =
+ parsing_while_debugging == ParsingWhileDebugging::kYes;
+ bool is_opaque = false;
+ if (script.IsScript()) {
+ auto script_origin_options = Script::cast(script).origin_options();
+ if (script_origin_options.IsSharedCrossOrigin()) {
+ is_shared_cross_origin = true;
+ }
+ if (script_origin_options.IsOpaque()) {
+ is_opaque = true;
+ }
+ }
+ return ScriptOriginOptions(is_shared_cross_origin, is_opaque);
}
} // namespace
@@ -386,7 +442,6 @@ void RecordUnoptimizedFunctionCompilation(
CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
DisallowJavascriptExecution no_js(isolate);
- CompilerTracer::TracePrepareJob(isolate, compilation_info(), compiler_name_);
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToPrepare);
@@ -414,22 +469,22 @@ CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
return UpdateState(FinalizeJobImpl(isolate), State::kSucceeded);
}
-CompilationJob::Status OptimizedCompilationJob::RetryOptimization(
+CompilationJob::Status TurbofanCompilationJob::RetryOptimization(
BailoutReason reason) {
DCHECK(compilation_info_->IsOptimizing());
compilation_info_->RetryOptimization(reason);
return UpdateState(FAILED, State::kFailed);
}
-CompilationJob::Status OptimizedCompilationJob::AbortOptimization(
+CompilationJob::Status TurbofanCompilationJob::AbortOptimization(
BailoutReason reason) {
DCHECK(compilation_info_->IsOptimizing());
compilation_info_->AbortOptimization(reason);
return UpdateState(FAILED, State::kFailed);
}
-void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
- Isolate* isolate) const {
+void TurbofanCompilationJob::RecordCompilationStats(ConcurrencyMode mode,
+ Isolate* isolate) const {
DCHECK(compilation_info()->IsOptimizing());
Handle<JSFunction> function = compilation_info()->closure();
double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
@@ -477,12 +532,12 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
base::TimeDelta time_foreground =
time_taken_to_prepare_ + time_taken_to_finalize_;
switch (mode) {
- case OptimizedCompilationJob::kConcurrent:
+ case ConcurrencyMode::kConcurrent:
time_background += time_taken_to_execute_;
counters->turbofan_optimize_concurrent_total_time()->AddSample(
static_cast<int>(ElapsedTime().InMicroseconds()));
break;
- case OptimizedCompilationJob::kSynchronous:
+ case ConcurrencyMode::kSynchronous:
counters->turbofan_optimize_non_concurrent_total_time()->AddSample(
static_cast<int>(ElapsedTime().InMicroseconds()));
time_foreground += time_taken_to_execute_;
@@ -498,7 +553,7 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
}
}
-void OptimizedCompilationJob::RecordFunctionCompilation(
+void TurbofanCompilationJob::RecordFunctionCompilation(
CodeEventListener::LogEventsAndTags tag, Isolate* isolate) const {
Handle<AbstractCode> abstract_code =
Handle<AbstractCode>::cast(compilation_info()->code());
@@ -835,75 +890,81 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
return true;
}
-V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> GetCodeFromOptimizedCodeCache(
- Handle<JSFunction> function, BytecodeOffset osr_offset,
- CodeKind code_kind) {
- Isolate* isolate = function->GetIsolate();
- RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- DisallowGarbageCollection no_gc;
- CodeT code;
- if (osr_offset.IsNone() && function->has_feedback_vector()) {
- FeedbackVector feedback_vector = function->feedback_vector();
- feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "GetCodeFromOptimizedCodeCache");
- code = feedback_vector.optimized_code();
- } else if (!osr_offset.IsNone()) {
- code = function->context()
- .native_context()
- .GetOSROptimizedCodeCache()
- .GetOptimizedCode(shared, osr_offset, isolate);
- }
- DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
- if (!code.is_null() && code.kind() == code_kind) {
- // Caching of optimized code enabled and optimized code found.
+// A wrapper to access either the OSR optimized code cache (one per native
+// context), or the optimized code cache slot on the feedback vector.
+class OptimizedCodeCache : public AllStatic {
+ public:
+ static V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> Get(
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
+ CodeKind code_kind) {
+ if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {};
+
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo shared = function->shared();
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
+
+ CodeT code;
+ if (IsOSR(osr_offset)) {
+ // For OSR, check the OSR optimized code cache.
+ code = function->native_context().osr_code_cache().TryGet(
+ shared, osr_offset, isolate);
+ } else {
+ // Non-OSR code may be cached on the feedback vector.
+ if (function->has_feedback_vector()) {
+ FeedbackVector feedback_vector = function->feedback_vector();
+ feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
+ shared, "OptimizedCodeCache::Get");
+ code = feedback_vector.optimized_code();
+ }
+ }
+
+ DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
+ if (code.is_null() || code.kind() != code_kind) return {};
+
DCHECK(!code.marked_for_deoptimization());
- DCHECK(function->shared().is_compiled());
+ DCHECK(shared.is_compiled());
DCHECK(CodeKindIsStoredInOptimizedCodeCache(code.kind()));
- DCHECK_IMPLIES(!osr_offset.IsNone(), CodeKindCanOSR(code.kind()));
- return Handle<CodeT>(code, isolate);
- }
- return MaybeHandle<CodeT>();
-}
+ DCHECK_IMPLIES(IsOSR(osr_offset), CodeKindCanOSR(code.kind()));
-void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
- Handle<JSFunction> function = compilation_info->closure();
- if (compilation_info->osr_offset().IsNone()) {
- Handle<FeedbackVector> vector =
- handle(function->feedback_vector(), function->GetIsolate());
- vector->ClearOptimizationMarker();
+ CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
+ code_kind);
+ return handle(code, isolate);
}
-}
-void InsertCodeIntoOptimizedCodeCache(
- OptimizedCompilationInfo* compilation_info) {
- const CodeKind kind = compilation_info->code_kind();
- if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
+ static void Insert(OptimizedCompilationInfo* compilation_info) {
+ const CodeKind kind = compilation_info->code_kind();
+ if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
- if (compilation_info->function_context_specializing()) {
- // Function context specialization folds-in the function context, so no
- // sharing can occur. Make sure the optimized code cache is cleared.
- ClearOptimizedCodeCache(compilation_info);
- return;
- }
+ // Cache optimized code.
+ Handle<JSFunction> function = compilation_info->closure();
+ Isolate* isolate = function->GetIsolate();
+ Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
+ const BytecodeOffset osr_offset = compilation_info->osr_offset();
- // Cache optimized code.
- Handle<JSFunction> function = compilation_info->closure();
- Isolate* isolate = function->GetIsolate();
- Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<NativeContext> native_context(function->context().native_context(),
- isolate);
- if (compilation_info->osr_offset().IsNone()) {
- Handle<FeedbackVector> vector =
- handle(function->feedback_vector(), isolate);
- FeedbackVector::SetOptimizedCode(vector, code);
- } else {
- DCHECK(CodeKindCanOSR(kind));
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- compilation_info->osr_offset());
+ if (IsOSR(osr_offset)) {
+ DCHECK(CodeKindCanOSR(kind));
+ DCHECK(!compilation_info->function_context_specializing());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ osr_offset);
+ return;
+ }
+
+ DCHECK(!IsOSR(osr_offset));
+
+ if (compilation_info->function_context_specializing()) {
+ // Function context specialization folds in the function context, so no
+ // sharing can occur. Make sure the optimized code cache is cleared.
+ if (function->feedback_vector().has_optimized_code()) {
+ function->feedback_vector().ClearOptimizedCode();
+ }
+ return;
+ }
+
+ function->feedback_vector().SetOptimizedCode(code);
}
-}
+};
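// Usage sketch for the wrapper above (illustrative only; a simplified
// fragment pieced together from the callers further down in this diff):
//
//   Handle<CodeT> cached;
//   if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
//           .ToHandle(&cached)) {
//     return cached;  // Cache hit: reuse previously optimized code.
//   }
//   // ... compile ...
//   OptimizedCodeCache::Insert(compilation_info);  // Publish for later lookups.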
// Runs PrepareJob in the proper compilation & canonical scopes. Handles will be
// allocated in a persistent handle scope that is detached and handed off to the
@@ -912,12 +973,17 @@ bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
CompilationHandleScope compilation(isolate, compilation_info);
CanonicalHandleScopeForTurbofan canonical(isolate, compilation_info);
+ CompilerTracer::TracePrepareJob(isolate, compilation_info,
+ job->compiler_name());
compilation_info->ReopenHandlesInNewHandleScope(isolate);
return job->PrepareJob(isolate) == CompilationJob::SUCCEEDED;
}
-bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
- OptimizedCompilationInfo* compilation_info) {
+bool CompileTurbofan_NotConcurrent(Isolate* isolate,
+ TurbofanCompilationJob* job) {
+ OptimizedCompilationInfo* const compilation_info = job->compilation_info();
+ DCHECK_EQ(compilation_info->code_kind(), CodeKind::TURBOFAN);
+
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@@ -945,21 +1011,23 @@ bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
}
// Success!
- job->RecordCompilationStats(OptimizedCompilationJob::kSynchronous, isolate);
+ job->RecordCompilationStats(ConcurrencyMode::kSynchronous, isolate);
DCHECK(!isolate->has_pending_exception());
- InsertCodeIntoOptimizedCodeCache(compilation_info);
+ OptimizedCodeCache::Insert(compilation_info);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, isolate);
return true;
}
-bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
- Isolate* isolate,
- OptimizedCompilationInfo* compilation_info,
- CodeKind code_kind, Handle<JSFunction> function) {
+bool CompileTurbofan_Concurrent(Isolate* isolate,
+ std::unique_ptr<TurbofanCompilationJob> job) {
+ OptimizedCompilationInfo* const compilation_info = job->compilation_info();
+ DCHECK_EQ(compilation_info->code_kind(), CodeKind::TURBOFAN);
+ Handle<JSFunction> function = compilation_info->closure();
+
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
- compilation_info->closure()->ShortPrint();
+ function->ShortPrint();
PrintF(" later.\n");
}
return false;
@@ -968,7 +1036,7 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
if (isolate->heap()->HighMemoryPressure()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** High memory pressure, will retry optimizing ");
- compilation_info->closure()->ShortPrint();
+ function->ShortPrint();
PrintF(" later.\n");
}
return false;
@@ -984,41 +1052,22 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
}
// The background recompile will own this job.
- isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.get());
- job.release();
+ isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.release());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
- compilation_info->closure()->ShortPrint();
+ function->ShortPrint();
PrintF(" for concurrent optimization.\n");
}
- if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
- function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
- }
+ SetTieringState(*function, compilation_info->osr_offset(),
+ TieringState::kInProgress);
- // Note: Usually the active tier is expected to be Ignition at this point (in
- // other words we don't expect to optimize if the function is already
- // TF-optimized). There is a special case for OSR though, for which we *can*
- // reach this point even if we've already generated non-OSR'd TF code.
- DCHECK(function->shared().HasBytecodeArray());
+ DCHECK(compilation_info->shared_info()->HasBytecodeArray());
return true;
}
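// Informal sketch of the tiering-state transitions implied here and in the
// finalization paths later in this diff (illustration only):
//
//   kNone       --(job queued for concurrent compilation)--> kInProgress
//   kInProgress --(Finalize/DisposeTurbofanCompilationJob)--> kNone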
-// Returns the code object at which execution continues after a concurrent
-// optimization job has been started (but not finished).
-Handle<CodeT> ContinuationForConcurrentOptimization(
- Isolate* isolate, Handle<JSFunction> function) {
- if (function->shared().HasBaselineCode()) {
- CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
- function->set_code(baseline_code);
- return handle(baseline_code, isolate);
- }
- DCHECK(function->ActiveTierIsIgnition());
- return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
-}
-
-enum class GetOptimizedCodeResultHandling {
+enum class CompileResultBehavior {
// Default behavior, i.e. install the result, insert into caches, etc.
kDefault,
// Used only for stress testing. The compilation result should be discarded.
@@ -1038,43 +1087,40 @@ bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
}
}
-MaybeHandle<CodeT> CompileTurbofan(
- Isolate* isolate, Handle<JSFunction> function,
- Handle<SharedFunctionInfo> shared, ConcurrencyMode mode,
- BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
- GetOptimizedCodeResultHandling result_handling) {
+MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> shared,
+ ConcurrencyMode mode,
+ BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame,
+ CompileResultBehavior result_behavior) {
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
- static constexpr CodeKind kCodeKind = CodeKind::TURBOFAN;
-
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
bool has_script = shared->script().IsScript();
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
- std::unique_ptr<OptimizedCompilationJob> job(
- compiler::Pipeline::NewCompilationJob(isolate, function, kCodeKind,
- has_script, osr_offset, osr_frame));
- OptimizedCompilationInfo* compilation_info = job->compilation_info();
+ std::unique_ptr<TurbofanCompilationJob> job(
+ compiler::Pipeline::NewCompilationJob(isolate, function,
+ CodeKind::TURBOFAN, has_script,
+ osr_offset, osr_frame));
- if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
- compilation_info->set_discard_result_for_testing();
+ if (result_behavior == CompileResultBehavior::kDiscardForTesting) {
+ job->compilation_info()->set_discard_result_for_testing();
}
// Prepare the job and launch concurrent compilation, or compile now.
- if (mode == ConcurrencyMode::kConcurrent) {
- if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
- kCodeKind, function)) {
- return ContinuationForConcurrentOptimization(isolate, function);
- }
+ if (IsConcurrent(mode)) {
+ if (CompileTurbofan_Concurrent(isolate, std::move(job))) return {};
} else {
- DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
- if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
- return ToCodeT(compilation_info->code(), isolate);
+ DCHECK(IsSynchronous(mode));
+ if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
+ return ToCodeT(job->compilation_info()->code(), isolate);
}
}
@@ -1082,30 +1128,43 @@ MaybeHandle<CodeT> CompileTurbofan(
return {};
}
-MaybeHandle<CodeT> CompileMaglev(
- Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
- BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
- GetOptimizedCodeResultHandling result_handling) {
+#ifdef V8_ENABLE_MAGLEV
+// TODO(v8:7700): Record maglev compilations better.
+void RecordMaglevFunctionCompilation(Isolate* isolate,
+ Handle<JSFunction> function) {
+ Handle<AbstractCode> abstract_code(
+ AbstractCode::cast(FromCodeT(function->code())), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Script> script(Script::cast(shared->script()), isolate);
+ Handle<FeedbackVector> feedback_vector(function->feedback_vector(), isolate);
+
+ // Optimistic estimate.
+ double time_taken_ms = 0;
+
+ LogFunctionCompilation(isolate, CodeEventListener::FUNCTION_TAG, script,
+ shared, feedback_vector, abstract_code,
+ abstract_code->kind(), time_taken_ms);
+}
+#endif // V8_ENABLE_MAGLEV
+
+MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
+ ConcurrencyMode mode,
+ BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame,
+ CompileResultBehavior result_behavior) {
#ifdef V8_ENABLE_MAGLEV
DCHECK(FLAG_maglev);
// TODO(v8:7700): Add missing support.
- CHECK(osr_offset.IsNone());
+ CHECK(!IsOSR(osr_offset));
CHECK(osr_frame == nullptr);
- CHECK(result_handling == GetOptimizedCodeResultHandling::kDefault);
+ CHECK(result_behavior == CompileResultBehavior::kDefault);
// TODO(v8:7700): Tracing, see CompileTurbofan.
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
- if (mode == ConcurrencyMode::kNotConcurrent) {
- function->ClearOptimizationMarker();
- return Maglev::Compile(isolate, function);
- }
-
- DCHECK_EQ(mode, ConcurrencyMode::kConcurrent);
-
- // TODO(v8:7700): See everything in GetOptimizedCodeLater.
+ // TODO(v8:7700): See everything in CompileTurbofan_Concurrent.
// - Tracing,
// - timers,
// - aborts on memory pressure,
@@ -1116,32 +1175,55 @@ MaybeHandle<CodeT> CompileMaglev(
CompilationJob::Status status = job->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED); // TODO(v8:7700): Use status.
+ if (IsSynchronous(mode)) {
+ function->reset_tiering_state();
+ {
+ // Park the main thread Isolate here, to be in the same state as
+ // background threads.
+ ParkedScope parked_scope(isolate->main_thread_local_isolate());
+ if (job->ExecuteJob(isolate->counters()->runtime_call_stats(),
+ isolate->main_thread_local_isolate()) !=
+ CompilationJob::SUCCEEDED) {
+ return {};
+ }
+ }
+
+ if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
+ return {};
+ }
+
+ RecordMaglevFunctionCompilation(isolate, function);
+ return handle(function->code(), isolate);
+ }
+
+ DCHECK(IsConcurrent(mode));
+
// Enqueue it.
isolate->maglev_concurrent_dispatcher()->EnqueueJob(std::move(job));
// Remember that the function is currently being processed.
- function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
+ SetTieringState(*function, osr_offset, TieringState::kInProgress);
- // The code that triggered optimization continues execution here.
- return ContinuationForConcurrentOptimization(isolate, function);
+ return {};
#else // V8_ENABLE_MAGLEV
UNREACHABLE();
#endif // V8_ENABLE_MAGLEV
}
-MaybeHandle<CodeT> GetOptimizedCode(
+MaybeHandle<CodeT> GetOrCompileOptimized(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
JavaScriptFrame* osr_frame = nullptr,
- GetOptimizedCodeResultHandling result_handling =
- GetOptimizedCodeResultHandling::kDefault) {
+ CompileResultBehavior result_behavior = CompileResultBehavior::kDefault) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- // Make sure we clear the optimization marker on the function so that we
- // don't try to re-optimize.
- if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
+ // Clear the optimization marker on the function so that we don't try to
+ // re-optimize.
+ if (!IsOSR(osr_offset)) {
+ ResetTieringState(*function, osr_offset);
+ }
// TODO(v8:7700): Distinguish between Maglev and Turbofan.
if (shared->optimization_disabled() &&
@@ -1165,29 +1247,23 @@ MaybeHandle<CodeT> GetOptimizedCode(
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
- // Check the optimized code cache (stored on the SharedFunctionInfo).
- if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
- Handle<CodeT> cached_code;
- if (GetCodeFromOptimizedCodeCache(function, osr_offset, code_kind)
- .ToHandle(&cached_code)) {
- CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
- code_kind);
- return cached_code;
- }
+ Handle<CodeT> cached_code;
+ if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
+ .ToHandle(&cached_code)) {
+ return cached_code;
}
- // Reset profiler ticks, the function is no longer considered hot.
- // TODO(v8:7700): Update for Maglev tiering.
DCHECK(shared->is_compiled());
- function->feedback_vector().set_profiler_ticks(0);
+
+ ResetProfilerTicks(*function, osr_offset);
if (code_kind == CodeKind::TURBOFAN) {
return CompileTurbofan(isolate, function, shared, mode, osr_offset,
- osr_frame, result_handling);
+ osr_frame, result_behavior);
} else {
DCHECK_EQ(code_kind, CodeKind::MAGLEV);
return CompileMaglev(isolate, function, mode, osr_offset, osr_frame,
- result_handling);
+ result_behavior);
}
}
@@ -1203,16 +1279,15 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
if (code_kind == CodeKind::MAGLEV) return;
DCHECK(FLAG_stress_concurrent_inlining &&
- isolate->concurrent_recompilation_enabled() &&
- mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->concurrent_recompilation_enabled() && IsSynchronous(mode) &&
isolate->node_observer() == nullptr);
- GetOptimizedCodeResultHandling result_handling =
+ CompileResultBehavior result_behavior =
FLAG_stress_concurrent_inlining_attach_code
- ? GetOptimizedCodeResultHandling::kDefault
- : GetOptimizedCodeResultHandling::kDiscardForTesting;
- USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
- code_kind, BytecodeOffset::None(), nullptr,
- result_handling));
+ ? CompileResultBehavior::kDefault
+ : CompileResultBehavior::kDiscardForTesting;
+ USE(GetOrCompileOptimized(isolate, function, ConcurrencyMode::kConcurrent,
+ code_kind, BytecodeOffset::None(), nullptr,
+ result_behavior));
}
bool FailAndClearPendingException(Isolate* isolate) {
@@ -1988,7 +2063,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
// We should never reach here if the function is already compiled or
// optimized.
DCHECK(!function->is_compiled());
- DCHECK(!function->HasOptimizationMarker());
+ DCHECK(IsNone(function->tiering_state()));
DCHECK(!function->HasAvailableOptimizedCode());
// Reset the JSFunction if we are recompiling due to the bytecode having been
@@ -2025,18 +2100,17 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
CodeKindForTopTier());
const CodeKind code_kind = CodeKindForTopTier();
- const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+ const ConcurrencyMode concurrency_mode = ConcurrencyMode::kSynchronous;
if (FLAG_stress_concurrent_inlining &&
isolate->concurrent_recompilation_enabled() &&
- concurrency_mode == ConcurrencyMode::kNotConcurrent &&
isolate->node_observer() == nullptr) {
SpawnDuplicateConcurrentJobForStressTesting(isolate, function,
concurrency_mode, code_kind);
}
Handle<CodeT> maybe_code;
- if (GetOptimizedCode(isolate, function, concurrency_mode, code_kind)
+ if (GetOrCompileOptimized(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
}
@@ -2092,9 +2166,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
- // Arm back edges for OSR
- shared->GetBytecodeArray(isolate).set_osr_loop_nesting_level(
- AbstractCode::kMaxLoopNestingMarker);
+ shared->GetBytecodeArray(isolate).RequestOsrAtNextOpportunity();
}
}
double time_taken_ms = time_taken.InMillisecondsF();
@@ -2138,7 +2210,7 @@ bool Compiler::CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
// Bytecode must be available for maglev compilation.
DCHECK(is_compiled_scope->is_compiled());
// TODO(v8:7700): Support concurrent compilation.
- DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
+ DCHECK(IsSynchronous(mode));
// Maglev code needs a feedback vector.
JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
@@ -2188,36 +2260,27 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
DCHECK(AllowCompilation::IsAllowed(isolate));
if (FLAG_stress_concurrent_inlining &&
- isolate->concurrent_recompilation_enabled() &&
- mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->concurrent_recompilation_enabled() && IsSynchronous(mode) &&
isolate->node_observer() == nullptr) {
SpawnDuplicateConcurrentJobForStressTesting(isolate, function, mode,
code_kind);
}
Handle<CodeT> code;
- if (!GetOptimizedCode(isolate, function, mode, code_kind).ToHandle(&code)) {
- // Optimization failed, get the existing code. We could have optimized code
- // from a lower tier here. Unoptimized code must exist already if we are
- // optimizing.
- DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared().is_compiled());
- DCHECK(function->shared().HasBytecodeArray());
- code = ContinuationForConcurrentOptimization(isolate, function);
+ if (GetOrCompileOptimized(isolate, function, mode, code_kind)
+ .ToHandle(&code)) {
+ function->set_code(*code, kReleaseStore);
}
- function->set_code(*code, kReleaseStore);
-
- // Check postconditions on success.
+#ifdef DEBUG
DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared().is_compiled());
DCHECK(function->is_compiled());
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->IsInOptimizationQueue());
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->ChecksOptimizationMarker());
- DCHECK_IMPLIES(function->IsInOptimizationQueue(),
- mode == ConcurrencyMode::kConcurrent);
+ DCHECK(function->shared().HasBytecodeArray());
+ const TieringState tiering_state = function->tiering_state();
+ DCHECK(IsNone(tiering_state) || IsInProgress(tiering_state));
+ DCHECK_IMPLIES(IsInProgress(tiering_state), function->ChecksTieringState());
+ DCHECK_IMPLIES(IsInProgress(tiering_state), IsConcurrent(mode));
+#endif // DEBUG
}
// static
@@ -2290,9 +2353,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!context->IsNativeContext()) {
maybe_outer_scope_info = handle(context->scope_info(), isolate);
}
- script =
- parse_info.CreateScript(isolate, source, kNullMaybeHandle,
- OriginOptionsForEval(outer_info->script()));
+ script = parse_info.CreateScript(
+ isolate, source, kNullMaybeHandle,
+ OriginOptionsForEval(outer_info->script(), parsing_while_debugging));
script->set_eval_from_shared(*outer_info);
if (eval_position == kNoSourcePosition) {
// If the position is missing, attempt to get the code offset by
@@ -2303,7 +2366,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(
summary.AsJavaScript().function()->shared());
- script->set_origin_options(OriginOptionsForEval(*summary.script()));
+ script->set_origin_options(
+ OriginOptionsForEval(*summary.script(), parsing_while_debugging));
eval_position = -summary.code_offset();
} else {
eval_position = 0;
@@ -3309,21 +3373,67 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
-MaybeHandle<CodeT> Compiler::GetOptimizedCodeForOSR(
- Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame) {
- DCHECK(!osr_offset.IsNone());
- DCHECK_NOT_NULL(osr_frame);
- return GetOptimizedCode(isolate, function, ConcurrencyMode::kNotConcurrent,
- CodeKindForOSR(), osr_offset, osr_frame);
+MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
+ Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ UnoptimizedFrame* frame,
+ ConcurrencyMode mode) {
+ DCHECK(IsOSR(osr_offset));
+ DCHECK_NOT_NULL(frame);
+
+ if (V8_UNLIKELY(isolate->serializer_enabled())) return {};
+ if (V8_UNLIKELY(function->shared().optimization_disabled())) return {};
+
+ // TODO(chromium:1031479): Currently, the OSR triggering mechanism is tied to
+ // the bytecode array, so it might be possible to mark a closure in one native
+ // context and then OSR a closure from a different native context. Check for a
+ // feedback vector before OSRing; we don't expect this to happen often.
+ if (V8_UNLIKELY(!function->has_feedback_vector())) return {};
+
+ // One OSR job per function at a time.
+ if (IsInProgress(function->osr_tiering_state())) {
+ return {};
+ }
+
+ // -- Alright, decided to proceed. --
+
+ // Disarm all back edges, i.e. reset the OSR urgency and install target.
+ //
+ // Note that the bytecode array active on the stack might be different from
+ // the one installed on the function (e.g. patched by the debugger). This is
+ // fine, however, because we guarantee the layout to be in sync, so any
+ // BytecodeOffset representing the entry point is valid for any copy of the
+ // bytecode.
+ Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), isolate);
+ bytecode->reset_osr_urgency_and_install_target();
+
+ CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset, mode);
+ MaybeHandle<CodeT> result = GetOrCompileOptimized(
+ isolate, function, mode, CodeKind::TURBOFAN, osr_offset, frame);
+
+ if (result.is_null()) {
+ CompilerTracer::TraceOptimizeOSRUnavailable(isolate, function, osr_offset,
+ mode);
+ }
+
+ return result;
}
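// Hypothetical caller sketch for the entry point above (variable names are
// illustrative; an empty handle means failure or a queued concurrent job):
//
//   Handle<CodeT> osr_code;
//   if (Compiler::CompileOptimizedOSR(isolate, function, osr_offset, frame,
//                                     ConcurrencyMode::kConcurrent)
//           .ToHandle(&osr_code)) {
//     // A synchronous compile succeeded or cached OSR code was found;
//     // execution can continue in osr_code at osr_offset.
//   }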
// static
-bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
- Isolate* isolate) {
+void Compiler::DisposeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ bool restore_function_code) {
+ Handle<JSFunction> function = job->compilation_info()->closure();
+ ResetTieringState(*function, job->compilation_info()->osr_offset());
+ if (restore_function_code) {
+ function->set_code(function->shared().GetCode(), kReleaseStore);
+ }
+}
+
+// static
+bool Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ Isolate* isolate) {
VMState<COMPILER> state(isolate);
- // Take ownership of the job. Deleting the job also tears down the zone.
- std::unique_ptr<OptimizedCompilationJob> job_scope(job);
OptimizedCompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
@@ -3331,12 +3441,14 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentFinalize");
+ Handle<JSFunction> function = compilation_info->closure();
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
const bool use_result = !compilation_info->discard_result_for_testing();
+ const BytecodeOffset osr_offset = compilation_info->osr_offset();
+
if (V8_LIKELY(use_result)) {
- // Reset profiler ticks, function is no longer considered hot.
- compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
+ ResetProfilerTicks(*function, osr_offset);
}
DCHECK(!shared->HasBreakInfo());
@@ -3350,15 +3462,23 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
if (shared->optimization_disabled()) {
job->RetryOptimization(BailoutReason::kOptimizationDisabled);
} else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
- job->RecordCompilationStats(OptimizedCompilationJob::kConcurrent,
- isolate);
+ job->RecordCompilationStats(ConcurrencyMode::kConcurrent, isolate);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
if (V8_LIKELY(use_result)) {
- InsertCodeIntoOptimizedCodeCache(compilation_info);
+ ResetTieringState(*function, osr_offset);
+ OptimizedCodeCache::Insert(compilation_info);
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
- compilation_info->closure()->set_code(*compilation_info->code(),
- kReleaseStore);
+ if (IsOSR(osr_offset)) {
+ if (FLAG_trace_osr) {
+ PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
+ "[OSR - requesting install. function: %s, osr offset: %d]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt());
+ }
+ shared->GetBytecodeArray(isolate).set_osr_install_target(osr_offset);
+ } else {
+ function->set_code(*compilation_info->code(), kReleaseStore);
+ }
}
return CompilationJob::SUCCEEDED;
}
@@ -3367,16 +3487,25 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
if (V8_LIKELY(use_result)) {
- compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
- // Clear the InOptimizationQueue marker, if it exists.
- if (compilation_info->closure()->IsInOptimizationQueue()) {
- compilation_info->closure()->ClearOptimizationMarker();
+ ResetTieringState(*function, osr_offset);
+ if (!IsOSR(osr_offset)) {
+ function->set_code(shared->GetCode(), kReleaseStore);
}
}
return CompilationJob::FAILED;
}
// static
+bool Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
+ Isolate* isolate) {
+#ifdef V8_ENABLE_MAGLEV
+ VMState<COMPILER> state(isolate);
+ RecordMaglevFunctionCompilation(isolate, job->function());
+#endif
+ return CompilationJob::SUCCEEDED;
+}
+
+// static
void Compiler::PostInstantiation(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
@@ -3416,7 +3545,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
CompilerTracer::TraceMarkForAlwaysOpt(isolate, function);
JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
function->MarkForOptimization(isolate, CodeKind::TURBOFAN,
- ConcurrencyMode::kNotConcurrent);
+ ConcurrencyMode::kSynchronous);
}
}
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index f34c0a3326..4f8270f1e4 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -30,24 +30,23 @@ namespace internal {
// Forward declarations.
class AlignedCachedData;
-class AstRawString;
class BackgroundCompileTask;
class IsCompiledScope;
-class JavaScriptFrame;
class OptimizedCompilationInfo;
-class OptimizedCompilationJob;
class ParseInfo;
-class Parser;
class RuntimeCallStats;
class TimedHistogram;
+class TurbofanCompilationJob;
class UnoptimizedCompilationInfo;
class UnoptimizedCompilationJob;
+class UnoptimizedFrame;
class WorkerThreadRuntimeCallStats;
struct ScriptDetails;
struct ScriptStreamingData;
-using UnoptimizedCompilationJobList =
- std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>;
+namespace maglev {
+class MaglevCompilationJob;
+} // namespace maglev
// The V8 compiler API.
//
@@ -96,6 +95,13 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind);
+ // Generate and return optimized code for OSR. The empty handle is returned
+ // either on failure, or after spawning a concurrent OSR task (in which case
+ // a future OSR request will pick up the resulting code object).
+ V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> CompileOptimizedOSR(
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
+ UnoptimizedFrame* frame, ConcurrencyMode mode);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
Isolate* isolate);
@@ -111,9 +117,17 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Isolate* isolate,
ClearExceptionFlag flag);
- // Finalize and install optimized code from previously run job.
- static bool FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
- Isolate* isolate);
+ // Dispose a job without finalization.
+ static void DisposeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ bool restore_function_code);
+
+ // Finalize and install Turbofan code from a previously run job.
+ static bool FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ Isolate* isolate);
+
+ // Finalize and install Maglev code from a previously run job.
+ static bool FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
+ Isolate* isolate);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -222,20 +236,6 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(FunctionLiteral* node,
Handle<Script> script,
IsolateT* isolate);
-
- // ===========================================================================
- // The following family of methods provides support for OSR. Code generated
- // for entry via OSR might not be suitable for normal entry, hence will be
- // returned directly to the caller.
- //
- // Please note this interface is the only part dealing with {Code} objects
- // directly. Other methods are agnostic to {Code} and can use an interpreter
- // instead of generating JIT code for a function at all.
-
- // Generate and return optimized code for OSR, or empty handle on failure.
- V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> GetOptimizedCodeForOSR(
- Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame);
};
// A base class for compilation jobs intended to run concurrent to the main
@@ -364,24 +364,48 @@ class UnoptimizedCompilationJob : public CompilationJob {
// Each of the three phases can either fail or succeed.
class OptimizedCompilationJob : public CompilationJob {
public:
- OptimizedCompilationJob(OptimizedCompilationInfo* compilation_info,
- const char* compiler_name,
- State initial_state = State::kReadyToPrepare)
- : CompilationJob(initial_state),
- compilation_info_(compilation_info),
- compiler_name_(compiler_name) {}
+ OptimizedCompilationJob(const char* compiler_name, State initial_state)
+ : CompilationJob(initial_state), compiler_name_(compiler_name) {}
// Prepare the compile job. Must be called on the main thread.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status PrepareJob(Isolate* isolate);
- // Executes the compile job. Can be called on a background thread if
- // can_execute_on_background_thread() returns true.
+ // Executes the compile job. Can be called on a background thread.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status
ExecuteJob(RuntimeCallStats* stats, LocalIsolate* local_isolate = nullptr);
// Finalizes the compile job. Must be called on the main thread.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status FinalizeJob(Isolate* isolate);
+ const char* compiler_name() const { return compiler_name_; }
+
+ protected:
+ // Overridden by the actual implementation.
+ virtual Status PrepareJobImpl(Isolate* isolate) = 0;
+ virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_heap) = 0;
+ virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
+
+ base::TimeDelta time_taken_to_prepare_;
+ base::TimeDelta time_taken_to_execute_;
+ base::TimeDelta time_taken_to_finalize_;
+
+ private:
+ const char* const compiler_name_;
+};
+
+// Thin wrapper to split off Turbofan-specific parts.
+class TurbofanCompilationJob : public OptimizedCompilationJob {
+ public:
+ TurbofanCompilationJob(OptimizedCompilationInfo* compilation_info,
+ State initial_state)
+ : OptimizedCompilationJob("Turbofan", initial_state),
+ compilation_info_(compilation_info) {}
+
+ OptimizedCompilationInfo* compilation_info() const {
+ return compilation_info_;
+ }
+
// Report a transient failure, try again next time. Should only be called on
// optimization compilation jobs.
Status RetryOptimization(BailoutReason reason);
@@ -390,28 +414,12 @@ class OptimizedCompilationJob : public CompilationJob {
// Should only be called on optimization compilation jobs.
Status AbortOptimization(BailoutReason reason);
- enum CompilationMode { kConcurrent, kSynchronous };
- void RecordCompilationStats(CompilationMode mode, Isolate* isolate) const;
+ void RecordCompilationStats(ConcurrencyMode mode, Isolate* isolate) const;
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Isolate* isolate) const;
- OptimizedCompilationInfo* compilation_info() const {
- return compilation_info_;
- }
-
- protected:
- // Overridden by the actual implementation.
- virtual Status PrepareJobImpl(Isolate* isolate) = 0;
- virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
- LocalIsolate* local_heap) = 0;
- virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
-
private:
- OptimizedCompilationInfo* compilation_info_;
- base::TimeDelta time_taken_to_prepare_;
- base::TimeDelta time_taken_to_execute_;
- base::TimeDelta time_taken_to_finalize_;
- const char* compiler_name_;
+ OptimizedCompilationInfo* const compilation_info_;
};
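// Illustrative lifecycle of a TurbofanCompilationJob, simplified from the
// compiler.cc changes in this diff (handle scopes and error handling omitted):
//
//   std::unique_ptr<TurbofanCompilationJob> job(
//       compiler::Pipeline::NewCompilationJob(isolate, function,
//                                             CodeKind::TURBOFAN, has_script,
//                                             osr_offset, osr_frame));
//   job->PrepareJob(isolate);               // main thread
//   job->ExecuteJob(stats, local_isolate);  // may run on a background thread
//   Compiler::FinalizeTurbofanCompilationJob(job.get(), isolate);  // main thread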
class FinalizeUnoptimizedCompilationData {
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 150ffd6608..2755cd3e22 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -2033,9 +2033,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
ASM_CODE_COMMENT(this);
CallBuiltin(target);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 081614e9c4..ccee1823d2 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -114,7 +114,6 @@ namespace internal {
V(StoreGlobalWithVector) \
V(StoreTransition) \
V(StoreWithVector) \
- V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
IF_TSAN(V, TSANStore) \
@@ -1553,19 +1552,6 @@ class BinarySmiOp_BaselineDescriptor
static constexpr inline auto registers();
};
-// This desciptor is shared among String.p.charAt/charCodeAt/codePointAt
-// as they all have the same interface.
-class StringAtDescriptor final
- : public StaticCallInterfaceDescriptor<StringAtDescriptor> {
- public:
- DEFINE_PARAMETERS(kReceiver, kPosition)
- // TODO(turbofan): Return untagged value here.
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedSigned(), // result 1
- MachineType::AnyTagged(), // kReceiver
- MachineType::IntPtr()) // kPosition
- DECLARE_DESCRIPTOR(StringAtDescriptor)
-};
-
class StringAtAsStringDescriptor final
: public StaticCallInterfaceDescriptor<StringAtAsStringDescriptor> {
public:
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index d685aaafdd..11acc7c4d2 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -4087,9 +4087,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t7);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index 734e7cf931..23e99b1c2f 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -417,6 +417,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ slli_d(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ add_w(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
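// Worked example of the tagging above (values chosen for illustration):
//   SmiValuesAre32Bits():  src = 5 -> slli_d by 32 -> dst = 0x0000000500000000
//   SmiValuesAre31Bits():  src = 5 -> add_w(dst, src, src) -> dst = 10
// In both cases the low tag bits are zero, matching kSmiTag == 0.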
+
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
@@ -998,18 +1010,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities.
- void SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (SmiValuesAre32Bits()) {
- slli_d(dst, src, 32);
- } else {
- DCHECK(SmiValuesAre31Bits());
- add_w(dst, src, src);
- }
- }
-
- void SmiTag(Register reg) { SmiTag(reg, reg); }
-
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 338c0debf6..b911fb9bfb 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -5571,9 +5571,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t9);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index f2ed9786c6..26f04401b9 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -29,6 +29,13 @@ namespace internal {
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+// Currently, MIPS uses only even floating-point registers, except
+// for C function parameter registers.
+#define DOUBLE_USE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \
+ V(f14) V(f15) V(f16) V(f18) V(f20) V(f22) V(f24) V(f26) \
+ V(f28) V(f30)
+
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS(V) \
V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 46be9ee787..d9eb08e1d1 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -6114,9 +6114,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t9);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 43f28ac40e..edcb8cda37 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -482,6 +482,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ dsll32(dst, src, 0);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Addu(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
@@ -1184,18 +1196,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities.
- void SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (SmiValuesAre32Bits()) {
- dsll32(dst, src, 0);
- } else {
- DCHECK(SmiValuesAre31Bits());
- Addu(dst, src, src);
- }
- }
-
- void SmiTag(Register reg) { SmiTag(reg, reg); }
-
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 6c9980f50d..00feb1c01c 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -29,6 +29,13 @@ namespace internal {
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+// Currently, MIPS64 uses only even floating-point registers, except
+// for C function parameter registers.
+#define DOUBLE_USE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \
+ V(f14) V(f15) V(f16) V(f17) V(f18) V(f19) V(f20) V(f22) \
+ V(f24) V(f26) V(f28) V(f30)
+
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS(V) \
V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 7678298ab3..f699f2a80d 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -73,9 +73,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_analyze_environment_liveness) {
set_analyze_environment_liveness();
}
- if (FLAG_function_context_specialization) {
- set_function_context_specializing();
- }
if (FLAG_turbo_splitting) set_splitting();
break;
case CodeKind::BYTECODE_HANDLER:
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 9543f5f4b1..89bf6c3a3b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -76,7 +76,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#else
base::CPU cpu;
if (cpu.part() == base::CPU::kPPCPower10) {
+// IBMi does not yet support the prefixed instructions introduced on Power10.
+// Run in P9 mode until the OS adds support.
+#if defined(__PASE__)
+ supported_ |= (1u << PPC_9_PLUS);
+#else
supported_ |= (1u << PPC_10_PLUS);
+#endif
} else if (cpu.part() == base::CPU::kPPCPower9) {
supported_ |= (1u << PPC_9_PLUS);
} else if (cpu.part() == base::CPU::kPPCPower8) {
@@ -1135,6 +1141,110 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
}
#endif
+// Prefixed instructions.
+#define GENERATE_PREFIX_SUFFIX_BITS(immediate, prefix, suffix) \
+ CHECK(is_int34(immediate)); \
+ int32_t prefix = \
+ SIGN_EXT_IMM18((immediate >> 16) & kImm18Mask); /* 18 bits.*/ \
+ int16_t suffix = immediate & kImm16Mask; /* 16 bits.*/ \
+ DCHECK(is_int18(prefix));
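// Worked example of the 34-bit split performed by the macro above (the
// concrete constant is chosen for illustration only):
//   immediate = 0x123458000  (fits in a signed 34-bit field)
//   prefix    = SIGN_EXT_IMM18((immediate >> 16) & kImm18Mask)  // 0x12345
//   suffix    = immediate & kImm16Mask                          // 0x8000
// Concatenating them again, (int64_t{prefix} << 16) | (suffix & kImm16Mask),
// recovers the original immediate.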
+
+void Assembler::paddi(Register dst, Register src, const Operand& imm) {
+ CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
+ DCHECK(src != r0); // use pli instead to show intent.
+ intptr_t immediate = imm.immediate();
+ GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ addi(dst, src, Operand(lo));
+}
+
+void Assembler::pli(Register dst, const Operand& imm) {
+ CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
+ intptr_t immediate = imm.immediate();
+ GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ li(dst, Operand(lo));
+}
+
+void Assembler::psubi(Register dst, Register src, const Operand& imm) {
+ paddi(dst, src, Operand(-(imm.immediate())));
+}
+
+void Assembler::plbz(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lbz(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plhz(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lhz(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plha(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lha(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plwz(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lwz(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plwa(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_8ls(Operand(hi));
+ emit(PPLWA | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
+}
+
+void Assembler::pld(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_8ls(Operand(hi));
+ emit(PPLD | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
+}
+
+void Assembler::plfs(DoubleRegister dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lfs(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plfd(DoubleRegister dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lfd(dst, MemOperand(src.ra(), lo));
+}
+#undef GENERATE_PREFIX_SUFFIX_BITS
+
int Assembler::instructions_required_for_mov(Register dst,
const Operand& src) const {
bool canOptimize =
@@ -1162,7 +1272,9 @@ bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
#else
bool allowOverflow = !(canOptimize || dst == r0);
#endif
- if (canOptimize && is_int16(value)) {
+ if (canOptimize &&
+ (is_int16(value) ||
+ (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))) {
// Prefer a single-instruction load-immediate.
return false;
}
@@ -1209,7 +1321,10 @@ void Assembler::mov(Register dst, const Operand& src) {
bool canOptimize;
canOptimize =
- !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
+ !(relocatable ||
+ (is_trampoline_pool_blocked() &&
+ (!is_int16(value) ||
+ !(CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))));
if (!src.IsHeapObjectRequest() &&
use_constant_pool_for_mov(dst, src, canOptimize)) {
@@ -1239,6 +1354,8 @@ void Assembler::mov(Register dst, const Operand& src) {
if (canOptimize) {
if (is_int16(value)) {
li(dst, Operand(value));
+ } else if (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)) {
+ pli(dst, Operand(value));
} else {
uint16_t u16;
#if V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index fe21a1c8ad..b5b1899852 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -604,6 +604,22 @@ class Assembler : public AssemblerBase {
PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
#undef DECLARE_PPC_VC_INSTRUCTIONS
+#define DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00(name, instr_name, instr_value) \
+ inline void name(const Operand& imm, const PRBit pr = LeavePR) { \
+ prefix_form(instr_name, imm, pr); \
+ }
+#define DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10(name, instr_name, instr_value) \
+ inline void name(const Operand& imm, const PRBit pr = LeavePR) { \
+ prefix_form(instr_name, imm, pr); \
+ }
+ inline void prefix_form(Instr instr, const Operand& imm, int pr) {
+ emit_prefix(instr | pr * B20 | (imm.immediate() & kImm18Mask));
+ }
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00)
+ PPC_PREFIX_OPCODE_TYPE_10_LIST(DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10)
+#undef DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00
+#undef DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10
+
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
@@ -1119,6 +1135,19 @@ class Assembler : public AssemblerBase {
void stxvx(const Simd128Register rt, const MemOperand& dst);
void xxspltib(const Simd128Register rt, const Operand& imm);
+ // Prefixed instructions.
+ void paddi(Register dst, Register src, const Operand& imm);
+ void pli(Register dst, const Operand& imm);
+ void psubi(Register dst, Register src, const Operand& imm);
+ void plbz(Register dst, const MemOperand& src);
+ void plhz(Register dst, const MemOperand& src);
+ void plha(Register dst, const MemOperand& src);
+ void plwz(Register dst, const MemOperand& src);
+ void plwa(Register dst, const MemOperand& src);
+ void pld(Register dst, const MemOperand& src);
+ void plfs(DoubleRegister dst, const MemOperand& src);
+ void plfd(DoubleRegister dst, const MemOperand& src);
+
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1403,6 +1432,21 @@ class Assembler : public AssemblerBase {
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
}
+
+ void emit_prefix(Instr x) {
+ // Prefixed instructions cannot cross 64-byte boundaries. Add a nop if the
+ // boundary would be crossed midway through the instruction.
+ // Code is set to be 64-byte aligned on PPC64 after relocation (look for
+ // kCodeAlignment). We use pc_offset() instead of pc_ because the current
+ // pc_ alignment could be different after relocation.
+ if (((pc_offset() + sizeof(Instr)) & 63) == 0) {
+ nop();
+ }
+ // Do not emit a trampoline pool between the prefix and the suffix.
+ CHECK(is_trampoline_pool_blocked());
+ emit(x);
+ }
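// Boundary example for the check above (offsets are hypothetical): with
// pc_offset() == 60, 60 + 4 == 64 is a multiple of 64, so the prefix word
// would occupy bytes 60..63 and its suffix bytes 64..67, straddling a
// 64-byte line; the nop() pushes the prefix to offset 64 so that prefix and
// suffix land in the same 64-byte block.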
+
void TrackBranch() {
DCHECK(!trampoline_emitted_);
int count = tracked_branch_count_++;
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 37593003e1..74a1bfc89f 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -92,12 +92,18 @@ constexpr int kRootRegisterBias = 128;
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+// sign-extend the least significant 18-bits of value <imm>
+#define SIGN_EXT_IMM18(imm) ((static_cast<int>(imm) << 14) >> 14)
+
// sign-extend the least significant 22-bits of value <imm>
#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)
// sign-extend the least significant 26-bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
+// sign-extend the least significant 34-bits of prefix+suffix value <imm>
+#define SIGN_EXT_IMM34(imm) ((static_cast<int64_t>(imm) << 30) >> 30)
+
// -----------------------------------------------------------------------------
// Conditions.
@@ -2672,49 +2678,60 @@ immediate-specified index */ \
/* System Call */ \
V(sc, SC, 0x44000002)
-#define PPC_OPCODE_LIST(V) \
- PPC_X_OPCODE_LIST(V) \
- PPC_X_OPCODE_EH_S_FORM_LIST(V) \
- PPC_XO_OPCODE_LIST(V) \
- PPC_DS_OPCODE_LIST(V) \
- PPC_DQ_OPCODE_LIST(V) \
- PPC_MDS_OPCODE_LIST(V) \
- PPC_MD_OPCODE_LIST(V) \
- PPC_XS_OPCODE_LIST(V) \
- PPC_D_OPCODE_LIST(V) \
- PPC_I_OPCODE_LIST(V) \
- PPC_B_OPCODE_LIST(V) \
- PPC_XL_OPCODE_LIST(V) \
- PPC_A_OPCODE_LIST(V) \
- PPC_XFX_OPCODE_LIST(V) \
- PPC_M_OPCODE_LIST(V) \
- PPC_SC_OPCODE_LIST(V) \
- PPC_Z23_OPCODE_LIST(V) \
- PPC_Z22_OPCODE_LIST(V) \
- PPC_EVX_OPCODE_LIST(V) \
- PPC_XFL_OPCODE_LIST(V) \
- PPC_EVS_OPCODE_LIST(V) \
- PPC_VX_OPCODE_LIST(V) \
- PPC_VA_OPCODE_LIST(V) \
- PPC_VC_OPCODE_LIST(V) \
- PPC_XX1_OPCODE_LIST(V) \
- PPC_XX2_OPCODE_LIST(V) \
- PPC_XX3_OPCODE_VECTOR_LIST(V) \
- PPC_XX3_OPCODE_SCALAR_LIST(V) \
- PPC_XX4_OPCODE_LIST(V)
+#define PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
+ V(pload_store_8ls, PLOAD_STORE_8LS, 0x4000000) \
+ V(pplwa, PPLWA, 0xA4000000) \
+ V(ppld, PPLD, 0xE4000000)
+
+#define PPC_PREFIX_OPCODE_TYPE_10_LIST(V) \
+ V(pload_store_mls, PLOAD_STORE_MLS, 0x6000000)
+
+#define PPC_OPCODE_LIST(V) \
+ PPC_X_OPCODE_LIST(V) \
+ PPC_X_OPCODE_EH_S_FORM_LIST(V) \
+ PPC_XO_OPCODE_LIST(V) \
+ PPC_DS_OPCODE_LIST(V) \
+ PPC_DQ_OPCODE_LIST(V) \
+ PPC_MDS_OPCODE_LIST(V) \
+ PPC_MD_OPCODE_LIST(V) \
+ PPC_XS_OPCODE_LIST(V) \
+ PPC_D_OPCODE_LIST(V) \
+ PPC_I_OPCODE_LIST(V) \
+ PPC_B_OPCODE_LIST(V) \
+ PPC_XL_OPCODE_LIST(V) \
+ PPC_A_OPCODE_LIST(V) \
+ PPC_XFX_OPCODE_LIST(V) \
+ PPC_M_OPCODE_LIST(V) \
+ PPC_SC_OPCODE_LIST(V) \
+ PPC_Z23_OPCODE_LIST(V) \
+ PPC_Z22_OPCODE_LIST(V) \
+ PPC_EVX_OPCODE_LIST(V) \
+ PPC_XFL_OPCODE_LIST(V) \
+ PPC_EVS_OPCODE_LIST(V) \
+ PPC_VX_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_LIST(V) \
+ PPC_VC_OPCODE_LIST(V) \
+ PPC_XX1_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX3_OPCODE_VECTOR_LIST(V) \
+ PPC_XX3_OPCODE_SCALAR_LIST(V) \
+ PPC_XX4_OPCODE_LIST(V) \
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
+ PPC_PREFIX_OPCODE_TYPE_10_LIST(V)
enum Opcode : uint32_t {
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
opcode_name = opcode_value,
PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
- EXT0 = 0x10000000, // Extended code set 0
- EXT1 = 0x4C000000, // Extended code set 1
- EXT2 = 0x7C000000, // Extended code set 2
- EXT3 = 0xEC000000, // Extended code set 3
- EXT4 = 0xFC000000, // Extended code set 4
- EXT5 = 0x78000000, // Extended code set 5 - 64bit only
- EXT6 = 0xF0000000, // Extended code set 6
+ EXTP = 0x4000000, // Extended code set prefixed
+ EXT0 = 0x10000000, // Extended code set 0
+ EXT1 = 0x4C000000, // Extended code set 1
+ EXT2 = 0x7C000000, // Extended code set 2
+ EXT3 = 0xEC000000, // Extended code set 3
+ EXT4 = 0xFC000000, // Extended code set 4
+ EXT5 = 0x78000000, // Extended code set 5 - 64bit only
+ EXT6 = 0xF0000000, // Extended code set 6
};
// Instruction encoding bits and masks.
@@ -2752,6 +2769,7 @@ enum {
kImm24Mask = (1 << 24) - 1,
kOff16Mask = (1 << 16) - 1,
kImm16Mask = (1 << 16) - 1,
+ kImm18Mask = (1 << 18) - 1,
kImm22Mask = (1 << 22) - 1,
kImm26Mask = (1 << 26) - 1,
kBOfieldMask = 0x1f << 21,
@@ -2795,6 +2813,9 @@ enum LKBit { // Bit 0
LeaveLK = 0 // No action
};
+// Prefixed R bit.
+enum PRBit { SetPR = 1, LeavePR = 0 };
+
enum BOfield { // Bits 25-21
DCBNZF = 0 << 21, // Decrement CTR; branch if CTR != 0 and condition false
DCBEZF = 2 << 21, // Decrement CTR; branch if CTR == 0 and condition false
@@ -2968,12 +2989,28 @@ class Instruction {
inline uint32_t OpcodeField() const {
return static_cast<Opcode>(BitField(31, 26));
}
+ inline uint32_t PrefixOpcodeField() const {
+ return static_cast<Opcode>(BitField(31, 25));
+ }
#define OPCODE_CASES(name, opcode_name, opcode_value) case opcode_name:
inline Opcode OpcodeBase() const {
- uint32_t opcode = OpcodeField();
- uint32_t extcode = OpcodeField();
+ uint32_t opcode = PrefixOpcodeField();
+ uint32_t extcode = PrefixOpcodeField();
+ // Check for prefix.
+ switch (opcode) {
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(OPCODE_CASES)
+ PPC_PREFIX_OPCODE_TYPE_10_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = OpcodeField();
+ extcode = OpcodeField();
+ // Check for suffix.
+ switch (opcode) {
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
switch (opcode) {
PPC_D_OPCODE_LIST(OPCODE_CASES)
PPC_I_OPCODE_LIST(OPCODE_CASES)
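
The new PPC_PREFIX_OPCODE_TYPE_* lists and the PrefixOpcodeField() accessor above classify Power10 prefixed instruction words by looking at more bits than the usual 6-bit primary opcode. A minimal stand-alone sketch of that bit extraction follows; it mirrors the in-place masking done by the V8 BitField helper, and the sample words are hypothetical:

// Minimal sketch, not V8 code: why PrefixOpcodeField() (bits 25..31) is needed
// where OpcodeField() (bits 26..31) is not enough. BitField keeps the selected
// bits of the word in place, like the assembler helper it imitates.
#include <cstdint>
#include <cstdio>

constexpr uint32_t BitField(uint32_t instr, int hi, int lo) {
  return instr & (((2u << (hi - lo)) - 1u) << lo);
}

int main() {
  // Hypothetical prefix words; only the high bits matter for classification.
  uint32_t p8ls = 0x04000000u | 0x1234u;  // PLOAD_STORE_8LS-style prefix
  uint32_t pmls = 0x06000000u | 0x1234u;  // PLOAD_STORE_MLS-style prefix
  // Bits 26..31 alone give 0x04000000 for both words, so the wider 25..31
  // field is what tells the two prefix groups apart.
  std::printf("%#x %#x\n", BitField(p8ls, 31, 26), BitField(pmls, 31, 26));
  std::printf("%#x %#x\n", BitField(p8ls, 31, 25), BitField(pmls, 31, 25));
  return 0;
}
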
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 6275d14e89..b00ce7f1c5 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -279,6 +279,13 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
bind(&skip);
}
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this,
+ CommentForOffHeapTrampoline("tail call", builtin));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Jump(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
@@ -620,6 +627,16 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
RecordComment("]");
}
+void TurboAssembler::LoadTaggedSignedField(Register destination,
+ MemOperand field_operand,
+ Register scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ LoadU64(destination, field_operand, scratch);
+ }
+}
+
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register slot_address,
LinkRegisterStatus lr_status,
@@ -3603,6 +3620,19 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ ASM_CODE_COMMENT(this);
+ LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::BuiltinEntrySlotOffset(builtin));
+}
+
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@@ -3707,9 +3737,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
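
The LoadEntryFromBuiltin/EntryFromBuiltinAsOperand additions above fetch a builtin's off-heap entry address from a slot addressed relative to the root register. A rough stand-alone model of that lookup is sketched below; the struct, enum values, and offsets are illustrative stand-ins, not V8's real IsolateData layout:

// Illustrative sketch only: builtin entry points live in a per-isolate table
// reachable as base + BuiltinEntrySlotOffset(builtin), which is the shape of
// the MemOperand returned by EntryFromBuiltinAsOperand.
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

enum class Builtin : int { kAbort = 0, kAdd = 1, kCount = 2 };  // made-up ids

struct IsolateDataSketch {
  // In V8 this table sits at a fixed offset from kRootRegister.
  std::array<uintptr_t, static_cast<size_t>(Builtin::kCount)> builtin_entry_table;
  static constexpr int BuiltinEntrySlotOffset(Builtin b) {
    return static_cast<int>(b) * static_cast<int>(sizeof(uintptr_t));
  }
};

int main() {
  IsolateDataSketch data{{0x1000, 0x2000}};  // fake entry addresses
  const auto* base =
      reinterpret_cast<const uint8_t*>(data.builtin_entry_table.data());
  // Morally: LoadU64(dst, MemOperand(kRootRegister, BuiltinEntrySlotOffset(b)))
  uintptr_t entry;
  std::memcpy(&entry,
              base + IsolateDataSketch::BuiltinEntrySlotOffset(Builtin::kAdd),
              sizeof(entry));
  std::printf("%#zx\n", static_cast<size_t>(entry));  // prints 0x2000
  return 0;
}
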
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 76ed4c2018..db6a53780e 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
using TurboAssemblerBase::TurboAssemblerBase;
void CallBuiltin(Builtin builtin, Condition cond);
+ void TailCallBuiltin(Builtin builtin);
void Popcnt32(Register dst, Register src);
void Popcnt64(Register dst, Register src);
// Converts the integer (untagged smi) in |src| to a double, storing
@@ -707,6 +708,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
void LoadCodeObjectEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
@@ -768,6 +771,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(Register dst, const MemOperand& src) { LoadU64(dst, src); }
void SmiUntag(Register dst, const MemOperand& src, RCBit rc = LeaveRC,
Register scratch = no_reg);
@@ -788,6 +792,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
SmiUntag(smi);
}
+ // Shift left by kSmiShift
+ void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
+ void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
+ ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
+ }
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -972,6 +982,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
+ void LoadTaggedSignedField(Register destination, MemOperand field_operand,
+ Register scratch);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
@@ -1295,12 +1307,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities
- // Shift left by kSmiShift
- void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
- void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
- ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
- }
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value, r0);
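
The SmiTag overloads moved from MacroAssembler into TurboAssembler above implement smi tagging as a plain left shift by kSmiShift. A sketch of the encoding they rely on, assuming the common 64-bit configuration where kSmiShift is 32 (SmiValuesAre32Bits()):

// Sketch of smi tagging/untagging, assuming kSmiShift == 32. The shift is done
// on the unsigned representation, matching ShiftLeftU64, to avoid signed UB.
#include <cstdint>
#include <cstdio>

constexpr int kSmiShift = 32;  // assumption: SmiValuesAre32Bits()

constexpr int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(
      static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift);
}
constexpr int32_t SmiUntag(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);  // arithmetic shift back down
}

int main() {
  int64_t tagged = SmiTag(-42);
  std::printf("%d\n", SmiUntag(tagged));  // -42: the round trip is lossless
  std::printf("%llx\n", static_cast<unsigned long long>(SmiTag(7)));  // 700000000
  return 0;
}
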
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index 28dc5981fe..e36e4d1e9a 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -20,22 +20,14 @@ constexpr bool ShouldPadArguments(int argument_count) {
}
#ifdef DEBUG
-struct CountIfValidRegisterFunctor {
- template <typename RegType>
- constexpr int operator()(int count, RegType reg) const {
- return count + (reg.is_valid() ? 1 : 0);
- }
-};
-
-template <typename RegType, typename... RegTypes,
+template <typename... RegTypes,
// All arguments must be either Register or DoubleRegister.
- typename = typename std::enable_if<
- base::is_same<Register, RegType, RegTypes...>::value ||
- base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
-inline constexpr bool AreAliased(RegType first_reg, RegTypes... regs) {
- int num_different_regs = RegListBase<RegType>{first_reg, regs...}.Count();
- int num_given_regs =
- base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
+ typename = typename std::enable_if_t<
+ std::conjunction_v<std::is_same<Register, RegTypes>...> ||
+ std::conjunction_v<std::is_same<DoubleRegister, RegTypes>...>>>
+inline constexpr bool AreAliased(RegTypes... regs) {
+ int num_different_regs = RegListBase{regs...}.Count();
+ int num_given_regs = (... + (regs.is_valid() ? 1 : 0));
return num_different_regs < num_given_regs;
}
#endif
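
The rewritten AreAliased above drops the CountIfValidRegisterFunctor/base::fold machinery in favour of C++17 fold expressions and std::conjunction_v. A self-contained sketch of the same counting idea with a toy register type (Reg, PopCount, and the 64-register limit are stand-ins, not V8's types):

// Sketch of the AreAliased idiom: compare the number of *distinct* valid
// registers against the number of valid registers passed in.
#include <cstdint>
#include <cstdio>
#include <type_traits>

struct Reg {
  int code;  // -1 plays the role of no_reg
  constexpr bool is_valid() const { return code >= 0; }
};

constexpr int PopCount(uint64_t x) {
  int n = 0;
  while (x) { x &= x - 1; ++n; }
  return n;
}

template <typename... Regs,
          typename = std::enable_if_t<
              std::conjunction_v<std::is_same<Reg, Regs>...>>>
constexpr bool AreAliased(Regs... regs) {
  uint64_t mask = 0;
  // Stand-in for RegListBase{regs...}.Count(): distinct valid registers.
  ((mask |= regs.is_valid() ? uint64_t{1} << regs.code : 0u), ...);
  int num_different = PopCount(mask);
  // Fold expression from the patch: how many valid registers were passed.
  int num_given = (0 + ... + (regs.is_valid() ? 1 : 0));
  return num_different < num_given;
}

int main() {
  constexpr Reg r1{1}, r2{2}, no_reg{-1};
  static_assert(!AreAliased(r1, r2, no_reg), "all distinct");
  static_assert(AreAliased(r1, r1, r2), "r1 passed twice");
  std::printf("ok\n");
  return 0;
}
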
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index d1b4ed2b92..d110e387b4 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -12,7 +12,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/code-inl.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index a5809286ef..c24fb31a7b 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -57,7 +57,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
-#if (defined CAN_USE_RVV_INSTRUCTIONS) || (defined USE_SIMULATOR)
+#if (defined CAN_USE_RVV_INSTRUCTIONS)
answer |= 1u << RISCV_SIMD;
#endif // def CAN_USE_RVV_INSTRUCTIONS || USE_SIMULATOR
return answer;
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 52bba9f21c..33816db57f 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -4987,9 +4987,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t6);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 1b04c73e6a..cb738a26dc 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -862,6 +862,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
VRegister v_scratch);
void Round_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
+
+ // -------------------------------------------------------------------------
+ // Smi utilities.
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ // Smi goes to upper 32
+ slli(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ // Smi is shifted left by 1
+ Add32(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -1231,23 +1249,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2, Label* stack_overflow,
Label* done = nullptr);
- // -------------------------------------------------------------------------
- // Smi utilities.
-
- void SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (SmiValuesAre32Bits()) {
- // Smi goes to upper 32
- slli(dst, src, 32);
- } else {
- DCHECK(SmiValuesAre31Bits());
- // Smi is shifted left by 1
- Add32(dst, src, src);
- }
- }
-
- void SmiTag(Register reg) { SmiTag(reg, reg); }
-
// Left-shifted from int32 equivalent of Smi.
void SmiScale(Register dst, Register src, int scale) {
if (SmiValuesAre32Bits()) {
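
In the SmiTag helpers moved above, the 31-bit-smi branch (pointer compression) uses Add32(dst, src, src) rather than an explicit shift: adding a value to itself is the same as shifting it left by one. A tiny illustration of that equivalence (plain C++, small values only so the signed addition cannot overflow):

// Add-to-self == shift-by-one, which is all 31-bit smi tagging needs
// (kSmiTag == 0 ends up in the low bit).
#include <cstdio>

constexpr int SmiTag31(int value) { return value + value; }

int main() {
  static_assert(SmiTag31(21) == (21 << 1), "add-to-self is a 1-bit shift");
  std::printf("%d\n", SmiTag31(21) >> 1);  // untag: prints 21
  return 0;
}
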
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 1037eff0cd..352b002327 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -296,8 +296,9 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
int bytes = 0;
RegList exclusions = {exclusion1, exclusion2, exclusion3};
@@ -306,18 +307,19 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPushF64OrV128(kCallerSavedDoubles);
+ MultiPushF64OrV128(kCallerSavedDoubles, scratch);
bytes += kStackSavedSavedFPSizeInBytes;
}
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPopF64OrV128(kCallerSavedDoubles);
+ MultiPopF64OrV128(kCallerSavedDoubles, scratch);
bytes += kStackSavedSavedFPSizeInBytes;
}
@@ -667,7 +669,8 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register location) {
+void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
+ Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
@@ -676,7 +679,7 @@ void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register location) {
if ((dregs.bits() & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
stack_offset -= kSimd128Size;
- StoreV128(dreg, MemOperand(location, stack_offset), r0);
+ StoreV128(dreg, MemOperand(location, stack_offset), scratch);
}
}
}
@@ -694,20 +697,21 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register location) {
+void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
+ Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
if ((dregs.bits() & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
- LoadV128(dreg, MemOperand(location, stack_offset), r0);
+ LoadV128(dreg, MemOperand(location, stack_offset), scratch);
stack_offset += kSimd128Size;
}
}
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
+void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -719,7 +723,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
LoadAndTestP(r1, r1); // If > 0 then simd is available.
ble(&push_doubles, Label::kNear);
// Save vector registers, don't save double registers anymore.
- MultiPushV128(dregs);
+ MultiPushV128(dregs, scratch);
b(&simd_pushed);
bind(&push_doubles);
// Simd not supported, only save double registers.
@@ -730,7 +734,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
bind(&simd_pushed);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
- MultiPushV128(dregs);
+ MultiPushV128(dregs, scratch);
} else {
MultiPushDoubles(dregs);
lay(sp, MemOperand(sp, -(dregs.Count() * kDoubleSize)));
@@ -741,7 +745,8 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
#endif
}
-void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
+void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+ Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
@@ -752,7 +757,7 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
LoadAndTestP(r1, r1); // If > 0 then simd is available.
ble(&pop_doubles, Label::kNear);
// Pop vector registers, don't pop double registers anymore.
- MultiPopV128(dregs);
+ MultiPopV128(dregs, scratch);
b(&simd_popped);
bind(&pop_doubles);
// Simd not supported, only pop double registers.
@@ -761,7 +766,7 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
bind(&simd_popped);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
- MultiPopV128(dregs);
+ MultiPopV128(dregs, scratch);
} else {
lay(sp, MemOperand(sp, dregs.Count() * kDoubleSize));
MultiPopDoubles(dregs);
@@ -3771,6 +3776,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
}
void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
+ // TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
#else
@@ -3787,6 +3793,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) {
}
void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
+ // TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
#else
@@ -3803,6 +3810,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) {
}
void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
+ // TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
#else
@@ -4073,6 +4081,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
Register scratch) {
+ DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
vl(dst, mem, Condition(0));
} else {
@@ -4102,6 +4111,7 @@ void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
Register scratch) {
+ DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
vst(src, mem, Condition(0));
} else {
@@ -4826,9 +4836,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { stop(); }
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index f5abeb9860..8e89f3d1f9 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -178,11 +178,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
- void MultiPushV128(DoubleRegList dregs, Register location = sp);
- void MultiPopV128(DoubleRegList dregs, Register location = sp);
+ void MultiPushV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
+ void MultiPopV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
- void MultiPushF64OrV128(DoubleRegList dregs, Register location = sp);
- void MultiPopF64OrV128(DoubleRegList dregs, Register location = sp);
+ void MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
+ void MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
@@ -193,13 +197,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push caller saved registers on the stack, and return the number of bytes
// stack pointer is adjusted.
- int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Restore caller saved registers from the stack, and return the number of
// bytes stack pointer is adjusted.
- int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1 = no_reg, Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Load an object from the root table.
@@ -1054,6 +1059,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
SmiUntag(smi);
}
+ // Shift left by kSmiShift
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+ void SmiTag(Register dst, Register src) {
+ ShiftLeftU64(dst, src, Operand(kSmiShift));
+ }
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1682,12 +1693,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities
- // Shift left by kSmiShift
- void SmiTag(Register reg) { SmiTag(reg, reg); }
- void SmiTag(Register dst, Register src) {
- ShiftLeftU64(dst, src, Operand(kSmiShift));
- }
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 93ec1ae54f..7b6ae44ead 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -67,7 +67,11 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#if V8_TARGET_ARCH_IA32
and_(dst, src);
#elif V8_TARGET_ARCH_X64
- andq(dst, src);
+ if (is_uint32(src.value())) {
+ andl(dst, src);
+ } else {
+ andq(dst, src);
+ }
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index bffaa5c326..1f2a3dcfd9 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -185,7 +185,7 @@ struct MachineRepresentationOf {
// If T defines kMachineType, then we take the machine representation from
// there.
template <class T>
-struct MachineRepresentationOf<T, base::void_t<decltype(T::kMachineType)>> {
+struct MachineRepresentationOf<T, std::void_t<decltype(T::kMachineType)>> {
static const MachineRepresentation value = T::kMachineType.representation();
};
template <class T>
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index 24a237c16a..e12be0d567 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -10,7 +10,7 @@
#include "src/common/globals.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate-inl.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index df15db18cc..a29357e78b 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3372,6 +3372,28 @@ void Assembler::haddps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::cmpeqss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x00); // EQ == 0
+}
+
+void Assembler::cmpeqsd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x00); // EQ == 0
+}
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3389,6 +3411,13 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundss(XMMRegister dst, Operand src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0A);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0B);
@@ -3396,6 +3425,13 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundsd(XMMRegister dst, Operand src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0B);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x08);
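
The new cmpeqss/cmpeqsd emitters above encode CMPSS/CMPSD with immediate predicate 0 (EQ), which writes an all-ones lane when the operands compare equal. The same operation expressed with the standard SSE2 intrinsic, as a quick reference for the semantics (not V8 code):

// Semantics of the EQ predicate (imm8 == 0x00) that cmpeqsd emits, shown via
// the equivalent intrinsic: the low double lane becomes all ones on equality.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <emmintrin.h>

int main() {
  __m128d a = _mm_set_sd(1.5);
  __m128d b = _mm_set_sd(1.5);
  __m128d eq = _mm_cmpeq_sd(a, b);  // same predicate byte as cmpeqsd
  uint64_t low_lane;
  std::memcpy(&low_lane, &eq, sizeof(low_lane));
  std::printf("%#llx\n", static_cast<unsigned long long>(low_lane));  // all ones
  return 0;
}
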
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 1d2d07ffdd..c6c2e7ed41 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -1288,6 +1288,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void haddps(XMMRegister dst, XMMRegister src);
void haddps(XMMRegister dst, Operand src);
+ void cmpeqsd(XMMRegister dst, XMMRegister src);
+ void cmpeqss(XMMRegister dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
@@ -1309,7 +1311,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pinsrq(XMMRegister dst, Operand src, uint8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundss(XMMRegister dst, Operand src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundsd(XMMRegister dst, Operand src, RoundingMode mode);
void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1556,11 +1560,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundss(XMMRegister dst, XMMRegister src1, Operand src2,
+ RoundingMode mode) {
+ vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
RoundingMode mode) {
vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundsd(XMMRegister dst, XMMRegister src1, Operand src2,
+ RoundingMode mode) {
+ vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
@@ -1625,45 +1639,76 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vpd(0x50, idst, xmm0, src);
}
void vpmovmskb(Register dst, XMMRegister src);
+ void vcmpeqss(XMMRegister dst, XMMRegister src) {
+ vss(0xC2, dst, dst, src);
+ emit(0x00); // EQ == 0
+ }
+ void vcmpeqsd(XMMRegister dst, XMMRegister src) {
+ vsd(0xC2, dst, dst, src);
+ emit(0x00); // EQ == 0
+ }
void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
+ void vcmpps(YMMRegister dst, YMMRegister src1, YMMRegister src2, int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
+ void vcmpps(YMMRegister dst, YMMRegister src1, Operand src2, int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
+ void vcmppd(YMMRegister dst, YMMRegister src1, YMMRegister src2, int8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
-
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmppd(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
- vcmppd(dst, src1, src2, imm8); \
- }
-
- AVX_CMP_P(vcmpeq, 0x0)
- AVX_CMP_P(vcmplt, 0x1)
- AVX_CMP_P(vcmple, 0x2)
- AVX_CMP_P(vcmpunord, 0x3)
- AVX_CMP_P(vcmpneq, 0x4)
- AVX_CMP_P(vcmpnlt, 0x5)
- AVX_CMP_P(vcmpnle, 0x6)
- AVX_CMP_P(vcmpge, 0xd)
+ void vcmppd(YMMRegister dst, YMMRegister src1, Operand src2, int8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
+#define AVX_CMP_P(instr, imm8, SIMDRegister) \
+ void instr##ps(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##ps(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ }
+
+ AVX_CMP_P(vcmpeq, 0x0, XMMRegister)
+ AVX_CMP_P(vcmpeq, 0x0, YMMRegister)
+ AVX_CMP_P(vcmplt, 0x1, XMMRegister)
+ AVX_CMP_P(vcmplt, 0x1, YMMRegister)
+ AVX_CMP_P(vcmple, 0x2, XMMRegister)
+ AVX_CMP_P(vcmple, 0x2, YMMRegister)
+ AVX_CMP_P(vcmpunord, 0x3, XMMRegister)
+ AVX_CMP_P(vcmpunord, 0x3, YMMRegister)
+ AVX_CMP_P(vcmpneq, 0x4, XMMRegister)
+ AVX_CMP_P(vcmpneq, 0x4, YMMRegister)
+ AVX_CMP_P(vcmpnlt, 0x5, XMMRegister)
+ AVX_CMP_P(vcmpnlt, 0x5, YMMRegister)
+ AVX_CMP_P(vcmpnle, 0x6, XMMRegister)
+ AVX_CMP_P(vcmpnle, 0x6, YMMRegister)
+ AVX_CMP_P(vcmpge, 0xd, XMMRegister)
+ AVX_CMP_P(vcmpge, 0xd, YMMRegister)
#undef AVX_CMP_P
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 4e28e4df66..ebaa40be0c 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -1215,6 +1215,23 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
+void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vcmpeqss(dst, src);
+ } else {
+ cmpeqss(dst, src);
+ }
+}
+
+void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vcmpeqsd(dst, src);
+ } else {
+ cmpeqsd(dst, src);
+ }
+}
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
@@ -3099,6 +3116,23 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc));
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
+void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ LoadTaggedPointerField(scratch,
+ Operand(kJavaScriptCallCodeStartRegister, offset));
+ testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, not_zero);
+}
+
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
@@ -3108,9 +3142,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
// performance tuning which emits a different instruction sequence.
call(EntryFromBuiltinAsOperand(target));
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index f1aba1355c..1f07fdcf2b 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -169,6 +169,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, Operand src);
+ void Cmpeqss(XMMRegister dst, XMMRegister src);
+ void Cmpeqsd(XMMRegister dst, XMMRegister src);
+
void PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8);
void Pextrq(Register dst, XMMRegister src, int8_t imm8);
@@ -412,6 +415,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
Condition cc = always);
+ void BailoutIfDeoptimized(Register scratch);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index cd374c8238..bc9a6eb1c3 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -434,6 +434,13 @@ F FUNCTION_CAST(Address addr) {
#define USES_FUNCTION_DESCRIPTORS 0
#endif
+constexpr bool StaticStringsEqual(const char* s1, const char* s2) {
+ for (;; ++s1, ++s2) {
+ if (*s1 != *s2) return false;
+ if (*s1 == '\0') return true;
+ }
+}
+
// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.
@@ -518,11 +525,8 @@ constexpr int kNoDeoptimizationId = -1;
// - Lazy: the code has been marked as dependent on some assumption which
// is checked elsewhere and can trigger deoptimization the next time the
// code is executed.
-// - Soft: similar to lazy deoptimization, but does not contribute to the
-// total deopt count which can lead to disabling optimization for a function.
enum class DeoptimizeKind : uint8_t {
kEager,
- kSoft,
kLazy,
};
constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
@@ -532,16 +536,17 @@ constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
inline size_t hash_value(DeoptimizeKind kind) {
return static_cast<size_t>(kind);
}
-inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+constexpr const char* ToString(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
- return os << "Eager";
- case DeoptimizeKind::kSoft:
- return os << "Soft";
+ return "Eager";
case DeoptimizeKind::kLazy:
- return os << "Lazy";
+ return "Lazy";
}
}
+inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+ return os << ToString(kind);
+}
// Indicates whether the lookup is related to sloppy-mode block-scoped
// function hoisting, and is a synthetic assignment for that.
@@ -597,6 +602,10 @@ constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
// other architectures.
#if V8_TARGET_ARCH_X64
constexpr int kCodeAlignmentBits = 6;
+#elif V8_TARGET_ARCH_PPC64
+// 64 byte alignment is needed on ppc64 to make sure p10 prefixed instructions
+// don't cross 64-byte boundaries.
+constexpr int kCodeAlignmentBits = 6;
#else
constexpr int kCodeAlignmentBits = 5;
#endif
@@ -906,7 +915,7 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum PageSize { kRegular, kLarge };
+enum class PageSize { kRegular, kLarge };
enum class CodeFlushMode {
kFlushBytecode,
@@ -1611,45 +1620,52 @@ inline std::ostream& operator<<(std::ostream& os,
using FileAndLine = std::pair<const char*, int>;
-enum class OptimizationMarker : int32_t {
- // These values are set so that it is easy to check if there is a marker where
- // some processing needs to be done.
- kNone = 0b000,
- kInOptimizationQueue = 0b001,
- kCompileMaglev_NotConcurrent = 0b010,
- kCompileMaglev_Concurrent = 0b011,
- kCompileTurbofan_NotConcurrent = 0b100,
- kCompileTurbofan_Concurrent = 0b101,
- kLastOptimizationMarker = kCompileTurbofan_Concurrent,
+#define TIERING_STATE_LIST(V) \
+ V(None, 0b000) \
+ V(InProgress, 0b001) \
+ V(RequestMaglev_Synchronous, 0b010) \
+ V(RequestMaglev_Concurrent, 0b011) \
+ V(RequestTurbofan_Synchronous, 0b100) \
+ V(RequestTurbofan_Concurrent, 0b101)
+
+enum class TieringState : int32_t {
+#define V(Name, Value) k##Name = Value,
+ TIERING_STATE_LIST(V)
+#undef V
+ kLastTieringState = kRequestTurbofan_Concurrent,
};
-// For kNone or kInOptimizationQueue we don't need any special processing.
-// To check both cases using a single mask, we expect the kNone to be 0 and
-// kInOptimizationQueue to be 1 so that we can mask off the lsb for checking.
-STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b00 &&
- static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
- 0b01);
-STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
- 0b111);
-static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
-inline std::ostream& operator<<(std::ostream& os,
- const OptimizationMarker& marker) {
+// To efficiently check whether a marker is kNone or kInProgress using a single
+// mask, we expect the kNone to be 0 and kInProgress to be 1 so that we can
+// mask off the lsb for checking.
+STATIC_ASSERT(static_cast<int>(TieringState::kNone) == 0b00 &&
+ static_cast<int>(TieringState::kInProgress) == 0b01);
+STATIC_ASSERT(static_cast<int>(TieringState::kLastTieringState) <= 0b111);
+static constexpr uint32_t kNoneOrInProgressMask = 0b110;
+
+#define V(Name, Value) \
+ constexpr bool Is##Name(TieringState state) { \
+ return state == TieringState::k##Name; \
+ }
+TIERING_STATE_LIST(V)
+#undef V
+
+constexpr const char* ToString(TieringState marker) {
switch (marker) {
- case OptimizationMarker::kNone:
- return os << "OptimizationMarker::kNone";
- case OptimizationMarker::kCompileMaglev_NotConcurrent:
- return os << "OptimizationMarker::kCompileMaglev_NotConcurrent";
- case OptimizationMarker::kCompileMaglev_Concurrent:
- return os << "OptimizationMarker::kCompileMaglev_Concurrent";
- case OptimizationMarker::kCompileTurbofan_NotConcurrent:
- return os << "OptimizationMarker::kCompileTurbofan_NotConcurrent";
- case OptimizationMarker::kCompileTurbofan_Concurrent:
- return os << "OptimizationMarker::kCompileTurbofan_Concurrent";
- case OptimizationMarker::kInOptimizationQueue:
- return os << "OptimizationMarker::kInOptimizationQueue";
+#define V(Name, Value) \
+ case TieringState::k##Name: \
+ return "TieringState::k" #Name;
+ TIERING_STATE_LIST(V)
+#undef V
}
}
+inline std::ostream& operator<<(std::ostream& os, TieringState marker) {
+ return os << ToString(marker);
+}
+
+#undef TIERING_STATE_LIST
+
enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
enum class CallFeedbackContent { kTarget, kReceiver };
@@ -1665,12 +1681,19 @@ inline std::ostream& operator<<(std::ostream& os,
enum class BlockingBehavior { kBlock, kDontBlock };
-enum class ConcurrencyMode : uint8_t { kNotConcurrent, kConcurrent };
+enum class ConcurrencyMode : uint8_t { kSynchronous, kConcurrent };
+
+constexpr bool IsSynchronous(ConcurrencyMode mode) {
+ return mode == ConcurrencyMode::kSynchronous;
+}
+constexpr bool IsConcurrent(ConcurrencyMode mode) {
+ return mode == ConcurrencyMode::kConcurrent;
+}
-inline const char* ToString(ConcurrencyMode mode) {
+constexpr const char* ToString(ConcurrencyMode mode) {
switch (mode) {
- case ConcurrencyMode::kNotConcurrent:
- return "ConcurrencyMode::kNotConcurrent";
+ case ConcurrencyMode::kSynchronous:
+ return "ConcurrencyMode::kSynchronous";
case ConcurrencyMode::kConcurrent:
return "ConcurrencyMode::kConcurrent";
}
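
The kNoneOrInProgressMask trick asserted above works because kNone is 0b000 and kInProgress is 0b001: masking off the low bit collapses both to zero, so one test distinguishes "nothing to compile" from every real tiering request. A sketch using the enum values from the patch (the IsNoneOrInProgress helper name is mine, for illustration):

// Single-mask check for "no tiering work pending"; enum values match the
// TIERING_STATE_LIST above, the helper name is illustrative.
#include <cstdint>
#include <cstdio>

enum class TieringState : int32_t {
  kNone = 0b000,
  kInProgress = 0b001,
  kRequestMaglev_Synchronous = 0b010,
  kRequestMaglev_Concurrent = 0b011,
  kRequestTurbofan_Synchronous = 0b100,
  kRequestTurbofan_Concurrent = 0b101,
};

constexpr uint32_t kNoneOrInProgressMask = 0b110;

constexpr bool IsNoneOrInProgress(TieringState state) {
  return (static_cast<uint32_t>(state) & kNoneOrInProgressMask) == 0;
}

int main() {
  static_assert(IsNoneOrInProgress(TieringState::kNone), "0b000 & 0b110 == 0");
  static_assert(IsNoneOrInProgress(TieringState::kInProgress), "0b001 & 0b110 == 0");
  static_assert(!IsNoneOrInProgress(TieringState::kRequestTurbofan_Concurrent),
                "real requests keep a high bit set");
  std::printf("mask check holds\n");
  return 0;
}
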
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index e5d4e91544..22fde49ffd 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -68,6 +68,7 @@ namespace internal {
T(CannotFreezeArrayBufferView, \
"Cannot freeze array buffer views with elements") \
T(CannotSeal, "Cannot seal") \
+ T(CannotWrap, "Cannot wrap target callable") \
T(CircularStructure, "Converting circular structure to JSON%") \
T(ConstructAbstractClass, "Abstract class % not directly constructable") \
T(ConstAssign, "Assignment to constant variable.") \
@@ -147,7 +148,8 @@ namespace internal {
T(NonStringImportAssertionValue, "Import assertion value must be a string") \
T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
T(NotAnIterator, "% is not an iterator") \
- T(NotAPromise, "% is not a promise") \
+ T(PromiseNewTargetUndefined, \
+ "Promise constructor cannot be invoked without 'new'") \
T(NotConstructor, "% is not a constructor") \
T(NotDateObject, "this is not a Date object.") \
T(NotGeneric, "% requires that 'this' be a %") \
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 873a8d982f..fe26d242f6 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -9,35 +9,20 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/js-function.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
-namespace {
-
-void DisposeCompilationJob(OptimizedCompilationJob* job,
- bool restore_function_code) {
- if (restore_function_code) {
- Handle<JSFunction> function = job->compilation_info()->closure();
- function->set_code(function->shared().GetCode(), kReleaseStore);
- if (function->IsInOptimizationQueue()) {
- function->ClearOptimizationMarker();
- }
- }
- delete job;
-}
-
-} // namespace
-
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
public:
explicit CompileTask(Isolate* isolate,
@@ -96,18 +81,18 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
DeleteArray(input_queue_);
}
-OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
+TurbofanCompilationJob* OptimizingCompileDispatcher::NextInput(
LocalIsolate* local_isolate) {
base::MutexGuard access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
- OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
+ TurbofanCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
return job;
}
-void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
+void OptimizingCompileDispatcher::CompileNext(TurbofanCompilationJob* job,
LocalIsolate* local_isolate) {
if (!job) return;
@@ -129,26 +114,27 @@ void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
for (;;) {
- OptimizedCompilationJob* job = nullptr;
+ std::unique_ptr<TurbofanCompilationJob> job;
{
base::MutexGuard access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
- job = output_queue_.front();
+ job.reset(output_queue_.front());
output_queue_.pop();
}
- DisposeCompilationJob(job, restore_function_code);
+ Compiler::DisposeTurbofanCompilationJob(job.get(), restore_function_code);
}
}
void OptimizingCompileDispatcher::FlushInputQueue() {
base::MutexGuard access_input_queue_(&input_queue_mutex_);
while (input_queue_length_ > 0) {
- OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
+ std::unique_ptr<TurbofanCompilationJob> job(
+ input_queue_[InputQueueIndex(0)]);
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
- DisposeCompilationJob(job, true);
+ Compiler::DisposeTurbofanCompilationJob(job.get(), true);
}
}
@@ -196,25 +182,29 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
for (;;) {
- OptimizedCompilationJob* job = nullptr;
+ std::unique_ptr<TurbofanCompilationJob> job;
{
base::MutexGuard access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
- job = output_queue_.front();
+ job.reset(output_queue_.front());
output_queue_.pop();
}
OptimizedCompilationInfo* info = job->compilation_info();
Handle<JSFunction> function(*info->closure(), isolate_);
- if (function->HasAvailableCodeKind(info->code_kind())) {
+
+ // If another racing task has already finished compiling and installing the
+ // requested code kind on the function, throw out the current job.
+ if (!info->is_osr() && function->HasAvailableCodeKind(info->code_kind())) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Aborting compilation for ");
function->ShortPrint();
PrintF(" as it has already been optimized.\n");
}
- DisposeCompilationJob(job, false);
- } else {
- Compiler::FinalizeOptimizedCompilationJob(job, isolate_);
+ Compiler::DisposeTurbofanCompilationJob(job.get(), false);
+ continue;
}
+
+ Compiler::FinalizeTurbofanCompilationJob(job.get(), isolate_);
}
}
@@ -227,7 +217,7 @@ bool OptimizingCompileDispatcher::HasJobs() {
}
void OptimizingCompileDispatcher::QueueForOptimization(
- OptimizedCompilationJob* job) {
+ TurbofanCompilationJob* job) {
DCHECK(IsQueueAvailable());
{
// Add job to the back of the input queue.
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index ccfb4f2a4a..f583a7ab35 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -19,7 +19,7 @@ namespace v8 {
namespace internal {
class LocalHeap;
-class OptimizedCompilationJob;
+class TurbofanCompilationJob;
class RuntimeCallStats;
class SharedFunctionInfo;
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
input_queue_shift_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
- input_queue_ = NewArray<OptimizedCompilationJob*>(input_queue_capacity_);
+ input_queue_ = NewArray<TurbofanCompilationJob*>(input_queue_capacity_);
}
~OptimizingCompileDispatcher();
@@ -40,7 +40,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void Stop();
void Flush(BlockingBehavior blocking_behavior);
// Takes ownership of |job|.
- void QueueForOptimization(OptimizedCompilationJob* job);
+ void QueueForOptimization(TurbofanCompilationJob* job);
void AwaitCompileTasks();
void InstallOptimizedFunctions();
@@ -72,8 +72,8 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
bool restore_function_code);
void FlushInputQueue();
void FlushOutputQueue(bool restore_function_code);
- void CompileNext(OptimizedCompilationJob* job, LocalIsolate* local_isolate);
- OptimizedCompilationJob* NextInput(LocalIsolate* local_isolate);
+ void CompileNext(TurbofanCompilationJob* job, LocalIsolate* local_isolate);
+ TurbofanCompilationJob* NextInput(LocalIsolate* local_isolate);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
@@ -85,14 +85,14 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
Isolate* isolate_;
// Circular queue of incoming recompilation tasks (including OSR).
- OptimizedCompilationJob** input_queue_;
+ TurbofanCompilationJob** input_queue_;
int input_queue_capacity_;
int input_queue_length_;
int input_queue_shift_;
base::Mutex input_queue_mutex_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
- std::queue<OptimizedCompilationJob*> output_queue_;
+ std::queue<TurbofanCompilationJob*> output_queue_;
// Used for job based recompilation which has multiple producers on
// different threads.
base::Mutex output_queue_mutex_;
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index ab929915e1..d88aefcf6b 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -943,6 +943,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
+ if (FLAG_debug_code) {
+ // Checking that |value| is not a cleared weakref: our write barrier
+ // does not support that for now.
+ __ cmp(value, Operand(kClearedWeakHeapObjectLower32));
+ __ Check(ne, AbortReason::kOperandIsCleared);
+ }
+
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Operand offset(0);
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index de80d20d51..0aadd024e4 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -963,6 +963,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset = Operand(i.InputRegister(1));
}
Register value = i.InputRegister(2);
+
+ if (FLAG_debug_code) {
+ // Checking that |value| is not a cleared weakref: our write barrier
+ // does not support that for now.
+ __ cmp(value, Operand(kClearedWeakHeapObjectLower32));
+ __ Check(ne, AbortReason::kOperandIsCleared);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
@@ -3195,7 +3203,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ PushCPURegList(saves_fp);
// Save registers.
- __ PushCPURegList<TurboAssembler::kSignLR>(saves);
+ __ PushCPURegList(saves);
if (returns != 0) {
__ Claim(returns);
@@ -3213,7 +3221,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Restore registers.
CPURegList saves =
CPURegList(kXRegSizeInBits, call_descriptor->CalleeSavedRegisters());
- __ PopCPURegList<TurboAssembler::kAuthLR>(saves);
+ __ PopCPURegList(saves);
// Restore fp registers.
CPURegList saves_fp =
@@ -3300,8 +3308,7 @@ void CodeGenerator::PrepareForDeoptimizationExits(
__ ForceConstantPoolEmissionWithoutJump();
// We are conservative here, reserving sufficient space for the largest deopt
// kind.
- DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
- Deoptimizer::kNonLazyDeoptExitSize);
+ DCHECK_GE(Deoptimizer::kLazyDeoptExitSize, Deoptimizer::kEagerDeoptExitSize);
__ CheckVeneerPool(
false, false,
static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 7057b47369..ec0dce6ae2 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -171,7 +171,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
++lazy_deopt_count_;
tasm()->BindExceptionHandler(exit->label());
} else {
- ++non_lazy_deopt_count_;
+ ++eager_deopt_count_;
tasm()->bind(exit->label());
}
Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
@@ -327,17 +327,13 @@ void CodeGenerator::AssembleCode() {
// emitted before emitting the deoptimization exits.
PrepareForDeoptimizationExits(&deoptimization_exits_);
- if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
- deopt_exit_start_offset_ = tasm()->pc_offset();
- }
+ deopt_exit_start_offset_ = tasm()->pc_offset();
// Assemble deoptimization exits.
offsets_info_.deoptimization_exits = tasm()->pc_offset();
int last_updated = 0;
- // We sort the deoptimization exits here so that the lazy ones will
- // be visited second last, and eagerwithresume last. We need this as on
- // architectures where Deoptimizer::kSupportsFixedDeoptExitSizes is true,
- // lazy deopts and eagerwithresume might need additional instructions.
+ // We sort the deoptimization exits here so that the lazy ones will be visited
+ // last. We need this as lazy deopts might need additional instructions.
auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
// The deoptimization exits are sorted so that lazy deopt exits appear after
// eager deopts.
@@ -349,9 +345,7 @@ void CodeGenerator::AssembleCode() {
}
return a->pc_offset() < b->pc_offset();
};
- if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
- std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
- }
+ std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
{
#ifdef V8_TARGET_ARCH_PPC64
@@ -360,9 +354,7 @@ void CodeGenerator::AssembleCode() {
#endif
for (DeoptimizationExit* exit : deoptimization_exits_) {
if (exit->emitted()) continue;
- if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
- exit->set_deoptimization_id(next_deoptimization_id_++);
- }
+ exit->set_deoptimization_id(next_deoptimization_id_++);
result_ = AssembleDeoptimizerCall(exit);
if (result_ != kSuccess) return;
@@ -906,7 +898,7 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
- data->SetNonLazyDeoptCount(Smi::FromInt(non_lazy_deopt_count_));
+ data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count_));
data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count_));
if (info->has_shared_info()) {
@@ -1144,9 +1136,6 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
#else // DEBUG
0);
#endif // DEBUG
- if (!Deoptimizer::kSupportsFixedDeoptExitSizes) {
- exit->set_deoptimization_id(next_deoptimization_id_++);
- }
if (immediate_args_count != 0) {
auto immediate_args = zone()->New<ZoneVector<ImmediateOperand*>>(zone());
InstructionOperandIterator imm_iter(
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 4bceba3025..a18a2808b4 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -421,7 +421,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
int next_deoptimization_id_ = 0;
int deopt_exit_start_offset_ = 0;
- int non_lazy_deopt_count_ = 0;
+ int eager_deopt_count_ = 0;
int lazy_deopt_count_ = 0;
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 2730728cdd..e4d47e0d31 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -972,6 +972,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(index);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
+
+ if (FLAG_debug_code) {
+ // Checking that |value| is not a cleared weakref: our write barrier
+ // does not support that for now.
+ __ cmp(value, Immediate(kClearedWeakHeapObjectLower32));
+ __ Check(not_equal, AbortReason::kOperandIsCleared);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 3a4fb705b6..1efc9f812c 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -9,6 +9,7 @@
#include <type_traits>
#include <vector>
+#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/iterator.h"
#include "src/base/logging.h"
@@ -132,11 +133,12 @@ class IA32OperandGenerator final : public OperandGenerator {
size_t* input_count, RegisterMode register_mode = kRegister) {
AddressingMode mode = kMode_MRI;
if (displacement_mode == kNegativeDisplacement) {
- displacement = -displacement;
+ displacement = base::bits::WraparoundNeg32(displacement);
}
if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
- displacement += OpParameter<int32_t>(base->op());
+ displacement = base::bits::WraparoundAdd32(
+ displacement, OpParameter<int32_t>(base->op()));
base = nullptr;
}
}
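
The displacement arithmetic above now goes through base::bits::WraparoundNeg32/WraparoundAdd32 so that negating or summing int32 displacements cannot hit signed-overflow undefined behavior. A sketch of what such helpers boil down to; only the names come from the patch, the reimplementation is illustrative:

// Wraparound int32 arithmetic: do the math in uint32_t, then convert back,
// giving two's-complement wraparound instead of signed-overflow UB.
#include <cstdint>
#include <cstdio>

constexpr int32_t WraparoundNeg32(int32_t x) {
  return static_cast<int32_t>(0u - static_cast<uint32_t>(x));
}
constexpr int32_t WraparoundAdd32(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int main() {
  // Negating INT32_MIN overflows as a signed op; the wraparound version yields
  // INT32_MIN again, matching two's-complement hardware behavior.
  std::printf("%d\n", WraparoundNeg32(INT32_MIN));     // -2147483648
  std::printf("%d\n", WraparoundAdd32(INT32_MAX, 1));  // -2147483648
  return 0;
}
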
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 12edbcb37e..3c96e7f30e 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -870,23 +870,10 @@ Instruction* InstructionSelector::EmitWithContinuation(
continuation_inputs_.push_back(g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
int immediate_args_count = 0;
- if (cont->has_extra_args()) {
- for (int i = 0; i < cont->extra_args_count(); i++) {
- InstructionOperand op = cont->extra_args()[i];
- continuation_inputs_.push_back(op);
- input_count++;
- if (op.IsImmediate()) {
- immediate_args_count++;
- } else {
- // All immediate args should be added last.
- DCHECK_EQ(immediate_args_count, 0);
- }
- }
- }
opcode |= DeoptImmedArgsCountField::encode(immediate_args_count) |
DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
- AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
- cont->reason(), cont->node_id(), cont->feedback(),
+ AppendDeoptimizeArguments(&continuation_inputs_, cont->reason(),
+ cont->node_id(), cont->feedback(),
FrameState{cont->frame_state()});
} else if (cont->IsSet()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
@@ -918,14 +905,12 @@ Instruction* InstructionSelector::EmitWithContinuation(
}
void InstructionSelector::AppendDeoptimizeArguments(
- InstructionOperandVector* args, DeoptimizeKind kind,
- DeoptimizeReason reason, NodeId node_id, FeedbackSource const& feedback,
- FrameState frame_state) {
+ InstructionOperandVector* args, DeoptimizeReason reason, NodeId node_id,
+ FeedbackSource const& feedback, FrameState frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
- DCHECK_NE(DeoptimizeKind::kLazy, kind);
int const state_id = sequence()->AddDeoptimizationEntry(
- descriptor, kind, reason, node_id, feedback);
+ descriptor, DeoptimizeKind::kEager, reason, node_id, feedback);
args->push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -1170,6 +1155,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
frame_state_entries);
+ USE(pushed_count);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@@ -1354,7 +1340,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
FrameState value{input->InputAt(0)};
- VisitDeoptimize(p.kind(), p.reason(), input->id(), p.feedback(), value);
+ VisitDeoptimize(p.reason(), input->id(), p.feedback(), value);
break;
}
case BasicBlock::kThrow:
@@ -3143,15 +3129,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
+ kNotEqual, p.reason(), node->id(), p.feedback(),
+ FrameState{node->InputAt(1)});
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1));
+ kEqual, p.reason(), node->id(), p.feedback(),
+ FrameState{node->InputAt(1)});
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
@@ -3179,14 +3166,12 @@ void InstructionSelector::EmitIdentity(Node* node) {
SetRename(node, node->InputAt(0));
}
-void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
- DeoptimizeReason reason,
+void InstructionSelector::VisitDeoptimize(DeoptimizeReason reason,
NodeId node_id,
FeedbackSource const& feedback,
FrameState frame_state) {
InstructionOperandVector args(instruction_zone());
- AppendDeoptimizeArguments(&args, kind, reason, node_id, feedback,
- frame_state);
+ AppendDeoptimizeArguments(&args, reason, node_id, feedback, frame_state);
Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
}
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index b33de8e856..c1a12d97ec 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -55,13 +55,21 @@ class FlagsContinuation final {
}
// Creates a new flags continuation for an eager deoptimization exit.
- static FlagsContinuation ForDeoptimize(
- FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
- InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
- return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
- node_id, feedback, frame_state, extra_args,
- extra_args_count);
+ static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+ DeoptimizeReason reason,
+ NodeId node_id,
+ FeedbackSource const& feedback,
+ FrameState frame_state) {
+ return FlagsContinuation(kFlags_deoptimize, condition, reason, node_id,
+ feedback, frame_state);
+ }
+ static FlagsContinuation ForDeoptimizeForTesting(
+ FlagsCondition condition, DeoptimizeReason reason, NodeId node_id,
+ FeedbackSource const& feedback, Node* frame_state) {
+ // test-instruction-scheduler.cc passes a dummy Node* as frame_state.
+ // Contents don't matter as long as it's not nullptr.
+ return FlagsContinuation(kFlags_deoptimize, condition, reason, node_id,
+ feedback, frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -90,10 +98,6 @@ class FlagsContinuation final {
DCHECK(!IsNone());
return condition_;
}
- DeoptimizeKind kind() const {
- DCHECK(IsDeoptimize());
- return kind_;
- }
DeoptimizeReason reason() const {
DCHECK(IsDeoptimize());
return reason_;
@@ -110,18 +114,6 @@ class FlagsContinuation final {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
}
- bool has_extra_args() const {
- DCHECK(IsDeoptimize());
- return extra_args_ != nullptr;
- }
- const InstructionOperand* extra_args() const {
- DCHECK(has_extra_args());
- return extra_args_;
- }
- int extra_args_count() const {
- DCHECK(has_extra_args());
- return extra_args_count_;
- }
Node* result() const {
DCHECK(IsSet() || IsSelect());
return frame_state_or_result_;
@@ -207,19 +199,14 @@ class FlagsContinuation final {
}
FlagsContinuation(FlagsMode mode, FlagsCondition condition,
- DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback,
- Node* frame_state, InstructionOperand* extra_args,
- int extra_args_count)
+ DeoptimizeReason reason, NodeId node_id,
+ FeedbackSource const& feedback, Node* frame_state)
: mode_(mode),
condition_(condition),
- kind_(kind),
reason_(reason),
node_id_(node_id),
feedback_(feedback),
- frame_state_or_result_(frame_state),
- extra_args_(extra_args),
- extra_args_count_(extra_args_count) {
+ frame_state_or_result_(frame_state) {
DCHECK(mode == kFlags_deoptimize);
DCHECK_NOT_NULL(frame_state);
}
@@ -253,14 +240,11 @@ class FlagsContinuation final {
FlagsMode const mode_;
FlagsCondition condition_;
- DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
NodeId node_id_; // Only valid if mode_ == kFlags_deoptimize*
FeedbackSource feedback_; // Only valid if mode_ == kFlags_deoptimize*
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
// or mode_ == kFlags_set.
- InstructionOperand* extra_args_; // Only valid if mode_ == kFlags_deoptimize*
- int extra_args_count_; // Only valid if mode_ == kFlags_deoptimize*
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
TrapId trap_id_; // Only valid if mode_ == kFlags_trap.
@@ -501,8 +485,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
}
void AppendDeoptimizeArguments(InstructionOperandVector* args,
- DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback,
+ DeoptimizeReason reason, NodeId node_id,
+ FeedbackSource const& feedback,
FrameState frame_state);
void EmitTableSwitch(const SwitchInfo& sw,
@@ -637,9 +621,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
- void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback,
- FrameState frame_state);
+ void VisitDeoptimize(DeoptimizeReason reason, NodeId node_id,
+ FeedbackSource const& feedback, FrameState frame_state);
void VisitSelect(Node* node);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index 6d70841fd7..3fd2fb441f 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -2043,7 +2043,7 @@ void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from,
DCHECK_EQ(data_->GetBlock(instr_index), block);
if (instr_index == block->last_instruction_index()) {
// Add gap move to the first instruction of every successor block.
- for (const RpoNumber succ : block->successors()) {
+ for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = data_->GetBlock(succ);
DCHECK_EQ(1, successor->PredecessorCount());
data_->AddGapMove(successor->first_instruction_index(),
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 1f9a4a70c9..905ae6f301 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -97,7 +97,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
break;
case kMode_MRI:
*first_index += 2;
- return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ return MemOperand(InputRegister(index + 0), InputInt64(index + 1));
case kMode_MRR:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
@@ -454,34 +454,42 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- bool is_atomic = i.InputInt32(2); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- __ asm_instrx(result, operand); \
- } \
- if (is_atomic) __ lwsync(); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrp, asm_instrx) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ bool is_atomic = i.InputInt32(2); \
+ if (mode == kMode_MRI) { \
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
+ __ asm_instrp(result, operand); \
+ } else { \
+ __ asm_instr(result, operand); \
+ } \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ if (is_atomic) __ lwsync(); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Register result = i.OutputRegister(); \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- bool is_atomic = i.InputInt32(2); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- __ asm_instrx(result, operand); \
- } \
- if (is_atomic) __ lwsync(); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrp, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ bool is_atomic = i.InputInt32(2); \
+ if (mode == kMode_MRI) { \
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
+ __ asm_instrp(result, operand); \
+ } else { \
+ __ asm_instr(result, operand); \
+ } \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ if (is_atomic) __ lwsync(); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_LOAD_INTEGER_RR(asm_instr) \
@@ -1112,6 +1120,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1);
OutOfLineRecordWrite* ool;
+ if (FLAG_debug_code) {
+ // Checking that |value| is not a cleared weakref: our write barrier
+ // does not support that for now.
+ __ CmpS64(value, Operand(kClearedWeakHeapObjectLower32), kScratchReg);
+ __ Check(ne, AbortReason::kOperandIsCleared);
+ }
+
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_MRI) {
@@ -1944,34 +1959,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kPPC_LoadWordU8:
- ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ ASSEMBLE_LOAD_INTEGER(lbz, plbz, lbzx);
break;
case kPPC_LoadWordS8:
- ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ ASSEMBLE_LOAD_INTEGER(lbz, plbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
break;
case kPPC_LoadWordU16:
- ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+ ASSEMBLE_LOAD_INTEGER(lhz, plhz, lhzx);
break;
case kPPC_LoadWordS16:
- ASSEMBLE_LOAD_INTEGER(lha, lhax);
+ ASSEMBLE_LOAD_INTEGER(lha, plha, lhax);
break;
case kPPC_LoadWordU32:
- ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
break;
case kPPC_LoadWordS32:
- ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+ ASSEMBLE_LOAD_INTEGER(lwa, plwa, lwax);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
- ASSEMBLE_LOAD_INTEGER(ld, ldx);
+ ASSEMBLE_LOAD_INTEGER(ld, pld, ldx);
break;
#endif
case kPPC_LoadFloat32:
- ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
+ ASSEMBLE_LOAD_FLOAT(lfs, plfs, lfsx);
break;
case kPPC_LoadDouble:
- ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
+ ASSEMBLE_LOAD_FLOAT(lfd, plfd, lfdx);
break;
case kPPC_LoadSimd128: {
Simd128Register result = i.OutputSimd128Register();
@@ -3767,18 +3782,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadDecompressTaggedSigned: {
CHECK(instr->HasOutput());
- ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
break;
}
case kPPC_LoadDecompressTaggedPointer: {
CHECK(instr->HasOutput());
- ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
case kPPC_LoadDecompressAnyTagged: {
CHECK(instr->HasOutput());
- ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
@@ -4035,7 +4050,7 @@ void CodeGenerator::AssembleConstructFrame() {
} else {
StackFrame::Type type = info()->GetOutputStackFrameType();
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
- // efficient intialization of the constant pool pointer register).
+ // efficient initialization of the constant pool pointer register).
__ StubPrologue(type);
#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall() ||
@@ -4243,11 +4258,10 @@ void CodeGenerator::PrepareForDeoptimizationExits(
for (DeoptimizationExit* exit : deoptimization_exits_) {
total_size += (exit->kind() == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize;
+ : Deoptimizer::kEagerDeoptExitSize;
}
__ CheckTrampolinePoolQuick(total_size);
- DCHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index dedd268dde..4eee159ea8 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -18,6 +18,7 @@ enum ImmediateMode {
kInt16Imm_Negate,
kInt16Imm_4ByteAligned,
kShift32Imm,
+ kInt34Imm,
kShift64Imm,
kNoImmediate
};
@@ -58,6 +59,8 @@ class PPCOperandGenerator final : public OperandGenerator {
return is_int16(value) && !(value & 3);
case kShift32Imm:
return 0 <= value && value < 32;
+ case kInt34Imm:
+ return is_int34(value);
case kShift64Imm:
return 0 <= value && value < 64;
case kNoImmediate:
@@ -173,7 +176,12 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
InstructionCode opcode = kArchNop;
- ImmediateMode mode = kInt16Imm;
+ ImmediateMode mode;
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ mode = kInt34Imm;
+ } else {
+ mode = kInt16Imm;
+ }
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kPPC_LoadFloat32;
@@ -196,7 +204,7 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
case MachineRepresentation::kSandboxedPointer: // Fall through.
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_LoadWordS32;
- mode = kInt16Imm_4ByteAligned;
+ if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
break;
#else
UNREACHABLE();
@@ -218,7 +226,7 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
#endif
case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
- mode = kInt16Imm_4ByteAligned;
+ if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
break;
case MachineRepresentation::kSimd128:
opcode = kPPC_LoadSimd128;
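
The new kInt34Imm mode above is gated on PPC_10_PLUS because Power10 prefixed loads (plwz, pld, plfd, ...) accept a signed 34-bit displacement, letting the selector fold far larger constant offsets into the memory operand than the 16-bit D-form allowed. A rough stand-in for the is_int34 predicate (the real helper lives in V8's assembler utilities; this is only the assumed shape):

#include <cstdint>

constexpr bool IsInt34(int64_t value) {
  // A signed 34-bit immediate covers [-2^33, 2^33 - 1].
  return value >= -(int64_t{1} << 33) && value < (int64_t{1} << 33);
}

static_assert(IsInt34((int64_t{1} << 33) - 1), "2^33 - 1 still fits");
static_assert(!IsInt34(int64_t{1} << 33), "2^33 needs 35 bits");
static_assert(IsInt34(-(int64_t{1} << 33)), "most negative 34-bit value");

int main() { return 0; }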
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
index b4099c5fad..9073b18b8d 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -362,7 +362,7 @@ bool BlockAssessments::IsStaleReferenceStackSlot(InstructionOperand op) {
void BlockAssessments::Print() const {
StdoutStream os;
- for (const auto pair : map()) {
+ for (const auto& pair : map()) {
const InstructionOperand op = pair.first;
const Assessment* assessment = pair.second;
// Use operator<< so we can write the assessment on the same
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 56bb8c6879..69d1abb5ae 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -4075,11 +4075,10 @@ void CodeGenerator::PrepareForDeoptimizationExits(
for (DeoptimizationExit* exit : deoptimization_exits_) {
total_size += (exit->kind() == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize;
+ : Deoptimizer::kEagerDeoptExitSize;
}
__ CheckTrampolinePoolQuick(total_size);
- DCHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 24593d8cd9..a4547402e9 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -1361,6 +1361,59 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void EmitSignExtendWord(InstructionSelector* selector, Node* node) {
+ RiscvOperandGenerator g(selector);
+ Node* value = node->InputAt(0);
+ IrOpcode::Value lastOpCode = value->opcode();
+ if (lastOpCode == IrOpcode::kInt32Add || lastOpCode == IrOpcode::kInt32Sub ||
+ lastOpCode == IrOpcode::kWord32And || lastOpCode == IrOpcode::kWord32Or ||
+ lastOpCode == IrOpcode::kWord32Xor ||
+ lastOpCode == IrOpcode::kWord32Shl ||
+ lastOpCode == IrOpcode::kWord32Shr ||
+ lastOpCode == IrOpcode::kWord32Sar ||
+ lastOpCode == IrOpcode::kUint32Mod) {
+ selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
+ if (lastOpCode == IrOpcode::kInt32Mul) {
+ Node* left = value->InputAt(0);
+ Node* right = value->InputAt(1);
+ if (selector->CanCover(value, left) && selector->CanCover(value, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ return;
+ }
+ }
+ }
+ selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
+ if (lastOpCode == IrOpcode::kInt32Mod) {
+ Node* left = value->InputAt(0);
+ Node* right = value->InputAt(1);
+ if (selector->CanCover(value, left) && selector->CanCover(value, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Dmod.
+ selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ return;
+ }
+ }
+ }
+ selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
+ selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(value));
+}
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
if ((value->opcode() == IrOpcode::kLoad ||
@@ -1385,9 +1438,7 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
EmitLoad(this, value, opcode, node);
} else {
- RiscvOperandGenerator g(this);
- Emit(kRiscvShl32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.TempImmediate(0));
+ EmitSignExtendWord(this, node);
}
}
@@ -1452,8 +1503,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// truncated value; arm treats it as nop thus the upper 32-bit as undefined;
// Riscv emits ext instruction which zero-extend the 32-bit value; for riscv,
// we do sign-extension of the truncated value
- Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ EmitSignExtendWord(this, node);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
@@ -3236,9 +3286,7 @@ void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
}
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
- RiscvOperandGenerator g(this);
- Emit(kRiscvShl32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.TempImmediate(0));
+ EmitSignExtendWord(this, node);
}
void InstructionSelector::VisitF32x4Pmin(Node* node) {
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 3128a2303e..480131f7c0 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1277,7 +1277,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
- int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ int bytes = __ PushCallerSaved(fp_mode_, ip, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
@@ -1291,7 +1291,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
- int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ int bytes = __ PopCallerSaved(fp_mode_, ip, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
@@ -1427,6 +1427,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1);
OutOfLineRecordWrite* ool;
+ if (FLAG_debug_code) {
+ // Checking that |value| is not a cleared weakref: our write barrier
+ // does not support that for now.
+ __ CmpS64(value, Operand(kClearedWeakHeapObjectLower32));
+ __ Check(ne, AbortReason::kOperandIsCleared);
+ }
+
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_MRI) {
@@ -3400,7 +3407,7 @@ void CodeGenerator::AssembleConstructFrame() {
} else {
StackFrame::Type type = info()->GetOutputStackFrameType();
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
- // efficient intialization of the constant pool pointer register).
+ // efficient initialization of the constant pool pointer register).
__ StubPrologue(type);
#if V8_ENABLE_WEBASSEMBLY
if (call_descriptor->IsWasmFunctionCall() ||
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 949fc1ad43..5ec6fb2040 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -1159,22 +1159,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ Assert(equal, AbortReason::kWrongFunctionCodeStart);
}
-// Check if the code object is marked for deoptimization. If it is, then it
-// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
-// to:
-// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
-// 2. test kMarkedForDeoptimizationBit in those flags; and
-// 3. if it is not zero then it jumps to the builtin.
-void CodeGenerator::BailoutIfDeoptimized() {
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(rbx,
- Operand(kJavaScriptCallCodeStartRegister, offset));
- __ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
- RelocInfo::CODE_TARGET, not_zero);
-}
+void CodeGenerator::BailoutIfDeoptimized() { __ BailoutIfDeoptimized(rbx); }
bool ShouldClearOutputRegisterBeforeInstruction(CodeGenerator* g,
Instruction* instr) {
@@ -1495,6 +1480,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(index);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
+
+ if (FLAG_debug_code) {
+ // Checking that |value| is not a cleared weakref: our write barrier
+ // does not support that for now.
+ __ Cmp(value, kClearedWeakHeapObjectLower32);
+ __ Check(not_equal, AbortReason::kOperandIsCleared);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
@@ -2018,17 +2011,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEFloat32ToInt64:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
- } else {
- __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ case kSSEFloat32ToInt64: {
+ Register output_reg = i.OutputRegister(0);
+ if (instr->OutputCount() == 1) {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Cvttss2siq(output_reg, i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(output_reg, i.InputOperand(0));
+ }
+ break;
}
- if (instr->OutputCount() > 1) {
- __ Move(i.OutputRegister(1), 1);
+ DCHECK_EQ(2, instr->OutputCount());
+ Register success_reg = i.OutputRegister(1);
+ if (CpuFeatures::IsSupported(SSE4_1) || CpuFeatures::IsSupported(AVX)) {
+ DoubleRegister rounded = kScratchDoubleReg;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Roundss(rounded, i.InputDoubleRegister(0), kRoundToZero);
+ __ Cvttss2siq(output_reg, i.InputDoubleRegister(0));
+ } else {
+ __ Roundss(rounded, i.InputOperand(0), kRoundToZero);
+ // Convert {rounded} instead of the input operand, to avoid another
+ // load.
+ __ Cvttss2siq(output_reg, rounded);
+ }
+ DoubleRegister converted_back = i.TempSimd128Register(0);
+ __ Cvtqsi2ss(converted_back, output_reg);
+ // Compare the converted back value to the rounded value, set
+ // success_reg to 0 if they differ, or 1 on success.
+ __ Cmpeqss(converted_back, rounded);
+ __ Movq(success_reg, converted_back);
+ __ And(success_reg, Immediate(1));
+ } else {
+ // Less efficient code for non-AVX and non-SSE4_1 CPUs.
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ __ Move(success_reg, 1);
Label done;
Label fail;
- __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
+ __ Move(kScratchDoubleReg, float{INT64_MIN});
if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
@@ -2038,26 +2061,57 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(parity_even, &fail, Label::kNear);
// If the input is INT64_MIN, then the conversion succeeds.
__ j(equal, &done, Label::kNear);
- __ cmpq(i.OutputRegister(0), Immediate(1));
+ __ cmpq(output_reg, Immediate(1));
// If the conversion results in INT64_MIN, but the input was not
// INT64_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
- __ Move(i.OutputRegister(1), 0);
+ __ Move(success_reg, 0);
__ bind(&done);
}
break;
- case kSSEFloat64ToInt64:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
- } else {
- __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
+ }
+ case kSSEFloat64ToInt64: {
+ Register output_reg = i.OutputRegister(0);
+ if (instr->OutputCount() == 1) {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Cvttsd2siq(output_reg, i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(output_reg, i.InputOperand(0));
+ }
+ break;
}
- if (instr->OutputCount() > 1) {
- __ Move(i.OutputRegister(1), 1);
+ DCHECK_EQ(2, instr->OutputCount());
+ Register success_reg = i.OutputRegister(1);
+ if (CpuFeatures::IsSupported(SSE4_1) || CpuFeatures::IsSupported(AVX)) {
+ DoubleRegister rounded = kScratchDoubleReg;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Roundsd(rounded, i.InputDoubleRegister(0), kRoundToZero);
+ __ Cvttsd2siq(output_reg, i.InputDoubleRegister(0));
+ } else {
+ __ Roundsd(rounded, i.InputOperand(0), kRoundToZero);
+ // Convert {rounded} instead of the input operand, to avoid another
+ // load.
+ __ Cvttsd2siq(output_reg, rounded);
+ }
+ DoubleRegister converted_back = i.TempSimd128Register(0);
+ __ Cvtqsi2sd(converted_back, output_reg);
+ // Compare the converted back value to the rounded value, set
+ // success_reg to 0 if they differ, or 1 on success.
+ __ Cmpeqsd(converted_back, rounded);
+ __ Movq(success_reg, converted_back);
+ __ And(success_reg, Immediate(1));
+ } else {
+ // Less efficient code for non-AVX and non-SSE4_1 CPUs.
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
+ }
+ __ Move(success_reg, 1);
Label done;
Label fail;
- __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
+ __ Move(kScratchDoubleReg, double{INT64_MIN});
if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
@@ -2067,15 +2121,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(parity_even, &fail, Label::kNear);
// If the input is INT64_MIN, then the conversion succeeds.
__ j(equal, &done, Label::kNear);
- __ cmpq(i.OutputRegister(0), Immediate(1));
+ __ cmpq(output_reg, Immediate(1));
// If the conversion results in INT64_MIN, but the input was not
// INT64_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
- __ Move(i.OutputRegister(1), 0);
+ __ Move(success_reg, 0);
__ bind(&done);
}
break;
+ }
case kSSEFloat32ToUint64: {
Label fail;
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
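
On SSE4.1/AVX hardware, the rewritten kSSEFloat32ToInt64 and kSSEFloat64ToInt64 cases above decide success by rounding toward zero, truncating, converting the integer back, and comparing it with the rounded input. A scalar C++ sketch of that round-trip check (hypothetical helper name; an explicit range guard stands in for the hardware's "invalid" INT64_MIN result so the sketch stays well defined):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <utility>

std::pair<int64_t, bool> TryTruncateFloat64ToInt64(double input) {
  double rounded = std::trunc(input);  // Roundsd ..., kRoundToZero
  // Anything outside [-2^63, 2^63), and NaN, cannot be truncated; the
  // hardware reports this as INT64_MIN, the sketch guards explicitly.
  if (!(rounded >= -9223372036854775808.0 &&
        rounded < 9223372036854775808.0)) {
    return {INT64_MIN, false};
  }
  int64_t result = static_cast<int64_t>(rounded);        // Cvttsd2siq
  double converted_back = static_cast<double>(result);   // Cvtqsi2sd
  bool success = (converted_back == rounded);            // Cmpeqsd + Movq + And
  return {result, success};
}

int main() {
  auto in_range = TryTruncateFloat64ToInt64(3.75);   // {3, true}
  auto too_big = TryTruncateFloat64ToInt64(1e300);   // {INT64_MIN, false}
  std::printf("%lld %d / %lld %d\n",
              static_cast<long long>(in_range.first), in_range.second,
              static_cast<long long>(too_big.first), too_big.second);
  return 0;
}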
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a30b50183c..d40f659e4b 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -784,7 +784,21 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
void InstructionSelector::VisitWord64And(Node* node) {
- VisitBinop(this, node, kX64And);
+ X64OperandGenerator g(this);
+ Uint64BinopMatcher m(node);
+ if (m.right().Is(0xFF)) {
+ Emit(kX64Movzxbq, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else if (m.right().Is(0xFFFF)) {
+ Emit(kX64Movzxwq, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else if (m.right().Is(0xFFFFFFFF)) {
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else if (m.right().IsInRange(std::numeric_limits<uint32_t>::min(),
+ std::numeric_limits<uint32_t>::max())) {
+ Emit(kX64And32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseImmediate(static_cast<int32_t>(m.right().ResolvedValue())));
+ } else {
+ VisitBinop(this, node, kX64And);
+ }
}
void InstructionSelector::VisitWord32Or(Node* node) {
@@ -1431,30 +1445,36 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
InstructionOperand outputs[2];
+ InstructionOperand temps[1];
size_t output_count = 0;
+ size_t temp_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
Node* success_output = NodeProperties::FindProjection(node, 1);
if (success_output) {
outputs[output_count++] = g.DefineAsRegister(success_output);
+ temps[temp_count++] = g.TempSimd128Register();
}
- Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
+ Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs, temp_count, temps);
}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
InstructionOperand outputs[2];
+ InstructionOperand temps[1];
size_t output_count = 0;
+ size_t temp_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
Node* success_output = NodeProperties::FindProjection(node, 1);
if (success_output) {
outputs[output_count++] = g.DefineAsRegister(success_output);
+ temps[temp_count++] = g.TempSimd128Register();
}
- Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
+ Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs, temp_count, temps);
}
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
diff --git a/deps/v8/src/compiler/branch-condition-duplicator.cc b/deps/v8/src/compiler/branch-condition-duplicator.cc
new file mode 100644
index 0000000000..06fec18144
--- /dev/null
+++ b/deps/v8/src/compiler/branch-condition-duplicator.cc
@@ -0,0 +1,109 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/branch-condition-duplicator.h"
+
+#include "src/compiler/backend/instruction-codes.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+bool IsBranch(Node* node) { return node->opcode() == IrOpcode::kBranch; }
+
+bool CanDuplicate(Node* node) {
+ // We only allow duplication of comparisons and "cheap" binary operations
+ // (cheap = not multiplication or division). The idea is that those
+ // instructions set the ZF flag, and thus do not require a "== 0" to be added
+ // before the branch. Duplicating other nodes, on the other hand, makes little
+ // sense, because a "== 0" would need to be inserted in branches anyways.
+ switch (node->opcode()) {
+#define BRANCH_CASE(op) \
+ case IrOpcode::k##op: \
+ break;
+ MACHINE_COMPARE_BINOP_LIST(BRANCH_CASE)
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt32Sub:
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord32Or:
+ case IrOpcode::kInt64Add:
+ case IrOpcode::kInt64Sub:
+ case IrOpcode::kWord64And:
+ case IrOpcode::kWord64Or:
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord32Shr:
+ case IrOpcode::kWord64Shl:
+ case IrOpcode::kWord64Shr:
+ break;
+ default:
+ return false;
+ }
+
+ // We do not duplicate nodes if all their inputs are used a single time,
+ // because this would keep those inputs alive, thus increasing register
+ // pressure.
+ int all_inputs_have_only_a_single_use = true;
+ for (Node* input : node->inputs()) {
+ if (input->UseCount() > 1) {
+ all_inputs_have_only_a_single_use = false;
+ }
+ }
+ if (all_inputs_have_only_a_single_use) {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+Node* BranchConditionDuplicator::DuplicateNode(Node* node) {
+ return graph_->CloneNode(node);
+}
+
+void BranchConditionDuplicator::DuplicateConditionIfNeeded(Node* node) {
+ if (!IsBranch(node)) return;
+
+ Node* condNode = node->InputAt(0);
+ if (condNode->UseCount() > 1 && CanDuplicate(condNode)) {
+ node->ReplaceInput(0, DuplicateNode(condNode));
+ }
+}
+
+void BranchConditionDuplicator::Enqueue(Node* node) {
+ if (seen_.Get(node)) return;
+ seen_.Set(node, true);
+ to_visit_.push(node);
+}
+
+void BranchConditionDuplicator::VisitNode(Node* node) {
+ DuplicateConditionIfNeeded(node);
+
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ Enqueue(NodeProperties::GetControlInput(node, i));
+ }
+}
+
+void BranchConditionDuplicator::ProcessGraph() {
+ Enqueue(graph_->end());
+ while (!to_visit_.empty()) {
+ Node* node = to_visit_.front();
+ to_visit_.pop();
+ VisitNode(node);
+ }
+}
+
+BranchConditionDuplicator::BranchConditionDuplicator(Zone* zone, Graph* graph)
+ : graph_(graph), to_visit_(zone), seen_(graph, 2) {}
+
+void BranchConditionDuplicator::Reduce() { ProcessGraph(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/branch-condition-duplicator.h b/deps/v8/src/compiler/branch-condition-duplicator.h
new file mode 100644
index 0000000000..76e97bc291
--- /dev/null
+++ b/deps/v8/src/compiler/branch-condition-duplicator.h
@@ -0,0 +1,85 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BRANCH_CONDITION_DUPLICATOR_H_
+#define V8_COMPILER_BRANCH_CONDITION_DUPLICATOR_H_
+
+#include "src/base/macros.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declare.
+class Graph;
+
+// BranchConditionDuplicator makes sure that the condition nodes of branches are
+// used only once. When it finds a branch node whose condition has multiples
+// uses, this condition is duplicated.
+//
+// Doing this enables the InstructionSelector to generate more efficient code
+// for branches. For instance, consider this code:
+//
+// if (a + b == 0) { /* some code */ }
+// if (a + b == 0) { /* more code */ }
+//
+// Then the generated code will be something like (using registers "ra" for "a"
+// and "rb" for "b", and "rt" a temporary register):
+//
+// add ra, rb ; a + b
+// cmp ra, 0 ; (a + b) == 0
+// sete rt ; rt = (a + b) == 0
+// cmp rt, 0 ; rt == 0
+// jz
+// ...
+// cmp rt, 0 ; rt == 0
+// jz
+//
+// As you can see, TurboFan materialized the == bit into a temporary register.
+// However, since the "add" instruction sets the ZF flag (on x64), it can be
+// used to determine whether the jump should be taken or not. The code we'd
+// like to generate instead is thus:
+//
+// add ra, rb
+// jnz
+// ...
+// add ra, rb
+// jnz
+//
+// However, this requires generating the "add ra, rb" instruction twice. Due to
+// how virtual registers are assigned in TurboFan (there is a map from node ID
+// to virtual registers), both "add" instructions will use the same virtual
+// register as output, which will break SSA.
+//
+// In order to overcome this issue, BranchConditionDuplicator duplicates branch
+// conditions that are used more than once, so that they can be generated right
+// before each branch without worrying about breaking SSA.
+
+class V8_EXPORT_PRIVATE BranchConditionDuplicator final {
+ public:
+ BranchConditionDuplicator(Zone* zone, Graph* graph);
+ ~BranchConditionDuplicator() = default;
+
+ void Reduce();
+
+ Node* DuplicateNode(Node* node);
+ void DuplicateConditionIfNeeded(Node* node);
+ void Enqueue(Node* node);
+ void VisitNode(Node* node);
+ void ProcessGraph();
+
+ private:
+ Graph* const graph_;
+ ZoneQueue<Node*> to_visit_;
+ NodeMarker<bool> seen_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BRANCH_CONDITION_DUPLICATOR_H_
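
Taken together, the two new files implement a small pre-instruction-selection pass: clone a branch condition when it is cheap to recompute, has more than one use, and at least one of its inputs is itself multi-use (so cloning does not just extend live ranges). A toy model of that decision, using plain structs rather than V8's Node/Graph types:

#include <memory>
#include <string>
#include <vector>

struct ToyNode {
  std::string op;
  std::vector<ToyNode*> inputs;
  int use_count = 0;
};

// Comparisons and cheap binops set the flags as a side effect; anything else
// would still need an explicit "== 0" in front of the branch.
bool IsCheapFlagSetting(const ToyNode& n) {
  return n.op == "Word32Equal" || n.op == "Int32Add" || n.op == "Int32Sub" ||
         n.op == "Word32And" || n.op == "Word32Or";
}

ToyNode* MaybeDuplicateCondition(std::vector<std::unique_ptr<ToyNode>>& graph,
                                 ToyNode* cond) {
  if (cond->use_count <= 1 || !IsCheapFlagSetting(*cond)) return cond;
  // Mirror of CanDuplicate's last check: if every input is single-use,
  // cloning only keeps those inputs alive longer and raises register
  // pressure, so bail out.
  bool any_input_shared = false;
  for (ToyNode* input : cond->inputs) any_input_shared |= input->use_count > 1;
  if (!any_input_shared) return cond;
  graph.push_back(std::make_unique<ToyNode>(*cond));
  ToyNode* clone = graph.back().get();
  cond->use_count--;   // one of the branches now points at the clone instead
  clone->use_count = 1;
  for (ToyNode* input : clone->inputs) input->use_count++;
  return clone;
}

int main() {
  std::vector<std::unique_ptr<ToyNode>> graph;
  graph.push_back(std::make_unique<ToyNode>(ToyNode{"Parameter", {}, 2}));
  graph.push_back(std::make_unique<ToyNode>(ToyNode{"Parameter", {}, 2}));
  graph.push_back(std::make_unique<ToyNode>(
      ToyNode{"Int32Add", {graph[0].get(), graph[1].get()}, 2}));
  // Two branches share the add; the second branch gets its own copy.
  ToyNode* for_second_branch = MaybeDuplicateCondition(graph, graph[2].get());
  return for_second_branch == graph[2].get() ? 1 : 0;
}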
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index e331d4960e..6ad55bde60 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -333,9 +333,8 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(
- common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
- effect, control);
+ control = graph()->NewNode(common()->Deoptimize(p.reason(), p.feedback()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 1a46dade26..0b5ee57767 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -212,9 +212,9 @@ void UpdateInLiveness(BytecodeLivenessState* in_liveness,
if (BytecodeOperands::WritesAccumulator(implicit_register_use)) {
in_liveness->MarkAccumulatorDead();
}
- ITERATE_PACK(
- UpdateInLivenessForOutOperand<bytecode, operand_types, operand_index>(
- in_liveness, iterator));
+ (UpdateInLivenessForOutOperand<bytecode, operand_types, operand_index>(
+ in_liveness, iterator),
+ ...);
if (Bytecodes::WritesImplicitRegister(bytecode)) {
in_liveness->MarkRegisterDead(Register::FromShortStar(bytecode).index());
@@ -223,9 +223,9 @@ void UpdateInLiveness(BytecodeLivenessState* in_liveness,
if (BytecodeOperands::ReadsAccumulator(implicit_register_use)) {
in_liveness->MarkAccumulatorLive();
}
- ITERATE_PACK(
- UpdateInLivenessForInOperand<bytecode, operand_types, operand_index>(
- in_liveness, iterator));
+ (UpdateInLivenessForInOperand<bytecode, operand_types, operand_index>(
+ in_liveness, iterator),
+ ...);
}
template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use,
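
The bytecode-analysis change above swaps the old ITERATE_PACK macro for a C++17 fold over the comma operator; the liveness helper is still invoked once per operand in the pack, in order. Illustrated with a trivial standalone pack:

#include <cstdio>

template <typename... Args>
void CallForEach(Args... args) {
  // Unary fold over the comma operator: one call per pack element,
  // evaluated left to right.
  (std::printf("operand %d\n", args), ...);
}

int main() {
  CallForEach(0, 1, 2);  // prints operand 0, 1, 2 on separate lines
  return 0;
}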
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7fe1b626b1..a86f16886a 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -3977,19 +3977,24 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->FalseConstant());
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
- NewIfFalse();
+ NewIfTrue();
environment()->BindAccumulator(jsgraph()->FalseConstant());
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
- NewIfTrue();
- environment()->BindAccumulator(jsgraph()->TrueConstant());
+ NewIfFalse();
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TrueConstant());
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3997,7 +4002,6 @@ void BytecodeGraphBuilder::BuildJumpIfTrue() {
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
NewIfFalse();
- environment()->BindAccumulator(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index b97f0a342f..26d0cb0994 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -9,13 +9,13 @@
#include <map>
#include <memory>
#include <sstream>
+#include <type_traits>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "include/cppgc/source-location.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
-#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/code-factory.h"
@@ -1086,6 +1086,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Word32T> Word32Shr(TNode<Word32T> value, int shift);
TNode<Word32T> Word32Sar(TNode<Word32T> value, int shift);
+ // Convenience overloads.
+ TNode<Int32T> Int32Sub(TNode<Int32T> left, int right) {
+ return Int32Sub(left, Int32Constant(right));
+ }
+ TNode<Word32T> Word32And(TNode<Word32T> left, int right) {
+ return Word32And(left, Int32Constant(right));
+ }
+ TNode<Int32T> Word32Shl(TNode<Int32T> left, int right) {
+ return Word32Shl(left, Int32Constant(right));
+ }
+ TNode<BoolT> Word32Equal(TNode<Word32T> left, int right) {
+ return Word32Equal(left, Int32Constant(right));
+ }
+
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
TNode<ResType> name(TNode<ArgType> a);
@@ -1253,9 +1267,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... CArgs>
Node* CallCFunction(Node* function, base::Optional<MachineType> return_type,
CArgs... cargs) {
- static_assert(v8::internal::conjunction<
- std::is_convertible<CArgs, CFunctionArg>...>::value,
- "invalid argument types");
+ static_assert(
+ std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
+ "invalid argument types");
return CallCFunction(function, return_type, {cargs...});
}
@@ -1264,9 +1278,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* CallCFunctionWithoutFunctionDescriptor(Node* function,
MachineType return_type,
CArgs... cargs) {
- static_assert(v8::internal::conjunction<
- std::is_convertible<CArgs, CFunctionArg>...>::value,
- "invalid argument types");
+ static_assert(
+ std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
+ "invalid argument types");
return CallCFunctionWithoutFunctionDescriptor(function, return_type,
{cargs...});
}
@@ -1277,9 +1291,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
MachineType return_type,
SaveFPRegsMode mode,
CArgs... cargs) {
- static_assert(v8::internal::conjunction<
- std::is_convertible<CArgs, CFunctionArg>...>::value,
- "invalid argument types");
+ static_assert(
+ std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
+ "invalid argument types");
return CallCFunctionWithCallerSavedRegisters(function, return_type, mode,
{cargs...});
}
@@ -1570,7 +1584,7 @@ class CodeAssemblerParameterizedLabel
{PhiMachineRepresentationOf<Types>...});
auto it = phi_nodes.begin();
USE(it);
- ITERATE_PACK(AssignPhi(results, *(it++)));
+ (AssignPhi(results, *(it++)), ...);
}
template <class T>
static void AssignPhi(TNode<T>* result, Node* phi) {
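
The CallCFunction overloads above now use std::conjunction_v directly instead of the home-grown v8::internal::conjunction trait. A small sketch of the same static check in isolation (CFunctionArgStandIn is a placeholder, not V8's real CFunctionArg type):

#include <type_traits>

using CFunctionArgStandIn = double;

template <class... CArgs>
constexpr bool AllConvertible() {
  // True only if every CArgs type converts to the C-function argument type.
  return std::conjunction_v<
      std::is_convertible<CArgs, CFunctionArgStandIn>...>;
}

static_assert(AllConvertible<int, float>(), "both convert to double");
static_assert(!AllConvertible<int, const char*>(),
              "a pointer does not convert to double");

int main() { return 0; }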
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 8882ee8038..51e6ace862 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -13,45 +13,26 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
- Node* unwrapped = SkipValueIdentities(cond);
- switch (unwrapped->opcode()) {
- case IrOpcode::kInt32Constant: {
- Int32Matcher m(unwrapped);
- return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
- }
- case IrOpcode::kHeapConstant: {
- HeapObjectMatcher m(unwrapped);
- base::Optional<bool> maybe_result = m.Ref(broker).TryGetBooleanValue();
- if (!maybe_result.has_value()) return Decision::kUnknown;
- return *maybe_result ? Decision::kTrue : Decision::kFalse;
- }
- default:
- return Decision::kUnknown;
- }
-}
-
-} // namespace
-
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
JSHeapBroker* broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
- Zone* temp_zone)
+ Zone* temp_zone,
+ BranchSemantics branch_semantics)
: AdvancedReducer(editor),
graph_(graph),
broker_(broker),
common_(common),
machine_(machine),
dead_(graph->NewNode(common->Dead())),
- zone_(temp_zone) {
+ zone_(temp_zone),
+ branch_semantics_(branch_semantics) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -86,6 +67,27 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return NoChange();
}
+Decision CommonOperatorReducer::DecideCondition(Node* const cond) {
+ Node* unwrapped = SkipValueIdentities(cond);
+ switch (unwrapped->opcode()) {
+ case IrOpcode::kInt32Constant: {
+ DCHECK_EQ(branch_semantics_, BranchSemantics::kMachine);
+ Int32Matcher m(unwrapped);
+ return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
+ }
+ case IrOpcode::kHeapConstant: {
+ if (branch_semantics_ == BranchSemantics::kMachine) {
+ return Decision::kTrue;
+ }
+ HeapObjectMatcher m(unwrapped);
+ base::Optional<bool> maybe_result = m.Ref(broker_).TryGetBooleanValue();
+ if (!maybe_result.has_value()) return Decision::kUnknown;
+ return *maybe_result ? Decision::kTrue : Decision::kFalse;
+ }
+ default:
+ return Decision::kUnknown;
+ }
+}
Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
@@ -97,8 +99,8 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
// not (i.e. true being returned in the false case and vice versa).
if (cond->opcode() == IrOpcode::kBooleanNot ||
(cond->opcode() == IrOpcode::kSelect &&
- DecideCondition(broker(), cond->InputAt(1)) == Decision::kFalse &&
- DecideCondition(broker(), cond->InputAt(2)) == Decision::kTrue)) {
+ DecideCondition(cond->InputAt(1)) == Decision::kFalse &&
+ DecideCondition(cond->InputAt(2)) == Decision::kTrue)) {
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -120,7 +122,7 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
return Changed(node);
}
- Decision const decision = DecideCondition(broker(), cond);
+ Decision const decision = DecideCondition(cond);
if (decision == Decision::kUnknown) return NoChange();
Node* const control = node->InputAt(1);
for (Node* const use : node->uses()) {
@@ -154,20 +156,18 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(
- node,
- condition_is_true
- ? common()->DeoptimizeIf(p.kind(), p.reason(), p.feedback())
- : common()->DeoptimizeUnless(p.kind(), p.reason(), p.feedback()));
+ node, condition_is_true
+ ? common()->DeoptimizeIf(p.reason(), p.feedback())
+ : common()->DeoptimizeUnless(p.reason(), p.feedback()));
return Changed(node);
}
- Decision const decision = DecideCondition(broker(), condition);
+ Decision const decision = DecideCondition(condition);
if (decision == Decision::kUnknown) return NoChange();
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(
- common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
- effect, control);
+ control = graph()->NewNode(common()->Deoptimize(p.reason(), p.feedback()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
@@ -392,7 +392,7 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
Node* const vtrue = node->InputAt(1);
Node* const vfalse = node->InputAt(2);
if (vtrue == vfalse) return Replace(vtrue);
- switch (DecideCondition(broker(), cond)) {
+ switch (DecideCondition(cond)) {
case Decision::kTrue:
return Replace(vtrue);
case Decision::kFalse:
@@ -469,7 +469,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
Reduction CommonOperatorReducer::ReduceStaticAssert(Node* node) {
DCHECK_EQ(IrOpcode::kStaticAssert, node->opcode());
Node* const cond = node->InputAt(0);
- Decision decision = DecideCondition(broker(), cond);
+ Decision decision = DecideCondition(cond);
if (decision == Decision::kTrue) {
RelaxEffectsAndControls(node);
return Changed(node);
@@ -483,7 +483,7 @@ Reduction CommonOperatorReducer::ReduceTrapConditional(Node* trap) {
trap->opcode() == IrOpcode::kTrapUnless);
bool trapping_condition = trap->opcode() == IrOpcode::kTrapIf;
Node* const cond = trap->InputAt(0);
- Decision decision = DecideCondition(broker(), cond);
+ Decision decision = DecideCondition(cond);
if (decision == Decision::kUnknown) {
return NoChange();
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index da07703b25..af004333a4 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
@@ -26,7 +27,8 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
public:
CommonOperatorReducer(Editor* editor, Graph* graph, JSHeapBroker* broker,
CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine, Zone* temp_zone);
+ MachineOperatorBuilder* machine, Zone* temp_zone,
+ BranchSemantics branch_semantics);
~CommonOperatorReducer() final = default;
const char* reducer_name() const override { return "CommonOperatorReducer"; }
@@ -48,6 +50,9 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
+ // Helper to determine if conditions are true or false.
+ Decision DecideCondition(Node* const cond);
+
Graph* graph() const { return graph_; }
JSHeapBroker* broker() const { return broker_; }
CommonOperatorBuilder* common() const { return common_; }
@@ -60,6 +65,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
MachineOperatorBuilder* const machine_;
Node* const dead_;
Zone* zone_;
+ BranchSemantics branch_semantics_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 587eb578ec..5800021708 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -67,8 +67,7 @@ int ValueInputCountOfReturn(Operator const* const op) {
}
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
- return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback();
+ return lhs.reason() == rhs.reason() && lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -77,11 +76,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
size_t hash_value(DeoptimizeParameters p) {
FeedbackSource::Hash feebdack_hash;
- return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()));
+ return base::hash_combine(p.reason(), feebdack_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ", " << p.reason() << ", " << p.feedback();
+ return os << p.reason() << ", " << p.feedback();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -408,6 +407,39 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
return OpParameter<IfValueParameters>(op);
}
+V8_EXPORT_PRIVATE bool operator==(const SLVerifierHintParameters& p1,
+ const SLVerifierHintParameters& p2) {
+ return p1.semantics() == p2.semantics() &&
+ p1.override_output_type() == p2.override_output_type();
+}
+
+size_t hash_value(const SLVerifierHintParameters& p) {
+ return base::hash_combine(
+ p.semantics(),
+ p.override_output_type() ? hash_value(*p.override_output_type()) : 0);
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& out,
+ const SLVerifierHintParameters& p) {
+ if (p.semantics()) {
+ p.semantics()->PrintTo(out);
+ } else {
+ out << "nullptr";
+ }
+ out << ", ";
+ if (const auto& t = p.override_output_type()) {
+ t->PrintTo(out);
+ } else {
+    out << "nullopt";
+ }
+ return out;
+}
+
+const SLVerifierHintParameters& SLVerifierHintParametersOf(const Operator* op) {
+ DCHECK_EQ(op->opcode(), IrOpcode::kSLVerifierHint);
+ return OpParameter<SLVerifierHintParameters>(op);
+}
+
#define COMMON_CACHED_OP_LIST(V) \
V(Plug, Operator::kNoProperties, 0, 0, 0, 1, 0, 0) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
@@ -477,28 +509,28 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(7) \
V(8)
-#define CACHED_DEOPTIMIZE_LIST(V) \
- V(Eager, MinusZero) \
- V(Eager, WrongMap) \
- V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
- V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
+#define CACHED_DEOPTIMIZE_LIST(V) \
+ V(MinusZero) \
+ V(WrongMap) \
+ V(InsufficientTypeFeedbackForGenericKeyedAccess) \
+ V(InsufficientTypeFeedbackForGenericNamedAccess)
#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero) \
- V(Eager, Hole) \
- V(Eager, MinusZero) \
- V(Eager, Overflow) \
- V(Eager, Smi)
+ V(DivisionByZero) \
+ V(Hole) \
+ V(MinusZero) \
+ V(Overflow) \
+ V(Smi)
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision) \
- V(Eager, LostPrecisionOrNaN) \
- V(Eager, NotAHeapNumber) \
- V(Eager, NotANumberOrOddball) \
- V(Eager, NotASmi) \
- V(Eager, OutOfBounds) \
- V(Eager, WrongInstanceType) \
- V(Eager, WrongMap)
+ V(LostPrecision) \
+ V(LostPrecisionOrNaN) \
+ V(NotAHeapNumber) \
+ V(NotANumberOrOddball) \
+ V(NotASmi) \
+ V(OutOfBounds) \
+ V(WrongInstanceType) \
+ V(WrongMap)
#define CACHED_TRAP_IF_LIST(V) \
V(TrapDivUnrepresentable) \
@@ -680,7 +712,7 @@ struct CommonOperatorGlobalCache final {
CACHED_LOOP_EXIT_VALUE_LIST(CACHED_LOOP_EXIT_VALUE)
#undef CACHED_LOOP_EXIT_VALUE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeReason kReason>
struct DeoptimizeOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -688,15 +720,14 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
+ DeoptimizeParameters(kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE(Kind, Reason) \
- DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
- kDeoptimize##Kind##Reason##Operator;
+#define CACHED_DEOPTIMIZE(Reason) \
+ DeoptimizeOperator<DeoptimizeReason::k##Reason> kDeoptimize##Reason##Operator;
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeReason kReason>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -704,15 +735,15 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
+ DeoptimizeParameters(kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
- kDeoptimizeIf##Kind##Reason##Operator;
+#define CACHED_DEOPTIMIZE_IF(Reason) \
+ DeoptimizeIfOperator<DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeReason kReason>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -721,12 +752,11 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
+ DeoptimizeParameters(kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason> \
- kDeoptimizeUnless##Kind##Reason##Operator;
+#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
+ DeoptimizeUnlessOperator<DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -892,6 +922,14 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) {
1, 0, source);
}
+const Operator* CommonOperatorBuilder::SLVerifierHint(
+ const Operator* semantics,
+ const base::Optional<Type>& override_output_type) {
+ return zone()->New<Operator1<SLVerifierHintParameters>>(
+ IrOpcode::kSLVerifierHint, Operator::kNoProperties, "SLVerifierHint", 1,
+ 0, 0, 1, 0, 0, SLVerifierHintParameters(semantics, override_output_type));
+}
+
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
#define CACHED_BRANCH(Hint) \
if (hint == BranchHint::k##Hint) { \
@@ -903,17 +941,15 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
}
const Operator* CommonOperatorBuilder::Deoptimize(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback) {
-#define CACHED_DEOPTIMIZE(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimize##Kind##Reason##Operator; \
+ DeoptimizeReason reason, FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE(Reason) \
+ if (reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimize##Reason##Operator; \
}
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimize, // opcodes
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -923,17 +959,15 @@ const Operator* CommonOperatorBuilder::Deoptimize(
}
const Operator* CommonOperatorBuilder::DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
+ DeoptimizeReason reason, FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Reason) \
+ if (reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -943,17 +977,15 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
}
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+ DeoptimizeReason reason, FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
+ if (reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 58e04f9cf6..d56bd6bba9 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -32,6 +32,13 @@ class Operator;
class Type;
class Node;
+// The semantics of IrOpcode::kBranch changes throughout the pipeline, and in
+// particular is not the same before SimplifiedLowering (JS semantics) and after
+// (machine branch semantics). Some passes are applied both before and after
+// SimplifiedLowering, and use the BranchSemantics enum to know how branches
+// should be treated.
+enum class BranchSemantics { kJS, kMachine };
+
// Prediction hint for branches.
enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
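As a rough illustration of the distinction described in the comment above, here is a standalone sketch (hypothetical helper and types, not taken from the patch): before SimplifiedLowering a constant condition is a JS boolean, afterwards it is a machine word where non-zero means the branch is taken.

#include <cstdint>

enum class BranchSemantics { kJS, kMachine };
enum class Decision { kTrue, kFalse, kUnknown };

// Decides a constant branch condition under the given semantics.
Decision DecideConstantCondition(BranchSemantics semantics, bool js_boolean,
                                 int64_t machine_word) {
  if (semantics == BranchSemantics::kJS) {
    // JS semantics: the condition is the boolean value of the constant.
    return js_boolean ? Decision::kTrue : Decision::kFalse;
  }
  // Machine semantics: any non-zero word counts as true.
  return machine_word != 0 ? Decision::kTrue : Decision::kFalse;
}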
@@ -73,16 +80,13 @@ int ValueInputCountOfReturn(Operator const* const op);
// Parameters for the {Deoptimize} operator.
class DeoptimizeParameters final {
public:
- DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback)
- : kind_(kind), reason_(reason), feedback_(feedback) {}
+ DeoptimizeParameters(DeoptimizeReason reason, FeedbackSource const& feedback)
+ : reason_(reason), feedback_(feedback) {}
- DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const FeedbackSource& feedback() const { return feedback_; }
private:
- DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
FeedbackSource const feedback_;
};
@@ -420,6 +424,33 @@ const StringConstantBase* StringConstantBaseOf(const Operator* op)
const char* StaticAssertSourceOf(const Operator* op);
+class SLVerifierHintParameters final {
+ public:
+ explicit SLVerifierHintParameters(const Operator* semantics,
+ base::Optional<Type> override_output_type)
+ : semantics_(semantics), override_output_type_(override_output_type) {}
+
+ const Operator* semantics() const { return semantics_; }
+ const base::Optional<Type>& override_output_type() const {
+ return override_output_type_;
+ }
+
+ private:
+ const Operator* semantics_;
+ base::Optional<Type> override_output_type_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(const SLVerifierHintParameters& p1,
+ const SLVerifierHintParameters& p2);
+
+size_t hash_value(const SLVerifierHintParameters& p);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& out,
+ const SLVerifierHintParameters& p);
+
+V8_EXPORT_PRIVATE const SLVerifierHintParameters& SLVerifierHintParametersOf(
+ const Operator* op) V8_WARN_UNUSED_RESULT;
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -438,6 +469,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
const Operator* StaticAssert(const char* source);
+ // SLVerifierHint is used only during SimplifiedLowering. It may be introduced
+ // during lowering to provide additional hints for the verifier. These nodes
+ // are removed at the end of SimplifiedLowering after verification.
+ const Operator* SLVerifierHint(
+ const Operator* semantics,
+ const base::Optional<Type>& override_output_type);
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
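For orientation, a hedged sketch of how a lowering step might attach such a hint (the helper name is invented, the surrounding compiler headers are assumed, and this is not code from the patch):

// Wraps a lowered node in an SLVerifierHint so the SimplifiedLowering
// verifier knows which original semantics the replacement implements and,
// optionally, which type to assume for its output.
Node* WrapWithVerifierHint(Graph* graph, CommonOperatorBuilder* common,
                           const Operator* original_semantics,
                           base::Optional<Type> override_output_type,
                           Node* lowered) {
  return graph->NewNode(
      common->SLVerifierHint(original_semantics, override_output_type),
      lowered);
}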
@@ -449,11 +486,11 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
BranchHint hint = BranchHint::kNone);
const Operator* IfDefault(BranchHint hint = BranchHint::kNone);
const Operator* Throw();
- const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
+ const Operator* Deoptimize(DeoptimizeReason reason,
FeedbackSource const& feedback);
- const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ const Operator* DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback);
- const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ const Operator* DeoptimizeUnless(DeoptimizeReason reason,
FeedbackSource const& feedback);
const Operator* TrapIf(TrapId trap_id);
const Operator* TrapUnless(TrapId trap_id);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 6bf38dd2bb..191f50e9e1 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -162,6 +162,7 @@ class EffectControlLinearizer {
Node* LowerStringConcat(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharCodeAt(Node* node);
+ Node* StringCharCodeAt(Node* receiver, Node* position);
Node* LowerStringCodePointAt(Node* node);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
@@ -3828,10 +3829,8 @@ Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
__ NoContextConstant());
}
-Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
- Node* receiver = node->InputAt(0);
- Node* position = node->InputAt(1);
-
+Node* EffectControlLinearizer::StringCharCodeAt(Node* receiver,
+ Node* position) {
// We need a loop here to properly deal with indirect strings
// (SlicedString, ConsString and ThinString).
auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
@@ -3977,19 +3976,41 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
return loop_done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+ return StringCharCodeAt(receiver, position);
+}
+
Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kStringCodePointAt);
- Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), flags, properties);
- return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
- position, __ NoContextConstant());
+ auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
+ Node* first_code_unit = StringCharCodeAt(receiver, position);
+
+ __ GotoIfNot(
+ __ Word32Equal(__ Word32And(first_code_unit, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xD800)),
+ &return_result, BranchHint::kFalse, first_code_unit);
+
+ auto length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
+ auto next_index = __ IntAdd(position, __ IntPtrConstant(1));
+ __ GotoIfNot(__ IntLessThan(next_index, length), &return_result,
+ first_code_unit);
+ Node* second_code_unit = StringCharCodeAt(receiver, next_index);
+ __ GotoIfNot(
+ __ Word32Equal(__ Word32And(second_code_unit, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xDC00)),
+ &return_result, first_code_unit);
+
+ auto surrogate_offset = __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+ auto result = __ Int32Add(__ Word32Shl(first_code_unit, __ Int32Constant(10)),
+ __ Int32Add(second_code_unit, surrogate_offset));
+ __ Goto(&return_result, result);
+
+ __ Bind(&return_result);
+ return return_result.PhiAt(0);
}
Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
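The new LowerStringCodePointAt lowering above decodes UTF-16 surrogate pairs inline. A standalone model of the arithmetic it emits (same constants, plain C++, illustration only):

#include <cstdint>

// Combines a UTF-16 surrogate pair into a Unicode code point using the same
// arithmetic as the lowering above: if the first unit is a lead surrogate
// (0xD800..0xDBFF) and the second a trail surrogate (0xDC00..0xDFFF), the
// code point is (lead << 10) + trail + (0x10000 - (0xD800 << 10) - 0xDC00).
uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
  constexpr int32_t kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;
  return (static_cast<uint32_t>(lead) << 10) + trail + kSurrogateOffset;
}

// Example: CombineSurrogatePair(0xD83D, 0xDE00) == 0x1F600.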
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 24da29a232..5ad54a0487 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -377,6 +377,12 @@ TNode<FixedArrayBase> JSGraphAssembler::MaybeGrowFastElements(
new_length, old_length, effect(), control()));
}
+Node* JSGraphAssembler::StringCharCodeAt(TNode<String> string,
+ TNode<Number> position) {
+ return AddNode(graph()->NewNode(simplified()->StringCharCodeAt(), string,
+ position, effect(), control()));
+}
+
Node* GraphAssembler::TypeGuard(Type type, Node* value) {
return AddNode(
graph()->NewNode(common()->TypeGuard(type), value, effect(), control()));
@@ -506,33 +512,15 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) {
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* condition, Node* frame_state) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
- condition, frame_state, effect(), control()));
-}
-
-Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- Node* condition, Node* frame_state) {
- return AddNode(
- graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback),
- condition, frame_state, effect(), control()));
-}
-
-Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
- DeoptimizeReason reason,
- FeedbackSource const& feedback,
- Node* condition, Node* frame_state) {
- return AddNode(
- graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback),
- condition, frame_state, effect(), control()));
+ return AddNode(graph()->NewNode(common()->DeoptimizeIf(reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* condition, Node* frame_state) {
- return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
- frame_state);
+ return AddNode(graph()->NewNode(common()->DeoptimizeUnless(reason, feedback),
+ condition, frame_state, effect(), control()));
}
TNode<Object> GraphAssembler::Call(const CallDescriptor* call_descriptor,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 3715226bd0..1d569c07f5 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -325,12 +325,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
- Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition,
- Node* frame_state);
- Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition,
- Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
TNode<Object> Call(const CallDescriptor* call_descriptor, int inputs_size,
@@ -878,6 +872,7 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
TNode<FixedArrayBase> elements,
TNode<Number> new_length,
TNode<Number> old_length);
+ Node* StringCharCodeAt(TNode<String> string, TNode<Number> position);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 62f6417166..8d442e6855 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -268,153 +268,139 @@ static const char* SafeMnemonic(Node* node) {
return node == nullptr ? "null" : node->op()->mnemonic();
}
-class JSONGraphNodeWriter {
- public:
- JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph,
- const SourcePositionTable* positions,
- const NodeOriginTable* origins)
- : os_(os),
- all_(zone, graph, false),
- live_(zone, graph, true),
- positions_(positions),
- origins_(origins),
- first_node_(true) {}
- JSONGraphNodeWriter(const JSONGraphNodeWriter&) = delete;
- JSONGraphNodeWriter& operator=(const JSONGraphNodeWriter&) = delete;
-
- void Print() {
- for (Node* const node : all_.reachable) PrintNode(node);
- os_ << "\n";
- }
+JSONGraphWriter::JSONGraphWriter(std::ostream& os, const Graph* graph,
+ const SourcePositionTable* positions,
+ const NodeOriginTable* origins)
+ : os_(os),
+ zone_(nullptr),
+ graph_(graph),
+ positions_(positions),
+ origins_(origins),
+ first_node_(true),
+ first_edge_(true) {}
+
+void JSONGraphWriter::PrintPhase(const char* phase_name) {
+ os_ << "{\"name\":\"" << phase_name << "\",\"type\":\"graph\",\"data\":";
+ Print();
+ os_ << "},\n";
+}
- void PrintNode(Node* node) {
- if (first_node_) {
- first_node_ = false;
- } else {
- os_ << ",\n";
- }
- std::ostringstream label, title, properties;
- node->op()->PrintTo(label, Operator::PrintVerbosity::kSilent);
- node->op()->PrintTo(title, Operator::PrintVerbosity::kVerbose);
- node->op()->PrintPropsTo(properties);
- os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << JSONEscaped(label)
- << "\""
- << ",\"title\":\"" << JSONEscaped(title) << "\""
- << ",\"live\": " << (live_.IsLive(node) ? "true" : "false")
- << ",\"properties\":\"" << JSONEscaped(properties) << "\"";
- IrOpcode::Value opcode = node->opcode();
- if (IrOpcode::IsPhiOpcode(opcode)) {
- os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
- << "]";
- os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
- << "]";
- } else if (opcode == IrOpcode::kIfTrue || opcode == IrOpcode::kIfFalse ||
- opcode == IrOpcode::kLoop) {
- os_ << ",\"rankInputs\":[" << NodeProperties::FirstControlIndex(node)
- << "]";
- }
- if (opcode == IrOpcode::kBranch) {
- os_ << ",\"rankInputs\":[0]";
- }
- if (positions_ != nullptr) {
- SourcePosition position = positions_->GetSourcePosition(node);
- if (position.IsKnown()) {
- os_ << ", \"sourcePosition\" : " << AsJSON(position);
- }
- }
- if (origins_) {
- NodeOrigin origin = origins_->GetNodeOrigin(node);
- if (origin.IsKnown()) {
- os_ << ", \"origin\" : " << AsJSON(origin);
- }
+void JSONGraphWriter::Print() {
+ AccountingAllocator allocator;
+ Zone tmp_zone(&allocator, ZONE_NAME);
+ zone_ = &tmp_zone;
+
+ AllNodes all(zone_, graph_, false);
+ AllNodes live(zone_, graph_, true);
+
+ os_ << "{\n\"nodes\":[";
+ for (Node* const node : all.reachable) PrintNode(node, live.IsLive(node));
+ os_ << "\n";
+ os_ << "],\n\"edges\":[";
+ for (Node* const node : all.reachable) PrintEdges(node);
+ os_ << "\n";
+ os_ << "]}";
+ zone_ = nullptr;
+}
+
+void JSONGraphWriter::PrintNode(Node* node, bool is_live) {
+ if (first_node_) {
+ first_node_ = false;
+ } else {
+ os_ << ",\n";
+ }
+ std::ostringstream label, title, properties;
+ node->op()->PrintTo(label, Operator::PrintVerbosity::kSilent);
+ node->op()->PrintTo(title, Operator::PrintVerbosity::kVerbose);
+ node->op()->PrintPropsTo(properties);
+ os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << JSONEscaped(label)
+ << "\""
+ << ",\"title\":\"" << JSONEscaped(title) << "\""
+ << ",\"live\": " << (is_live ? "true" : "false") << ",\"properties\":\""
+ << JSONEscaped(properties) << "\"";
+ IrOpcode::Value opcode = node->opcode();
+ if (IrOpcode::IsPhiOpcode(opcode)) {
+ os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
+ << "]";
+ os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
+ << "]";
+ } else if (opcode == IrOpcode::kIfTrue || opcode == IrOpcode::kIfFalse ||
+ opcode == IrOpcode::kLoop) {
+ os_ << ",\"rankInputs\":[" << NodeProperties::FirstControlIndex(node)
+ << "]";
+ }
+ if (opcode == IrOpcode::kBranch) {
+ os_ << ",\"rankInputs\":[0]";
+ }
+ if (positions_ != nullptr) {
+ SourcePosition position = positions_->GetSourcePosition(node);
+ if (position.IsKnown()) {
+ os_ << ", \"sourcePosition\" : " << AsJSON(position);
}
- os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
- os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
- : "false");
- os_ << ",\"opinfo\":\"" << node->op()->ValueInputCount() << " v "
- << node->op()->EffectInputCount() << " eff "
- << node->op()->ControlInputCount() << " ctrl in, "
- << node->op()->ValueOutputCount() << " v "
- << node->op()->EffectOutputCount() << " eff "
- << node->op()->ControlOutputCount() << " ctrl out\"";
- if (NodeProperties::IsTyped(node)) {
- Type type = NodeProperties::GetType(node);
- std::ostringstream type_out;
- type.PrintTo(type_out);
- os_ << ",\"type\":\"" << JSONEscaped(type_out) << "\"";
+ }
+ if (origins_) {
+ NodeOrigin origin = origins_->GetNodeOrigin(node);
+ if (origin.IsKnown()) {
+ os_ << ", \"origin\" : " << AsJSON(origin);
}
- os_ << "}";
}
-
- private:
- std::ostream& os_;
- AllNodes all_;
- AllNodes live_;
- const SourcePositionTable* positions_;
- const NodeOriginTable* origins_;
- bool first_node_;
-};
-
-
-class JSONGraphEdgeWriter {
- public:
- JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
- : os_(os), all_(zone, graph, false), first_edge_(true) {}
- JSONGraphEdgeWriter(const JSONGraphEdgeWriter&) = delete;
- JSONGraphEdgeWriter& operator=(const JSONGraphEdgeWriter&) = delete;
-
- void Print() {
- for (Node* const node : all_.reachable) PrintEdges(node);
- os_ << "\n";
+ os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
+ os_ << ",\"control\":"
+ << (NodeProperties::IsControl(node) ? "true" : "false");
+ os_ << ",\"opinfo\":\"" << node->op()->ValueInputCount() << " v "
+ << node->op()->EffectInputCount() << " eff "
+ << node->op()->ControlInputCount() << " ctrl in, "
+ << node->op()->ValueOutputCount() << " v "
+ << node->op()->EffectOutputCount() << " eff "
+ << node->op()->ControlOutputCount() << " ctrl out\"";
+ if (auto type_opt = GetType(node)) {
+ std::ostringstream type_out;
+ type_opt->PrintTo(type_out);
+ os_ << ",\"type\":\"" << JSONEscaped(type_out) << "\"";
}
+ os_ << "}";
+}
- void PrintEdges(Node* node) {
- for (int i = 0; i < node->InputCount(); i++) {
- Node* input = node->InputAt(i);
- if (input == nullptr) continue;
- PrintEdge(node, i, input);
- }
+void JSONGraphWriter::PrintEdges(Node* node) {
+ for (int i = 0; i < node->InputCount(); i++) {
+ Node* input = node->InputAt(i);
+ if (input == nullptr) continue;
+ PrintEdge(node, i, input);
}
+}
- void PrintEdge(Node* from, int index, Node* to) {
- if (first_edge_) {
- first_edge_ = false;
- } else {
- os_ << ",\n";
- }
- const char* edge_type = nullptr;
- if (index < NodeProperties::FirstValueIndex(from)) {
- edge_type = "unknown";
- } else if (index < NodeProperties::FirstContextIndex(from)) {
- edge_type = "value";
- } else if (index < NodeProperties::FirstFrameStateIndex(from)) {
- edge_type = "context";
- } else if (index < NodeProperties::FirstEffectIndex(from)) {
- edge_type = "frame-state";
- } else if (index < NodeProperties::FirstControlIndex(from)) {
- edge_type = "effect";
- } else {
- edge_type = "control";
- }
- os_ << "{\"source\":" << SafeId(to) << ",\"target\":" << SafeId(from)
- << ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
+void JSONGraphWriter::PrintEdge(Node* from, int index, Node* to) {
+ if (first_edge_) {
+ first_edge_ = false;
+ } else {
+ os_ << ",\n";
+ }
+ const char* edge_type = nullptr;
+ if (index < NodeProperties::FirstValueIndex(from)) {
+ edge_type = "unknown";
+ } else if (index < NodeProperties::FirstContextIndex(from)) {
+ edge_type = "value";
+ } else if (index < NodeProperties::FirstFrameStateIndex(from)) {
+ edge_type = "context";
+ } else if (index < NodeProperties::FirstEffectIndex(from)) {
+ edge_type = "frame-state";
+ } else if (index < NodeProperties::FirstControlIndex(from)) {
+ edge_type = "effect";
+ } else {
+ edge_type = "control";
}
+ os_ << "{\"source\":" << SafeId(to) << ",\"target\":" << SafeId(from)
+ << ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
+}
- private:
- std::ostream& os_;
- AllNodes all_;
- bool first_edge_;
-};
+base::Optional<Type> JSONGraphWriter::GetType(Node* node) {
+ if (!NodeProperties::IsTyped(node)) return base::nullopt;
+ return NodeProperties::GetType(node);
+}
std::ostream& operator<<(std::ostream& os, const GraphAsJSON& ad) {
- AccountingAllocator allocator;
- Zone tmp_zone(&allocator, ZONE_NAME);
- os << "{\n\"nodes\":[";
- JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions, ad.origins)
- .Print();
- os << "],\n\"edges\":[";
- JSONGraphEdgeWriter(os, &tmp_zone, &ad.graph).Print();
- os << "]}";
+ JSONGraphWriter writer(os, &ad.graph, ad.positions, ad.origins);
+ writer.Print();
return os;
}
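A brief usage sketch of the consolidated writer (the wrapper function is illustrative and assumes the surrounding compiler headers): the writer now allocates its own temporary zone, so callers only supply a stream and the graph.

// Emits one phase entry to a TurboFan JSON trace stream. The payload is the
// same {"nodes":[...],"edges":[...]} object that Print() writes, wrapped by
// PrintPhase() in {"name":"<phase>","type":"graph","data":...},
void DumpGraphPhase(std::ostream& os, const Graph* graph,
                    const SourcePositionTable* positions,
                    const NodeOriginTable* origins, const char* phase_name) {
  JSONGraphWriter writer(os, graph, positions, origins);
  writer.PrintPhase(phase_name);
}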
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 39a2ef5021..847431be7b 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -29,11 +29,13 @@ class Instruction;
class InstructionBlock;
class InstructionOperand;
class InstructionSequence;
+class Node;
class NodeOrigin;
class NodeOriginTable;
class RegisterAllocationData;
class Schedule;
class SourcePositionTable;
+class Type;
struct TurboJsonFile : public std::ofstream {
TurboJsonFile(OptimizedCompilationInfo* info, std::ios_base::openmode mode);
@@ -95,6 +97,34 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
const char* phase,
const char* suffix);
+class JSONGraphWriter {
+ public:
+ JSONGraphWriter(std::ostream& os, const Graph* graph,
+ const SourcePositionTable* positions,
+ const NodeOriginTable* origins);
+
+ JSONGraphWriter(const JSONGraphWriter&) = delete;
+ JSONGraphWriter& operator=(const JSONGraphWriter&) = delete;
+
+ void PrintPhase(const char* phase_name);
+ void Print();
+
+ protected:
+ void PrintNode(Node* node, bool is_live);
+ void PrintEdges(Node* node);
+ void PrintEdge(Node* from, int index, Node* to);
+ virtual base::Optional<Type> GetType(Node* node);
+
+ protected:
+ std::ostream& os_;
+ Zone* zone_;
+ const Graph* graph_;
+ const SourcePositionTable* positions_;
+ const NodeOriginTable* origins_;
+ bool first_node_;
+ bool first_edge_;
+};
+
struct GraphAsJSON {
GraphAsJSON(const Graph& g, SourcePositionTable* p, NodeOriginTable* o)
: graph(g), positions(p), origins(o) {}
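GetType is the only virtual hook of the new JSONGraphWriter. A hypothetical subclass (illustrative name, not from the patch) could supply types from a source other than NodeProperties while reusing all of the printing logic:

class TypedPhaseGraphWriter : public JSONGraphWriter {
 public:
  TypedPhaseGraphWriter(std::ostream& os, const Graph* graph,
                        const SourcePositionTable* positions,
                        const NodeOriginTable* origins)
      : JSONGraphWriter(os, graph, positions, origins) {}

 protected:
  base::Optional<Type> GetType(Node* node) override {
    // A real subclass would consult its own type information here; falling
    // back to the base class keeps this sketch behaviour-preserving.
    return JSONGraphWriter::GetType(node);
  }
};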
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 25682dc9ae..a5ac963921 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -70,7 +70,7 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
// for argument types convertible to Node* during overload resolution.
template <typename... Nodes,
typename = typename std::enable_if_t<
- base::all(std::is_convertible<Nodes, Node*>::value...)>>
+ std::conjunction_v<std::is_convertible<Nodes, Node*>...>>>
Node* NewNode(const Operator* op, Nodes... nodes) {
std::array<Node*, sizeof...(nodes)> nodes_arr{
{static_cast<Node*>(nodes)...}};
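The template constraint above switches from base::all to the standard std::conjunction_v. A standalone illustration of the same pattern (the Node struct below is a stand-in, not the compiler's Node class):

#include <type_traits>

struct Node {};

// Participates in overload resolution only if every argument type is
// convertible to Node*, mirroring the constraint on Graph::NewNode.
template <typename... Nodes,
          typename = std::enable_if_t<
              std::conjunction_v<std::is_convertible<Nodes, Node*>...>>>
int CountNodes(Nodes... nodes) {
  return static_cast<int>(sizeof...(nodes));
}

// CountNodes(static_cast<Node*>(nullptr), static_cast<Node*>(nullptr)) == 2;
// CountNodes(1, 2) does not compile because int is not convertible to Node*.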
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index fe9072eb10..638d83ff1f 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -1300,20 +1300,22 @@ base::Optional<int> StringRef::length() const {
}
}
-base::Optional<uint16_t> StringRef::GetFirstChar() {
+base::Optional<uint16_t> StringRef::GetFirstChar() const { return GetChar(0); }
+
+base::Optional<uint16_t> StringRef::GetChar(int index) const {
if (data_->kind() == kNeverSerializedHeapObject && !SupportedStringKind()) {
TRACE_BROKER_MISSING(
broker(),
- "first char for kNeverSerialized unsupported string kind " << *this);
+ "get char for kNeverSerialized unsupported string kind " << *this);
return base::nullopt;
}
if (!broker()->IsMainThread()) {
- return object()->Get(0, broker()->local_isolate());
+ return object()->Get(index, broker()->local_isolate());
} else {
// TODO(solanes, v8:7790): Remove this case once the inlining phase is
// done concurrently all the time.
- return object()->Get(0);
+ return object()->Get(index);
}
}
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 8f62862f9b..83eb11ae38 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -920,7 +920,8 @@ class StringRef : public NameRef {
// base::nullopt for these methods.
base::Optional<Handle<String>> ObjectIfContentAccessible();
base::Optional<int> length() const;
- base::Optional<uint16_t> GetFirstChar();
+ base::Optional<uint16_t> GetFirstChar() const;
+ base::Optional<uint16_t> GetChar(int index) const;
base::Optional<double> ToNumber();
bool IsSeqString() const;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 7935a68c62..9a10331a14 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -86,6 +86,9 @@ class JSCallReducerAssembler : public JSGraphAssembler {
TNode<Object> ReduceMathUnary(const Operator* op);
TNode<Object> ReduceMathBinary(const Operator* op);
TNode<String> ReduceStringPrototypeSubstring();
+ TNode<Boolean> ReduceStringPrototypeStartsWith();
+ TNode<Boolean> ReduceStringPrototypeStartsWith(
+ const StringRef& search_element_string);
TNode<String> ReduceStringPrototypeSlice();
TNode<Object> TargetInput() const { return JSCallNode{node_ptr()}.target(); }
@@ -265,8 +268,6 @@ class JSCallReducerAssembler : public JSGraphAssembler {
// Common operators.
TNode<Smi> TypeGuardUnsignedSmall(TNode<Object> value);
- TNode<Number> TypeGuardNumber(TNode<Object> value);
- TNode<String> TypeGuardString(TNode<Object> value);
TNode<Object> TypeGuardNonInternal(TNode<Object> value);
TNode<Number> TypeGuardFixedArrayLength(TNode<Object> value);
TNode<Object> Call4(const Callable& callable, TNode<Context> context,
@@ -521,15 +522,12 @@ class JSCallReducerAssembler : public JSGraphAssembler {
};
ForBuilder0 ForZeroUntil(TNode<Number> excluded_limit) {
- return ForStartUntil(ZeroConstant(), excluded_limit);
- }
-
- ForBuilder0 ForStartUntil(TNode<Number> start, TNode<Number> excluded_limit) {
+ TNode<Number> initial_value = ZeroConstant();
auto cond = [=](TNode<Number> i) {
return NumberLessThan(i, excluded_limit);
};
auto step = [=](TNode<Number> i) { return NumberAdd(i, OneConstant()); };
- return {this, start, cond, step};
+ return {this, initial_value, cond, step};
}
ForBuilder0 Forever(TNode<Number> initial_value, const StepFunction1& step) {
@@ -1052,14 +1050,6 @@ TNode<Smi> JSCallReducerAssembler::TypeGuardUnsignedSmall(TNode<Object> value) {
return TNode<Smi>::UncheckedCast(TypeGuard(Type::UnsignedSmall(), value));
}
-TNode<Number> JSCallReducerAssembler::TypeGuardNumber(TNode<Object> value) {
- return TNode<Smi>::UncheckedCast(TypeGuard(Type::Number(), value));
-}
-
-TNode<String> JSCallReducerAssembler::TypeGuardString(TNode<Object> value) {
- return TNode<String>::UncheckedCast(TypeGuard(Type::String(), value));
-}
-
TNode<Object> JSCallReducerAssembler::TypeGuardNonInternal(
TNode<Object> value) {
return TNode<Object>::UncheckedCast(TypeGuard(Type::NonInternal(), value));
@@ -1207,6 +1197,91 @@ TNode<String> JSCallReducerAssembler::ReduceStringPrototypeSubstring() {
return StringSubstring(receiver_string, from, to);
}
+TNode<Boolean> JSCallReducerAssembler::ReduceStringPrototypeStartsWith(
+ const StringRef& search_element_string) {
+ TNode<Object> receiver = ReceiverInput();
+ TNode<Object> start = ArgumentOrZero(1);
+
+ TNode<String> receiver_string = CheckString(receiver);
+ TNode<Smi> start_smi = CheckSmi(start);
+ TNode<Number> length = StringLength(receiver_string);
+
+ TNode<Number> zero = ZeroConstant();
+ TNode<Number> clamped_start = NumberMin(NumberMax(start_smi, zero), length);
+
+ int search_string_length = search_element_string.length().value();
+ DCHECK(search_string_length <= JSCallReducer::kMaxInlineMatchSequence);
+
+ auto out = MakeLabel(MachineRepresentation::kTagged);
+
+ auto search_string_too_long =
+ NumberLessThan(NumberSubtract(length, clamped_start),
+ NumberConstant(search_string_length));
+
+ GotoIf(search_string_too_long, &out, BranchHint::kFalse, FalseConstant());
+
+ STATIC_ASSERT(String::kMaxLength <= kSmiMaxValue);
+
+ for (int i = 0; i < search_string_length; i++) {
+ TNode<Number> k = NumberConstant(i);
+ TNode<Number> receiver_string_position = TNode<Number>::UncheckedCast(
+ TypeGuard(Type::UnsignedSmall(), NumberAdd(k, clamped_start)));
+ Node* receiver_string_char =
+ StringCharCodeAt(receiver_string, receiver_string_position);
+ Node* search_string_char =
+ jsgraph()->Constant(search_element_string.GetChar(i).value());
+ auto is_equal = graph()->NewNode(simplified()->NumberEqual(),
+ search_string_char, receiver_string_char);
+ GotoIfNot(is_equal, &out, FalseConstant());
+ }
+
+ Goto(&out, TrueConstant());
+
+ Bind(&out);
+ return out.PhiAt<Boolean>(0);
+}
+
+TNode<Boolean> JSCallReducerAssembler::ReduceStringPrototypeStartsWith() {
+ TNode<Object> receiver = ReceiverInput();
+ TNode<Object> search_element = ArgumentOrUndefined(0);
+ TNode<Object> start = ArgumentOrZero(1);
+
+ TNode<String> receiver_string = CheckString(receiver);
+ TNode<String> search_string = CheckString(search_element);
+ TNode<Smi> start_smi = CheckSmi(start);
+ TNode<Number> length = StringLength(receiver_string);
+
+ TNode<Number> zero = ZeroConstant();
+ TNode<Number> clamped_start = NumberMin(NumberMax(start_smi, zero), length);
+
+ TNode<Number> search_string_length = StringLength(search_string);
+
+ auto out = MakeLabel(MachineRepresentation::kTagged);
+
+ auto search_string_too_long = NumberLessThan(
+ NumberSubtract(length, clamped_start), search_string_length);
+
+ GotoIf(search_string_too_long, &out, BranchHint::kFalse, FalseConstant());
+
+ STATIC_ASSERT(String::kMaxLength <= kSmiMaxValue);
+
+ ForZeroUntil(search_string_length).Do([&](TNode<Number> k) {
+ TNode<Number> receiver_string_position = TNode<Number>::UncheckedCast(
+ TypeGuard(Type::UnsignedSmall(), NumberAdd(k, clamped_start)));
+ Node* receiver_string_char =
+ StringCharCodeAt(receiver_string, receiver_string_position);
+ Node* search_string_char = StringCharCodeAt(search_string, k);
+ auto is_equal = graph()->NewNode(simplified()->NumberEqual(),
+ receiver_string_char, search_string_char);
+ GotoIfNot(is_equal, &out, FalseConstant());
+ });
+
+ Goto(&out, TrueConstant());
+
+ Bind(&out);
+ return out.PhiAt<Boolean>(0);
+}
+
TNode<String> JSCallReducerAssembler::ReduceStringPrototypeSlice() {
TNode<Object> receiver = ReceiverInput();
TNode<Object> start = Argument(0);
@@ -1996,33 +2071,38 @@ namespace {
Callable GetCallableForArrayIndexOfIncludes(ArrayIndexOfIncludesVariant variant,
ElementsKind elements_kind,
Isolate* isolate) {
- DCHECK(IsHoleyElementsKind(elements_kind));
if (variant == ArrayIndexOfIncludesVariant::kIndexOf) {
switch (elements_kind) {
+ case PACKED_SMI_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
case HOLEY_ELEMENTS:
return Builtins::CallableFor(isolate,
Builtin::kArrayIndexOfSmiOrObject);
- case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ return Builtins::CallableFor(isolate,
+ Builtin::kArrayIndexOfPackedDoubles);
+ default:
+ DCHECK_EQ(HOLEY_DOUBLE_ELEMENTS, elements_kind);
return Builtins::CallableFor(isolate,
Builtin::kArrayIndexOfHoleyDoubles);
- default: {
- UNREACHABLE();
- }
}
} else {
DCHECK_EQ(variant, ArrayIndexOfIncludesVariant::kIncludes);
switch (elements_kind) {
+ case PACKED_SMI_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
case HOLEY_ELEMENTS:
return Builtins::CallableFor(isolate,
Builtin::kArrayIncludesSmiOrObject);
- case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ return Builtins::CallableFor(isolate,
+ Builtin::kArrayIncludesPackedDoubles);
+ default:
+ DCHECK_EQ(HOLEY_DOUBLE_ELEMENTS, elements_kind);
return Builtins::CallableFor(isolate,
Builtin::kArrayIncludesHoleyDoubles);
- default: {
- UNREACHABLE();
- }
}
}
UNREACHABLE();
@@ -2038,7 +2118,13 @@ IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeIndexOfIncludes(
TNode<Object> search_element = ArgumentOrUndefined(0);
TNode<Object> from_index = ArgumentOrZero(1);
- TNode<Number> original_length = LoadJSArrayLength(receiver, kind);
+ // TODO(jgruber): This currently only reduces to a stub call. Create a full
+ // reduction (similar to other higher-order array builtins) instead of
+ // lowering to a builtin call. E.g. Array.p.every and Array.p.some have almost
+ // identical functionality.
+
+ TNode<Number> length = LoadJSArrayLength(receiver, kind);
+ TNode<FixedArrayBase> elements = LoadElements(receiver);
const bool have_from_index = ArgumentCount() > 1;
if (have_from_index) {
@@ -2048,279 +2134,18 @@ IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeIndexOfIncludes(
// therefore needs to be added to the length. If the result is still
// negative, it needs to be clamped to 0.
TNode<Boolean> cond = NumberLessThan(from_index_smi, ZeroConstant());
- from_index =
- SelectIf<Number>(cond)
- .Then(_ {
- return NumberMax(NumberAdd(original_length, from_index_smi),
- ZeroConstant());
- })
- .Else(_ { return from_index_smi; })
- .ExpectFalse()
- .Value();
- }
-
- if (IsHoleyElementsKind(kind)) {
- TNode<FixedArrayBase> elements = LoadElements(receiver);
- return Call4(GetCallableForArrayIndexOfIncludes(variant, kind, isolate()),
- context, elements, search_element, original_length,
- from_index);
+ from_index = SelectIf<Number>(cond)
+ .Then(_ {
+ return NumberMax(NumberAdd(length, from_index_smi),
+ ZeroConstant());
+ })
+ .Else(_ { return from_index_smi; })
+ .ExpectFalse()
+ .Value();
}
- auto out = MakeLabel(MachineRepresentation::kTagged);
-
- DCHECK(IsFastPackedElementsKind(kind));
-
- Node* fail_value;
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- fail_value = FalseConstant();
- } else {
- fail_value = NumberConstant(-1);
- }
- TNode<FixedArrayBase> elements = LoadElements(receiver);
-
- switch (kind) {
- case PACKED_SMI_ELEMENTS: {
- TNode<Boolean> is_finite_number = AddNode<Boolean>(graph()->NewNode(
- simplified()->ObjectIsFiniteNumber(), search_element));
- GotoIfNot(is_finite_number, &out, fail_value);
-
- TNode<Number> search_element_number = TypeGuardNumber(search_element);
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- // if from_index is not smi, it will early bailout, so here
- // we could LoadElement directly.
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
-
- auto cond = NumberEqual(search_element_number,
- TNode<Number>::UncheckedCast(element));
-
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- GotoIf(cond, &out, TrueConstant());
- } else {
- GotoIf(cond, &out, k);
- }
- });
- Goto(&out, fail_value);
- break;
- }
- case PACKED_DOUBLE_ELEMENTS: {
- auto nan_loop = MakeLabel();
- TNode<Boolean> is_number = AddNode<Boolean>(
- graph()->NewNode(simplified()->ObjectIsNumber(), search_element));
- GotoIfNot(is_number, &out, fail_value);
-
- TNode<Number> search_element_number = TypeGuardNumber(search_element);
-
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- // https://tc39.es/ecma262/#sec-array.prototype.includes use
- // SameValueZero, NaN == NaN, so we need to check.
- TNode<Boolean> is_nan = AddNode<Boolean>(graph()->NewNode(
- simplified()->NumberIsNaN(), search_element_number));
- GotoIf(is_nan, &nan_loop);
- } else {
- DCHECK(variant == ArrayIndexOfIncludesVariant::kIndexOf);
- // https://tc39.es/ecma262/#sec-array.prototype.indexOf use
- // IsStrictEqual, NaN != NaN, NaN compare will be handled by
- // NumberEqual.
- }
-
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
-
- auto cond = NumberEqual(search_element_number,
- TNode<Number>::UncheckedCast(element));
-
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- GotoIf(cond, &out, TrueConstant());
- } else {
- GotoIf(cond, &out, k);
- }
- });
- Goto(&out, fail_value);
-
- // https://tc39.es/ecma262/#sec-array.prototype.includes use
- // SameValueZero, NaN == NaN, we need to bind nan_loop to check.
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- Bind(&nan_loop);
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
-
- auto cond = AddNode<Boolean>(
- graph()->NewNode(simplified()->NumberIsNaN(),
- TNode<Number>::UncheckedCast(element)));
- GotoIf(cond, &out, TrueConstant());
- });
- Goto(&out, fail_value);
- }
- break;
- }
- case PACKED_ELEMENTS: {
- auto number_loop = MakeLabel();
- auto not_number = MakeLabel();
- auto string_loop = MakeLabel();
- auto bigint_loop = MakeLabel();
- auto ident_loop = MakeLabel();
-
- auto is_number = AddNode(
- graph()->NewNode(simplified()->ObjectIsNumber(), search_element));
- GotoIf(is_number, &number_loop);
- Goto(&not_number);
-
- Bind(&not_number);
- auto is_string = AddNode(
- graph()->NewNode(simplified()->ObjectIsString(), search_element));
- GotoIf(is_string, &string_loop);
- auto is_bigint = AddNode(
- graph()->NewNode(simplified()->ObjectIsBigInt(), search_element));
- GotoIf(is_bigint, &bigint_loop);
-
- Goto(&ident_loop);
- Bind(&ident_loop);
-
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- // if from_index is not smi, it will early bailout, so here
- // we could LoadElement directly.
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
- auto cond = AddNode(graph()->NewNode(simplified()->ReferenceEqual(),
- search_element, element));
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- GotoIf(cond, &out, TrueConstant());
- } else {
- GotoIf(cond, &out, k);
- }
- });
-
- Goto(&out, fail_value);
-
- Bind(&number_loop);
- TNode<Number> search_element_number = TypeGuardNumber(search_element);
-
- auto nan_loop = MakeLabel();
-
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- // https://tc39.es/ecma262/#sec-array.prototype.includes use
- // SameValueZero, NaN == NaN, so we need to check.
- auto is_nan = AddNode(graph()->NewNode(simplified()->NumberIsNaN(),
- search_element_number));
- GotoIf(is_nan, &nan_loop);
- } else {
- DCHECK(variant == ArrayIndexOfIncludesVariant::kIndexOf);
- // https://tc39.es/ecma262/#sec-array.prototype.indexOf use
- // IsStrictEqual, NaN != NaN, NaN compare will be handled by
- // NumberEqual.
- }
-
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- auto continue_label = MakeLabel();
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
-
- auto is_number = AddNode(
- graph()->NewNode(simplified()->ObjectIsNumber(), element));
-
- GotoIfNot(is_number, &continue_label);
-
- TNode<Number> element_number = TypeGuardNumber(element);
- auto cond = NumberEqual(search_element_number, element_number);
- GotoIfNot(cond, &continue_label);
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- Goto(&out, TrueConstant());
- } else {
- Goto(&out, k);
- }
-
- Bind(&continue_label);
- });
- Goto(&out, fail_value);
-
- // https://tc39.es/ecma262/#sec-array.prototype.includes use
- // SameValueZero, NaN == NaN, we need to bind nan_loop to check.
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- Bind(&nan_loop);
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
-
- auto cond = AddNode<Boolean>(
- graph()->NewNode(simplified()->ObjectIsNaN(), element));
- GotoIf(cond, &out, TrueConstant());
- });
- Goto(&out, fail_value);
- }
-
- Bind(&string_loop);
- TNode<String> search_element_string = TypeGuardString(search_element);
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- auto continue_label = MakeLabel();
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
- auto is_string = AddNode(
- graph()->NewNode(simplified()->ObjectIsString(), element));
-
- GotoIfNot(is_string, &continue_label);
-
- TNode<String> element_string = TypeGuardString(element);
- auto cond = AddNode(graph()->NewNode(simplified()->StringEqual(),
- element_string,
- search_element_string));
- GotoIfNot(cond, &continue_label);
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- Goto(&out, TrueConstant());
- } else {
- Goto(&out, k);
- }
-
- Bind(&continue_label);
- });
- Goto(&out, fail_value);
-
- Bind(&bigint_loop);
- ForStartUntil(TNode<Number>::UncheckedCast(from_index), original_length)
- .Do([&](TNode<Number> k) {
- auto continue_label = MakeLabel();
- TNode<Object> element = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind), elements, k);
- auto is_bigint = AddNode(
- graph()->NewNode(simplified()->ObjectIsBigInt(), element));
-
- GotoIfNot(is_bigint, &continue_label);
- auto cond = AddNode<Object>(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kBigIntEqualToBigInt, 2),
- search_element, element, context, FrameStateInput(), effect(),
- control()));
-
- GotoIfNot(ToBoolean(cond), &continue_label);
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- Goto(&out, TrueConstant());
- } else {
- Goto(&out, k);
- }
-
- Bind(&continue_label);
- });
- Goto(&out, fail_value);
- break;
- }
- default: {
- UNREACHABLE();
- }
- }
- Bind(&out);
- if (variant == ArrayIndexOfIncludesVariant::kIncludes) {
- return out.PhiAt<Boolean>(0);
- } else {
- return out.PhiAt<Number>(0);
- }
+ return Call4(GetCallableForArrayIndexOfIncludes(variant, kind, isolate()),
+ context, elements, search_element, length, from_index);
}
namespace {
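A scalar model of the fromIndex normalization performed in the reduction above (illustration only): a negative fromIndex counts back from the end of the array and is clamped at zero.

#include <algorithm>

// Mirrors the SelectIf logic in ReduceArrayPrototypeIndexOfIncludes.
int NormalizeFromIndex(int from_index, int length) {
  if (from_index < 0) return std::max(length + from_index, 0);
  return from_index;
}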
@@ -4966,7 +4791,11 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtin::kReturnReceiver:
return ReduceReturnReceiver(node);
case Builtin::kStringPrototypeIndexOf:
- return ReduceStringPrototypeIndexOf(node);
+ return ReduceStringPrototypeIndexOfIncludes(
+ node, StringIndexOfIncludesVariant::kIndexOf);
+ case Builtin::kStringPrototypeIncludes:
+ return ReduceStringPrototypeIndexOfIncludes(
+ node, StringIndexOfIncludesVariant::kIncludes);
case Builtin::kStringPrototypeCharAt:
return ReduceStringPrototypeCharAt(node);
case Builtin::kStringPrototypeCharCodeAt:
@@ -5423,7 +5252,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
// ES #sec-string.prototype.indexof
-Reduction JSCallReducer::ReduceStringPrototypeIndexOf(Node* node) {
+// ES #sec-string.prototype.includes
+Reduction JSCallReducer::ReduceStringPrototypeIndexOfIncludes(
+ Node* node, StringIndexOfIncludesVariant variant) {
JSCallNode n(node);
CallParameters const& p = n.Parameters();
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -5464,7 +5295,17 @@ Reduction JSCallReducer::ReduceStringPrototypeIndexOf(Node* node) {
node->ReplaceInput(2, new_position);
node->TrimInputCount(3);
NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
- return Changed(node);
+
+ if (variant == StringIndexOfIncludesVariant::kIndexOf) {
+ return Changed(node);
+ } else {
+ DCHECK(variant == StringIndexOfIncludesVariant::kIncludes);
+ Node* result =
+ graph()->NewNode(simplified()->BooleanNot(),
+ graph()->NewNode(simplified()->NumberEqual(), node,
+ jsgraph()->SmiConstant(-1)));
+ return Replace(result);
+ }
}
return NoChange();
}
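The kIncludes branch above lowers String.prototype.includes to "indexOf(search, position) != -1". A scalar model of that equivalence (illustration only, using std::u16string rather than V8 strings):

#include <string>

bool IncludesViaIndexOf(const std::u16string& receiver,
                        const std::u16string& search, size_t position) {
  // find() returns npos when there is no match, playing the role of the -1
  // sentinel checked by the lowered graph.
  return receiver.find(search, position) != std::u16string::npos;
}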
@@ -5638,9 +5479,9 @@ Reduction JSCallReducer::ReduceForInsufficientFeedback(
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state =
NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kSoft, reason, FeedbackSource()),
- frame_state, effect, control);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(reason, FeedbackSource()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -6083,6 +5924,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+
Node* index = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2),
jsgraph()->OneConstant(),
@@ -6101,12 +5943,25 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
ElementAccess const access =
AccessBuilder::ForFixedArrayElement(kind);
+
+          // When FLAG_turbo_loop_variable is disabled, the typer cannot
+          // infer that index is in [1, kMaxCopyElements-1], and breaks when
+          // converting the kRepFloat64 (Range(1, inf)) input of kLoadElement
+          // to kRepWord64, so a type guard is added here. The original index
+          // (not the retyped one) must still feed the NumberLessThan
+          // termination check and the index update, otherwise induction
+          // variable detection in LoopVariableOptimizer breaks.
+ STATIC_ASSERT(JSArray::kMaxCopyElements < kSmiMaxValue);
+ Node* index_retyped = effect2 =
+ graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()),
+ index, effect2, control2);
+
Node* value2 = effect2 =
graph()->NewNode(simplified()->LoadElement(access), elements,
- index, effect2, control2);
+ index_retyped, effect2, control2);
effect2 = graph()->NewNode(
simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(), index,
+ graph()->NewNode(simplified()->NumberSubtract(), index_retyped,
jsgraph()->OneConstant()),
value2, effect2, control2);
@@ -6661,74 +6516,37 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
return NoChange();
}
- Node* receiver = n.receiver();
- Effect effect = n.effect();
- Control control = n.control();
-
- Node* search_string = n.ArgumentOr(0, jsgraph()->UndefinedConstant());
- Node* position = n.ArgumentOr(1, jsgraph()->ZeroConstant());
-
- HeapObjectMatcher m(search_string);
- if (m.HasResolvedValue()) {
- ObjectRef target_ref = m.Ref(broker());
- if (target_ref.IsString()) {
- StringRef str = target_ref.AsString();
- if (str.length().has_value()) {
- receiver = effect = graph()->NewNode(
- simplified()->CheckString(p.feedback()), receiver, effect, control);
-
- position = effect = graph()->NewNode(
- simplified()->CheckSmi(p.feedback()), position, effect, control);
-
- if (str.length().value() == 0) {
- Node* value = jsgraph()->TrueConstant();
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- if (str.length().value() == 1) {
- Node* string_length =
- graph()->NewNode(simplified()->StringLength(), receiver);
- Node* unsigned_position = graph()->NewNode(
- simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
-
- Node* check = graph()->NewNode(simplified()->NumberLessThan(),
- unsigned_position, string_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
- check, control);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse = jsgraph()->FalseConstant();
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- Node* string_first = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- unsigned_position, etrue, if_true);
-
- Node* search_first =
- jsgraph()->Constant(str.GetFirstChar().value());
- vtrue = graph()->NewNode(simplified()->NumberEqual(), string_first,
- search_first);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
+ TNode<Object> search_element = n.ArgumentOrUndefined(0, jsgraph());
+
+  // There are three cases:
+  // First, if search_element is definitely not a string, make no change.
+  // Second, if search_element is definitely a string and its length is less
+  // than or equal to the inline matching sequence threshold, inline the
+  // entire matching sequence.
+  // Third, otherwise try to inline anyway and deoptimize at runtime if
+  // search_element turns out not to be a string.
+ HeapObjectMatcher search_element_matcher(search_element);
+ if (search_element_matcher.HasResolvedValue()) {
+ ObjectRef target_ref = search_element_matcher.Ref(broker());
+ if (!target_ref.IsString()) return NoChange();
+ StringRef search_element_string = target_ref.AsString();
+ if (search_element_string.length().has_value()) {
+ int length = search_element_string.length().value();
+      // If search_element's length is less than or equal to
+      // kMaxInlineMatchSequence, we inline the entire
+      // matching sequence.
+ if (length <= kMaxInlineMatchSequence) {
+ JSCallReducerAssembler a(this, node);
+ Node* subgraph =
+ a.ReduceStringPrototypeStartsWith(search_element_string);
+ return ReplaceWithSubgraph(&a, subgraph);
}
}
}
- return NoChange();
+ JSCallReducerAssembler a(this, node);
+ Node* subgraph = a.ReduceStringPrototypeStartsWith();
+ return ReplaceWithSubgraph(&a, subgraph);
}
// ES section 21.1.3.1 String.prototype.charAt ( pos )
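
The reduction above boils down to a three-way decision on the search argument: bail out if it is a known non-string constant, inline the full character-compare sequence if it is a known string of length at most kMaxInlineMatchSequence, and otherwise inline the generic path behind a string check that deoptimizes. The following standalone C++ sketch mirrors only that decision logic; ChooseStartsWithStrategy, Strategy and the boolean parameters are illustrative names for this sketch, not V8 API.

    #include <cstddef>
    #include <cstdio>

    // Mirrors JSCallReducer::kMaxInlineMatchSequence from the header change below.
    constexpr std::size_t kMaxInlineMatchSequence = 3;

    enum class Strategy { kNoChange, kInlineFullMatch, kInlineWithStringCheck };

    // is_constant: the search argument is a compile-time known heap object.
    // constant_is_string / constant_length: properties of that constant, if any.
    Strategy ChooseStartsWithStrategy(bool is_constant, bool constant_is_string,
                                      std::size_t constant_length) {
      if (is_constant && !constant_is_string) return Strategy::kNoChange;
      if (is_constant && constant_is_string &&
          constant_length <= kMaxInlineMatchSequence) {
        return Strategy::kInlineFullMatch;  // emit the whole compare sequence
      }
      // Otherwise inline the generic path; a runtime check deoptimizes if the
      // search argument turns out not to be a string.
      return Strategy::kInlineWithStringCheck;
    }

    int main() {
      // "abc".startsWith("ab") with a constant 2-char search string: full inline.
      std::printf("%d\n", static_cast<int>(ChooseStartsWithStrategy(true, true, 2)));
      // Non-constant search argument: generic inline with a deopt guard.
      std::printf("%d\n", static_cast<int>(ChooseStartsWithStrategy(false, false, 0)));
    }
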
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 4905d57021..5cbf62144c 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -55,6 +55,10 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
temp_zone_(temp_zone),
flags_(flags) {}
+  // Max string length for inlining the entire match sequence for
+  // String.prototype.startsWith in JSCallReducer.
+ static constexpr int kMaxInlineMatchSequence = 3;
+
const char* reducer_name() const override { return "JSCallReducer"; }
Reduction Reduce(Node* node) final;
@@ -141,7 +145,10 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceRegExpPrototypeTest(Node* node);
Reduction ReduceReturnReceiver(Node* node);
- Reduction ReduceStringPrototypeIndexOf(Node* node);
+
+ enum class StringIndexOfIncludesVariant { kIncludes, kIndexOf };
+ Reduction ReduceStringPrototypeIndexOfIncludes(
+ Node* node, StringIndexOfIncludesVariant variant);
Reduction ReduceStringPrototypeSubstring(Node* node);
Reduction ReduceStringPrototypeSlice(Node* node);
Reduction ReduceStringPrototypeSubstr(Node* node);
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 00bb53d2d8..267890a11c 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -429,7 +429,7 @@ NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind)
: ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) {
- DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) ||
+ DCHECK(IsLoadICKind(slot_kind) || IsSetNamedICKind(slot_kind) ||
IsDefineNamedOwnICKind(slot_kind) || IsKeyedLoadICKind(slot_kind) ||
IsKeyedHasICKind(slot_kind) || IsKeyedStoreICKind(slot_kind) ||
IsStoreInArrayLiteralICKind(slot_kind) ||
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index c4a22ae195..0231248efe 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -126,8 +126,7 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kEager,
- DeoptimizeReason::kDeoptimizeNow, FeedbackSource()),
+ common()->Deoptimize(DeoptimizeReason::kDeoptimizeNow, FeedbackSource()),
frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 5c116edd68..3ff17052ee 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -1958,7 +1958,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
broker()->GetFeedbackForPropertyAccess(source, access_mode, static_name);
switch (feedback.kind()) {
case ProcessedFeedback::kInsufficient:
- return ReduceSoftDeoptimize(
+ return ReduceEagerDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
case ProcessedFeedback::kNamedAccess:
@@ -1974,7 +1974,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
}
}
-Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
+Reduction JSNativeContextSpecialization::ReduceEagerDeoptimize(
Node* node, DeoptimizeReason reason) {
if (!(flags() & kBailoutOnUninitialized)) return NoChange();
@@ -1982,9 +1982,9 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state =
NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kSoft, reason, FeedbackSource()),
- frame_state, effect, control);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(reason, FeedbackSource()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -2142,9 +2142,10 @@ Reduction JSNativeContextSpecialization::ReduceJSDefineKeyedOwnProperty(
}
Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
- Node* receiver, ConvertReceiverMode receiver_mode, Node* context,
- Node* frame_state, Node** effect, Node** control,
- ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
+ Node* receiver, ConvertReceiverMode receiver_mode,
+ Node* lookup_start_object, Node* context, Node* frame_state, Node** effect,
+ Node** control, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info) {
ObjectRef constant = access_info.constant().value();
if (access_info.IsDictionaryProtoAccessorConstant()) {
@@ -2166,6 +2167,11 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
receiver_mode),
target, receiver, feedback, context, frame_state, *effect, *control);
} else {
+ // Disable optimizations for super ICs using API getters, so that we get
+ // the correct receiver checks.
+ if (receiver != lookup_start_object) {
+ return nullptr;
+ }
Node* holder = access_info.holder().has_value()
? jsgraph()->Constant(access_info.holder().value())
: receiver;
@@ -2289,9 +2295,9 @@ JSNativeContextSpecialization::BuildPropertyLoad(
receiver == lookup_start_object
? ConvertReceiverMode::kNotNullOrUndefined
: ConvertReceiverMode::kAny;
- value =
- InlinePropertyGetterCall(receiver, receiver_mode, context, frame_state,
- &effect, &control, if_exceptions, access_info);
+ value = InlinePropertyGetterCall(
+ receiver, receiver_mode, lookup_start_object, context, frame_state,
+ &effect, &control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
Node* cell = jsgraph()->Constant(access_info.constant().value().AsCell());
value = effect =
@@ -2314,8 +2320,10 @@ JSNativeContextSpecialization::BuildPropertyLoad(
name, access_info, lookup_start_object, &effect, &control);
}
}
-
- return ValueEffectControl(value, effect, control);
+ if (value != nullptr) {
+ return ValueEffectControl(value, effect, control);
+ }
+ return base::Optional<ValueEffectControl>();
}
JSNativeContextSpecialization::ValueEffectControl
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 9b9096c3c3..c0301ec83f 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -118,7 +118,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value,
KeyedAccessMode const& keyed_mode);
- Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+ Reduction ReduceEagerDeoptimize(Node* node, DeoptimizeReason reason);
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSLoadPropertyWithEnumeratedKey(Node* node);
@@ -170,8 +170,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// Helpers for accessor inlining.
Node* InlinePropertyGetterCall(Node* receiver,
ConvertReceiverMode receiver_mode,
- Node* context, Node* frame_state,
- Node** effect, Node** control,
+ Node* lookup_start_object, Node* context,
+ Node* frame_state, Node** effect,
+ Node** control,
ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info);
void InlinePropertySetterCall(Node* receiver, Node* value, Node* context,
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 7b63b0279d..199a960c58 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -277,7 +277,7 @@ CompareOperationHint JSTypeHintLowering::GetCompareOperationHint(
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
const Operator* op, Node* operand, Node* effect, Node* control,
FeedbackSlot slot) const {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForUnaryOperation)) {
return LoweringResult::Exit(node);
@@ -349,7 +349,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
FeedbackSlot slot) const {
switch (op->opcode()) {
case IrOpcode::kJSStrictEqual: {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
return LoweringResult::Exit(node);
@@ -363,7 +363,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual: {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
return LoweringResult::Exit(node);
@@ -375,7 +375,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
break;
}
case IrOpcode::kJSInstanceOf: {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
return LoweringResult::Exit(node);
@@ -396,7 +396,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
case IrOpcode::kJSExponentiate: {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) {
return LoweringResult::Exit(node);
@@ -424,7 +424,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInNextOperation(
Node* receiver, Node* cache_array, Node* cache_type, Node* index,
Node* effect, Node* control, FeedbackSlot slot) const {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
return LoweringResult::Exit(node);
@@ -436,7 +436,7 @@ JSTypeHintLowering::LoweringResult
JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
Node* control,
FeedbackSlot slot) const {
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
return LoweringResult::Exit(node);
@@ -463,7 +463,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceCallOperation(
Node* control, FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSCall ||
op->opcode() == IrOpcode::kJSCallWithSpread);
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
return LoweringResult::Exit(node);
@@ -476,7 +476,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
Node* control, FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
op->opcode() == IrOpcode::kJSConstructWithSpread);
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
return LoweringResult::Exit(node);
@@ -491,14 +491,12 @@ JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op,
FeedbackSlot load_slot,
FeedbackSlot call_slot) const {
DCHECK_EQ(IrOpcode::kJSGetIterator, op->opcode());
- // Insert soft deopt if the load feedback is invalid.
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
load_slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
return LoweringResult::Exit(node);
}
- // Insert soft deopt if the call feedback is invalid.
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
call_slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
return LoweringResult::Exit(node);
@@ -510,7 +508,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
const Operator* op, Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
op->opcode() == IrOpcode::kJSLoadNamedFromSuper);
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
return LoweringResult::Exit(node);
@@ -522,7 +520,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadKeyedOperation(
const Operator* op, Node* obj, Node* key, Node* effect, Node* control,
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
return LoweringResult::Exit(node);
@@ -537,7 +535,7 @@ JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj,
FeedbackSlot slot) const {
DCHECK(op->opcode() == IrOpcode::kJSSetNamedProperty ||
op->opcode() == IrOpcode::kJSDefineNamedOwnProperty);
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
return LoweringResult::Exit(node);
@@ -554,7 +552,7 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
op->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
op->opcode() == IrOpcode::kJSDefineKeyedOwnPropertyInLiteral ||
op->opcode() == IrOpcode::kJSDefineKeyedOwnProperty);
- if (Node* node = TryBuildSoftDeopt(
+ if (Node* node = BuildDeoptIfFeedbackIsInsufficient(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
return LoweringResult::Exit(node);
@@ -562,17 +560,16 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
return LoweringResult::NoChange();
}
-Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackSlot slot, Node* effect,
- Node* control,
- DeoptimizeReason reason) const {
+Node* JSTypeHintLowering::BuildDeoptIfFeedbackIsInsufficient(
+ FeedbackSlot slot, Node* effect, Node* control,
+ DeoptimizeReason reason) const {
if (!(flags() & kBailoutOnUninitialized)) return nullptr;
FeedbackSource source(feedback_vector(), slot);
if (!broker()->FeedbackIsInsufficient(source)) return nullptr;
Node* deoptimize = jsgraph()->graph()->NewNode(
- jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
- FeedbackSource()),
+ jsgraph()->common()->Deoptimize(reason, FeedbackSource()),
jsgraph()->Dead(), effect, control);
Node* frame_state =
NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead());
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index c89acd12ff..258acc6657 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -165,8 +165,9 @@ class JSTypeHintLowering {
BinaryOperationHint GetBinaryOperationHint(FeedbackSlot slot) const;
CompareOperationHint GetCompareOperationHint(FeedbackSlot slot) const;
- Node* TryBuildSoftDeopt(FeedbackSlot slot, Node* effect, Node* control,
- DeoptimizeReason reson) const;
+ Node* BuildDeoptIfFeedbackIsInsufficient(FeedbackSlot slot, Node* effect,
+ Node* control,
+                                           DeoptimizeReason reason) const;
JSHeapBroker* broker() const { return broker_; }
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index f5bf0cbb44..07bfbea3f1 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -470,6 +470,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
num_returns++;
}
}
+ USE(num_fp_returns);
// Add parameters in registers and on the stack.
for (int i = 0; i < js_parameter_count; i++) {
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 5f3afc5a84..676e16c3a2 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -3,15 +3,19 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator-reducer.h"
+
#include <cmath>
#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
+#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
+#include "src/codegen/tnode.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -2187,6 +2191,123 @@ Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
return NoChange();
}
+namespace {
+
+// Returns true if |node| is a constant whose value is 0.
+bool IsZero(Node* node) {
+ switch (node->opcode()) {
+#define CASE_IS_ZERO(opcode, matcher) \
+ case IrOpcode::opcode: { \
+ matcher m(node); \
+ return m.Is(0); \
+ }
+ CASE_IS_ZERO(kInt32Constant, Int32Matcher)
+ CASE_IS_ZERO(kInt64Constant, Int64Matcher)
+#undef CASE_IS_ZERO
+ default:
+ break;
+ }
+ return false;
+}
+
+// If |node| is of the form "x == 0", then return "x" (in order to remove the
+// "== 0" part).
+base::Optional<Node*> TryGetInvertedCondition(Node* cond) {
+ if (cond->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(cond);
+ if (IsZero(m.right().node())) {
+ return m.left().node();
+ }
+ }
+ return base::nullopt;
+}
+
+struct SimplifiedCondition {
+ Node* condition;
+ bool is_inverted;
+};
+
+// Tries to simplify |cond| by removing all top-level "== 0". Every time such a
+// construction is removed, the meaning of the comparison is inverted. This is
+// recorded by the variable |is_inverted| throughout this function, and returned
+// at the end. If |is_inverted| is true at the end, the caller should invert the
+// if/else branches following the comparison.
+base::Optional<SimplifiedCondition> TrySimplifyCompareZero(Node* cond) {
+ bool is_inverted = false;
+ bool changed = false;
+ base::Optional<Node*> new_cond;
+ while ((new_cond = TryGetInvertedCondition(cond)).has_value()) {
+ cond = *new_cond;
+ is_inverted = !is_inverted;
+ changed = true;
+ }
+ if (changed) {
+ return SimplifiedCondition{cond, is_inverted};
+ } else {
+ return {};
+ }
+}
+
+} // namespace
+
+void MachineOperatorReducer::SwapBranches(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kBranch);
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ NodeProperties::ChangeOp(use, common()->IfFalse());
+ break;
+ case IrOpcode::kIfFalse:
+ NodeProperties::ChangeOp(use, common()->IfTrue());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ NodeProperties::ChangeOp(
+ node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
+}
+
+// If |node| is a branch, removes all top-level 32-bit "== 0" from |node|.
+Reduction MachineOperatorReducer::SimplifyBranch(Node* node) {
+ Node* cond = node->InputAt(0);
+ if (auto simplified = TrySimplifyCompareZero(cond)) {
+ node->ReplaceInput(0, simplified->condition);
+ if (simplified->is_inverted) {
+ switch (node->opcode()) {
+ case IrOpcode::kBranch:
+ SwapBranches(node);
+ break;
+ case IrOpcode::kTrapIf:
+ NodeProperties::ChangeOp(node,
+ common()->TrapUnless(TrapIdOf(node->op())));
+ break;
+ case IrOpcode::kTrapUnless:
+ NodeProperties::ChangeOp(node,
+ common()->TrapIf(TrapIdOf(node->op())));
+ break;
+ case IrOpcode::kDeoptimizeIf: {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
+ NodeProperties::ChangeOp(
+ node, common()->DeoptimizeUnless(p.reason(), p.feedback()));
+ break;
+ }
+ case IrOpcode::kDeoptimizeUnless: {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
+ NodeProperties::ChangeOp(
+ node, common()->DeoptimizeIf(p.reason(), p.feedback()));
+ break;
+ }
+        default:
+          UNREACHABLE();
+ }
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kBranch ||
node->opcode() == IrOpcode::kDeoptimizeIf ||
@@ -2197,17 +2318,18 @@ Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
// Reductions involving control flow happen elsewhere. Non-zero inputs are
// considered true in all conditional ops.
NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
+ Reduction reduction = NoChange();
if (condition.IsTruncateInt64ToInt32()) {
if (auto replacement =
ReduceConditionalN<Word64Adapter>(condition.node())) {
NodeProperties::ReplaceValueInput(node, *replacement, 0);
- return Changed(node);
+ reduction = Changed(node);
}
} else if (auto replacement = ReduceConditionalN<Word32Adapter>(node)) {
NodeProperties::ReplaceValueInput(node, *replacement, 0);
- return Changed(node);
+ reduction = Changed(node);
}
- return NoChange();
+ return reduction.FollowedBy(SimplifyBranch(node));
}
template <typename WordNAdapter>
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 9f12f81837..ec8bea5412 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -131,6 +131,12 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
template <typename WordNAdapter>
Reduction ReduceWordNXor(Node* node);
+ // Tries to simplify "if(x == 0)" by removing the "== 0" and inverting
+ // branches.
+ Reduction SimplifyBranch(Node* node);
+ // Helper for SimplifyBranch; swaps the if/else of a branch.
+ void SwapBranches(Node* node);
+
// Helper for ReduceConditional. Does not perform the actual reduction; just
// returns a new Node that could be used as the input to the condition.
template <typename WordNAdapter>
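
At the source level, the SimplifyBranch/TrySimplifyCompareZero rewrite above relies on two facts: stripping one top-level "x == 0" while swapping the branch arms preserves behavior, and an even number of "== 0" wrappers cancels out entirely. A minimal standalone C++ check of that equivalence (Original/Rewritten are made-up helpers, not V8 code):

    #include <cassert>
    #include <initializer_list>

    int Original(int x) { return (x == 0) ? 1 : 2; }
    int Rewritten(int x) { return x ? 2 : 1; }        // "== 0" removed, arms swapped

    int OriginalDouble(int x) { return ((x == 0) == 0) ? 1 : 2; }
    int RewrittenDouble(int x) { return x ? 1 : 2; }  // two wrappers cancel out

    int main() {
      for (int x : {-5, 0, 1, 42}) {
        assert(Original(x) == Rewritten(x));
        assert(OriginalDouble(x) == RewrittenDouble(x));
      }
      return 0;
    }
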
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 27fb3b247f..4c3197cf93 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -83,6 +83,7 @@
V(DeadValue) \
V(Dead) \
V(Plug) \
+ V(SLVerifierHint) \
V(StaticAssert)
// Opcodes for JavaScript operators.
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 0f200f2cfe..cd546fe9c8 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -28,10 +28,12 @@
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/branch-condition-duplicator.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
@@ -1066,7 +1068,7 @@ PipelineStatistics* CreatePipelineStatistics(
} // namespace
-class PipelineCompilationJob final : public OptimizedCompilationJob {
+class PipelineCompilationJob final : public TurbofanCompilationJob {
public:
PipelineCompilationJob(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
@@ -1104,7 +1106,8 @@ PipelineCompilationJob::PipelineCompilationJob(
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
- : OptimizedCompilationJob(&compilation_info_, "TurboFan"),
+ : TurbofanCompilationJob(&compilation_info_,
+ CompilationJob::State::kReadyToPrepare),
zone_(isolate->allocator(), kPipelineCompilationJobZoneName),
zone_stats_(isolate->allocator()),
compilation_info_(&zone_, isolate, shared_info, function, code_kind,
@@ -1336,9 +1339,9 @@ struct InliningPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kJS);
JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
if (data->info()->bailout_on_uninitialized()) {
call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
@@ -1402,9 +1405,9 @@ struct JSWasmInliningPhase {
data->broker(), data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
data->jsgraph(), data->broker(),
data->source_positions(),
@@ -1507,12 +1510,12 @@ struct TypedLoweringPhase {
&graph_reducer, data->jsgraph(), data->broker());
TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
data->jsgraph(), data->broker());
- SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
- data->broker());
+ SimplifiedOperatorReducer simple_reducer(
+ &graph_reducer, data->jsgraph(), data->broker(), BranchSemantics::kJS);
CheckpointElimination checkpoint_elimination(&graph_reducer);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kJS);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &create_lowering);
@@ -1580,7 +1583,7 @@ struct SimplifiedLoweringPhase {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
&data->info()->tick_counter(), linkage,
- data->observe_node_manager());
+ data->info(), data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1619,7 +1622,7 @@ struct WasmInliningPhase {
void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
uint32_t function_index, const wasm::WireBytesStorage* wire_bytes,
std::vector<compiler::WasmLoopInfo>* loop_info) {
- if (WasmInliner::any_inlining_impossible(data->graph()->NodeCount())) {
+ if (!WasmInliner::graph_size_allows_inlining(data->graph()->NodeCount())) {
return;
}
GraphReducer graph_reducer(
@@ -1627,9 +1630,11 @@ struct WasmInliningPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead(&graph_reducer, data->graph(), data->common(),
temp_zone);
+ std::unique_ptr<char[]> debug_name = data->info()->GetDebugName();
WasmInliner inliner(&graph_reducer, env, function_index,
data->source_positions(), data->node_origins(),
- data->mcgraph(), wire_bytes, loop_info);
+ data->mcgraph(), wire_bytes, loop_info,
+ debug_name.get());
AddReducer(data, &graph_reducer, &dead);
AddReducer(data, &graph_reducer, &inliner);
graph_reducer.ReduceGraph();
@@ -1736,13 +1741,14 @@ struct EarlyOptimizationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
- data->broker());
+ data->broker(),
+ BranchSemantics::kMachine);
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1810,9 +1816,9 @@ struct EffectControlLinearizationPhase {
data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -1854,9 +1860,9 @@ struct LoadEliminationPhase {
temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kJS);
TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
data->jsgraph(), data->broker());
ConstantFoldingReducer constant_folding_reducer(
@@ -1919,9 +1925,9 @@ struct LateOptimizationPhase {
data->common(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone);
SelectLowering select_lowering(&graph_assembler, data->graph());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
@@ -1975,6 +1981,16 @@ struct DecompressionOptimizationPhase {
}
};
+struct BranchConditionDuplicationPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(BranchConditionDuplication)
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ BranchConditionDuplicator compare_zero_branch_optimizer(temp_zone,
+ data->graph());
+ compare_zero_branch_optimizer.Reduce();
+ }
+};
+
#if V8_ENABLE_WEBASSEMBLY
struct WasmOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)
@@ -1993,9 +2009,9 @@ struct WasmOptimizationPhase {
allow_signalling_nan);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
@@ -2019,9 +2035,9 @@ struct WasmOptimizationPhase {
allow_signalling_nan);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
&graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
@@ -2052,9 +2068,9 @@ struct CsaEarlyOptimizationPhase {
true);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
@@ -2074,9 +2090,9 @@ struct CsaEarlyOptimizationPhase {
true);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
&graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
@@ -2103,9 +2119,9 @@ struct CsaOptimizationPhase {
data->common(), temp_zone);
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
allow_signalling_nan);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -2481,7 +2497,7 @@ struct VerifyGraphPhase {
#undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
#if V8_ENABLE_WEBASSEMBLY
-class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
+class WasmHeapStubCompilationJob final : public TurbofanCompilationJob {
public:
WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph,
@@ -2491,8 +2507,7 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
- : OptimizedCompilationJob(&info_, "TurboFan",
- CompilationJob::State::kReadyToExecute),
+ : TurbofanCompilationJob(&info_, CompilationJob::State::kReadyToExecute),
debug_name_(std::move(debug_name)),
info_(base::CStrVector(debug_name_.get()), graph->zone(), kind),
call_descriptor_(call_descriptor),
@@ -2526,14 +2541,11 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
};
// static
-std::unique_ptr<OptimizedCompilationJob>
-Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::unique_ptr<Zone> zone,
- Graph* graph, CodeKind kind,
- std::unique_ptr<char[]> debug_name,
- const AssemblerOptions& options,
- SourcePositionTable* source_positions) {
+std::unique_ptr<TurbofanCompilationJob> Pipeline::NewWasmHeapStubCompilationJob(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph, CodeKind kind,
+ std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
+ SourcePositionTable* source_positions) {
return std::make_unique<WasmHeapStubCompilationJob>(
isolate, call_descriptor, std::move(zone), graph, kind,
std::move(debug_name), options, source_positions);
@@ -2790,6 +2802,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<DecompressionOptimizationPhase>();
RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
+ Run<BranchConditionDuplicationPhase>();
+ RunPrintAndVerify(BranchConditionDuplicationPhase::phase_name(), true);
+
data->source_positions()->RemoveDecorator();
if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
@@ -2930,6 +2945,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
true);
+ pipeline.Run<BranchConditionDuplicationPhase>();
+ pipeline.RunPrintAndVerify(BranchConditionDuplicationPhase::phase_name(),
+ true);
+
pipeline.Run<VerifyGraphPhase>(true);
int graph_hash_before_scheduling = 0;
@@ -3319,7 +3338,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
+std::unique_ptr<TurbofanCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
bool has_script, BytecodeOffset osr_offset, JavaScriptFrame* osr_frame) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
@@ -3581,7 +3600,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
data->BeginPhaseKind("V8.TFCodeGeneration");
data->InitializeCodeGenerator(linkage);
- UnparkedScopeIfNeeded unparked_scope(data->broker(), FLAG_code_comments);
+ UnparkedScopeIfNeeded unparked_scope(data->broker());
Run<AssembleCodePhase>();
if (data->info()->trace_turbo_json()) {
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 2a166b2073..ac61b406da 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -18,7 +18,7 @@ namespace internal {
struct AssemblerOptions;
class OptimizedCompilationInfo;
-class OptimizedCompilationJob;
+class TurbofanCompilationJob;
class ProfileDataFromFile;
class RegisterConfiguration;
@@ -48,7 +48,7 @@ struct WasmLoopInfo;
class Pipeline : public AllStatic {
public:
// Returns a new compilation job for the given JavaScript function.
- static V8_EXPORT_PRIVATE std::unique_ptr<OptimizedCompilationJob>
+ static V8_EXPORT_PRIVATE std::unique_ptr<TurbofanCompilationJob>
NewCompilationJob(Isolate* isolate, Handle<JSFunction> function,
CodeKind code_kind, bool has_script,
BytecodeOffset osr_offset = BytecodeOffset::None(),
@@ -70,7 +70,7 @@ class Pipeline : public AllStatic {
SourcePositionTable* source_positions = nullptr);
// Returns a new compilation job for a wasm heap stub.
- static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob(
+ static std::unique_ptr<TurbofanCompilationJob> NewWasmHeapStubCompilationJob(
Isolate* isolate, CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph, CodeKind kind,
std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 23051dfbba..787f7f9269 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#include <initializer_list>
+#include <type_traits>
-#include "src/base/type-traits.h"
#include "src/codegen/assembler.h"
#include "src/common/globals.h"
#include "src/compiler/access-builder.h"
@@ -969,9 +969,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
template <class... CArgs>
Node* CallCFunction(Node* function, base::Optional<MachineType> return_type,
CArgs... cargs) {
- static_assert(v8::internal::conjunction<
- std::is_convertible<CArgs, CFunctionArg>...>::value,
- "invalid argument types");
+ static_assert(
+ std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
+ "invalid argument types");
return CallCFunction(function, return_type, {cargs...});
}
@@ -983,9 +983,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* CallCFunctionWithoutFunctionDescriptor(Node* function,
MachineType return_type,
CArgs... cargs) {
- static_assert(v8::internal::conjunction<
- std::is_convertible<CArgs, CFunctionArg>...>::value,
- "invalid argument types");
+ static_assert(
+ std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
+ "invalid argument types");
return CallCFunctionWithoutFunctionDescriptor(function, return_type,
{cargs...});
}
@@ -1000,9 +1000,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineType return_type,
SaveFPRegsMode mode,
CArgs... cargs) {
- static_assert(v8::internal::conjunction<
- std::is_convertible<CArgs, CFunctionArg>...>::value,
- "invalid argument types");
+ static_assert(
+ std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
+ "invalid argument types");
return CallCFunctionWithCallerSavedRegisters(function, return_type, mode,
{cargs...});
}
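
The static_assert changes above replace V8's hand-rolled conjunction trait with std::conjunction_v from <type_traits>, which folds a pack of std::is_convertible checks into a single compile-time boolean. A small self-contained illustration of the same pattern (AllConvertibleToDouble is a made-up trait for this example):

    #include <type_traits>

    // True only if every type in the pack converts to double.
    template <class... Args>
    constexpr bool AllConvertibleToDouble =
        std::conjunction_v<std::is_convertible<Args, double>...>;

    static_assert(AllConvertibleToDouble<int, float, long>, "all numeric types convert");
    static_assert(!AllConvertibleToDouble<int, const char*>, "a pointer does not");

    int main() { return 0; }
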
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index d2289dbc5e..96b0a40707 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -844,8 +844,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
use_info.type_check() == TypeCheckKind::kNumberOrOddball ||
use_info.type_check() == TypeCheckKind::kArrayIndex) &&
IsInt32Double(fv))) {
- return InsertTypeGuardForVerifier(NodeProperties::GetType(node),
- MakeTruncatedInt32Constant(fv));
+ return InsertTypeOverrideForVerifier(NodeProperties::GetType(node),
+ MakeTruncatedInt32Constant(fv));
}
break;
}
@@ -1109,8 +1109,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
if (base::IsValueInRangeForNumericType<int64_t>(fv)) {
int64_t const iv = static_cast<int64_t>(fv);
if (static_cast<double>(iv) == fv) {
- return InsertTypeGuardForVerifier(NodeProperties::GetType(node),
- jsgraph()->Int64Constant(iv));
+ return InsertTypeOverrideForVerifier(NodeProperties::GetType(node),
+ jsgraph()->Int64Constant(iv));
}
}
}
@@ -1121,7 +1121,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
if (m.HasResolvedValue() && m.Ref(broker_).IsBigInt() &&
use_info.truncation().IsUsedAsWord64()) {
BigIntRef bigint = m.Ref(broker_).AsBigInt();
- return InsertTypeGuardForVerifier(
+ return InsertTypeOverrideForVerifier(
NodeProperties::GetType(node),
jsgraph()->Int64Constant(static_cast<int64_t>(bigint.AsUint64())));
}
@@ -1571,14 +1571,13 @@ Node* RepresentationChanger::InsertCheckedFloat64ToInt32(
node, simplified()->CheckedFloat64ToInt32(check, feedback), use_node);
}
-Node* RepresentationChanger::InsertTypeGuardForVerifier(const Type& type,
- Node* node) {
+Node* RepresentationChanger::InsertTypeOverrideForVerifier(const Type& type,
+ Node* node) {
if (verification_enabled()) {
DCHECK(!type.IsInvalid());
- node = jsgraph()->graph()->NewNode(jsgraph()->common()->TypeGuard(type),
- node, jsgraph()->graph()->start(),
- jsgraph()->graph()->start());
- verifier_->RecordTypeGuard(node);
+ node = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->SLVerifierHint(nullptr, type), node);
+ verifier_->RecordHint(node);
}
return node;
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 5fc368da7e..5c1ffabf67 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -405,7 +405,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
Node* InsertTruncateInt64ToInt32(Node* node);
Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason,
const FeedbackSource& feedback = {});
- Node* InsertTypeGuardForVerifier(const Type& type, Node* node);
+ Node* InsertTypeOverrideForVerifier(const Type& type, Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/simplified-lowering-verifier.cc b/deps/v8/src/compiler/simplified-lowering-verifier.cc
index d113a9f081..25f177883f 100644
--- a/deps/v8/src/compiler/simplified-lowering-verifier.cc
+++ b/deps/v8/src/compiler/simplified-lowering-verifier.cc
@@ -22,26 +22,34 @@ Truncation LeastGeneralTruncation(const Truncation& t1, const Truncation& t2,
return LeastGeneralTruncation(LeastGeneralTruncation(t1, t2), t3);
}
+void SimplifiedLoweringVerifier::CheckType(Node* node, const Type& type) {
+ CHECK(NodeProperties::IsTyped(node));
+ Type node_type = NodeProperties::GetType(node);
+ if (!type.Is(node_type)) {
+ std::ostringstream type_str;
+ type.PrintTo(type_str);
+ std::ostringstream node_type_str;
+ node_type.PrintTo(node_type_str);
+
+ FATAL(
+ "SimplifiedLoweringVerifierError: verified type %s of node #%d:%s "
+ "does not match with type %s assigned during lowering",
+ type_str.str().c_str(), node->id(), node->op()->mnemonic(),
+ node_type_str.str().c_str());
+ }
+}
+
void SimplifiedLoweringVerifier::CheckAndSet(Node* node, const Type& type,
const Truncation& trunc) {
DCHECK(!type.IsInvalid());
if (NodeProperties::IsTyped(node)) {
- Type node_type = NodeProperties::GetType(node);
- if (!type.Is(node_type)) {
- std::ostringstream type_str;
- type.PrintTo(type_str);
- std::ostringstream node_type_str;
- node_type.PrintTo(node_type_str);
-
- FATAL(
- "SimplifiedLoweringVerifierError: verified type %s of node #%d:%s "
- "does not match with type %s assigned during lowering",
- type_str.str().c_str(), node->id(), node->op()->mnemonic(),
- node_type_str.str().c_str());
- }
+ CheckType(node, type);
} else {
- NodeProperties::SetType(node, type);
+ // We store the type inferred by the verification pass. We do not update
+ // the node's type directly, because following phases might encounter
+ // unsound types as long as the verification is not complete.
+ SetType(node, type);
}
SetTruncation(node, GeneralizeTruncation(trunc, type));
}
@@ -188,20 +196,7 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
break;
}
case IrOpcode::kTypeGuard: {
- Type input_type = Type::Any();
- if (is_recorded_type_guard(node)) {
- // If this TypeGuard is recorded, it means that it has been introduced
- // during lowering to provide type information for nodes that cannot be
- // typed directly (e.g. constants), so we cannot assume the input node
- // is typed.
- if (NodeProperties::IsTyped(node->InputAt(0))) {
- input_type = InputType(node, 0);
- }
- } else {
- input_type = InputType(node, 0);
- }
- Type output_type = op_typer.TypeTypeGuard(node->op(), input_type);
-
+ Type output_type = op_typer.TypeTypeGuard(node->op(), InputType(node, 0));
    // TypeGuard has no effect on truncation, but the restricted type may help
// generalize it.
CheckAndSet(node, output_type, InputTruncation(node, 0));
@@ -239,6 +234,30 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
}
break;
}
+ case IrOpcode::kSLVerifierHint: {
+ Type output_type = InputType(node, 0);
+ Truncation output_trunc = InputTruncation(node, 0);
+ const auto& p = SLVerifierHintParametersOf(node->op());
+
+ if (const Operator* semantics = p.semantics()) {
+ switch (semantics->opcode()) {
+ case IrOpcode::kPlainPrimitiveToNumber:
+ output_type = op_typer.ToNumber(output_type);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ CheckType(node, output_type);
+ }
+
+ if (p.override_output_type()) {
+ output_type = *p.override_output_type();
+ }
+
+ SetType(node, output_type);
+ SetTruncation(node, GeneralizeTruncation(output_trunc, output_type));
+ break;
+ }
default:
// TODO(nicohartmann): Support remaining operators.
diff --git a/deps/v8/src/compiler/simplified-lowering-verifier.h b/deps/v8/src/compiler/simplified-lowering-verifier.h
index ceff65dfdd..6ee7effe93 100644
--- a/deps/v8/src/compiler/simplified-lowering-verifier.h
+++ b/deps/v8/src/compiler/simplified-lowering-verifier.h
@@ -16,26 +16,45 @@ class OperationTyper;
class SimplifiedLoweringVerifier final {
public:
struct PerNodeData {
+ base::Optional<Type> type = base::nullopt;
Truncation truncation = Truncation::Any(IdentifyZeros::kDistinguishZeros);
};
SimplifiedLoweringVerifier(Zone* zone, Graph* graph)
- : type_guards_(zone), data_(zone), graph_(graph) {}
+ : hints_(zone), data_(zone), graph_(graph) {}
void VisitNode(Node* node, OperationTyper& op_typer);
- void RecordTypeGuard(Node* node) {
- DCHECK_EQ(node->opcode(), IrOpcode::kTypeGuard);
- DCHECK(!is_recorded_type_guard(node));
- type_guards_.insert(node);
+ void RecordHint(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kSLVerifierHint);
+ hints_.push_back(node);
}
- const ZoneUnorderedSet<Node*>& recorded_type_guards() const {
- return type_guards_;
+ const ZoneVector<Node*>& inserted_hints() const { return hints_; }
+
+ base::Optional<Type> GetType(Node* node) const {
+ if (NodeProperties::IsTyped(node)) {
+ return NodeProperties::GetType(node);
+ }
+ // For nodes that have not been typed before SL, we use the type that has
+ // been inferred by the verifier.
+ if (node->id() < data_.size()) {
+ return data_[node->id()].type;
+ }
+ return base::nullopt;
}
private:
- bool is_recorded_type_guard(Node* node) const {
- return type_guards_.find(node) != type_guards_.end();
+ void ResizeDataIfNecessary(Node* node) {
+ if (data_.size() <= node->id()) {
+ data_.resize(node->id() + 1);
+ }
+ DCHECK_EQ(data_[node->id()].truncation,
+ Truncation::Any(IdentifyZeros::kDistinguishZeros));
+ }
+
+ void SetType(Node* node, const Type& type) {
+ ResizeDataIfNecessary(node);
+ data_[node->id()].type = type;
}
Type InputType(Node* node, int input_index) const {
@@ -45,15 +64,17 @@ class SimplifiedLoweringVerifier final {
if (NodeProperties::IsTyped(input)) {
return NodeProperties::GetType(input);
}
- return Type::None();
+ // For nodes that have not been typed before SL, we use the type that has
+ // been inferred by the verifier.
+ base::Optional<Type> type_opt;
+ if (input->id() < data_.size()) {
+ type_opt = data_[input->id()].type;
+ }
+ return type_opt.has_value() ? *type_opt : Type::None();
}
void SetTruncation(Node* node, const Truncation& truncation) {
- if (data_.size() <= node->id()) {
- data_.resize(node->id() + 1);
- }
- DCHECK_EQ(data_[node->id()].truncation,
- Truncation::Any(IdentifyZeros::kDistinguishZeros));
+ ResizeDataIfNecessary(node);
data_[node->id()].truncation = truncation;
}
@@ -68,6 +89,7 @@ class SimplifiedLoweringVerifier final {
return any_truncation;
}
+ void CheckType(Node* node, const Type& type);
void CheckAndSet(Node* node, const Type& type, const Truncation& trunc);
// Generalize to a less strict truncation in the context of a given type. For
@@ -81,7 +103,7 @@ class SimplifiedLoweringVerifier final {
Zone* graph_zone() const { return graph_->zone(); }
- ZoneUnorderedSet<Node*> type_guards_;
+ ZoneVector<Node*> hints_;
ZoneVector<PerNodeData> data_;
Graph* graph_;
};
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 8298bd0d2e..5c51f2ac8c 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -16,6 +16,8 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/diamond.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-observer.h"
@@ -222,6 +224,23 @@ bool IsSomePositiveOrderedNumber(Type type) {
return type.Is(Type::OrderedNumber()) && (type.IsNone() || type.Min() > 0);
}
+class JSONGraphWriterWithVerifierTypes : public JSONGraphWriter {
+ public:
+ JSONGraphWriterWithVerifierTypes(std::ostream& os, const Graph* graph,
+ const SourcePositionTable* positions,
+ const NodeOriginTable* origins,
+ SimplifiedLoweringVerifier* verifier)
+ : JSONGraphWriter(os, graph, positions, origins), verifier_(verifier) {}
+
+ protected:
+ base::Optional<Type> GetType(Node* node) override {
+ return verifier_->GetType(node);
+ }
+
+ private:
+ SimplifiedLoweringVerifier* verifier_;
+};
+
} // namespace
#ifdef DEBUG
@@ -316,6 +335,7 @@ class RepresentationSelector {
ObserveNodeManager* observe_node_manager,
SimplifiedLoweringVerifier* verifier)
: jsgraph_(jsgraph),
+ broker_(broker),
zone_(zone),
might_need_revisit_(zone),
count_(jsgraph->graph()->NodeCount()),
@@ -721,7 +741,7 @@ class RepresentationSelector {
}
}
- void RunVerifyPhase() {
+ void RunVerifyPhase(OptimizedCompilationInfo* info) {
DCHECK_NOT_NULL(verifier_);
TRACE("--{Verify Phase}--\n");
@@ -741,12 +761,20 @@ class RepresentationSelector {
// Verify all nodes.
for (Node* node : traversal_nodes_) verifier_->VisitNode(node, op_typer_);
- // Eliminate all introduced TypeGuard nodes.
- for (Node* node : verifier_->recorded_type_guards()) {
+ // Print graph.
+ if (info != nullptr && info->trace_turbo_json()) {
+ UnparkedScopeIfNeeded scope(broker_);
+ AllowHandleDereference allow_deref;
+
+ TurboJsonFile json_of(info, std::ios_base::app);
+ JSONGraphWriterWithVerifierTypes writer(
+ json_of, graph(), source_positions_, node_origins_, verifier_);
+ writer.PrintPhase("V8.TFSimplifiedLoweringVerifier");
+ }
+
+ // Eliminate all introduced hints.
+ for (Node* node : verifier_->inserted_hints()) {
Node* input = node->InputAt(0);
- DCHECK_EQ(node->InputAt(1), graph()->start());
- DCHECK_EQ(node->InputAt(2), graph()->start());
- DisconnectFromEffectAndControl(node);
node->ReplaceUses(input);
node->Kill();
}
@@ -759,7 +787,7 @@ class RepresentationSelector {
RunLowerPhase(lowering);
if (verification_enabled()) {
- RunVerifyPhase();
+ RunVerifyPhase(lowering->info_);
}
}
@@ -2106,7 +2134,7 @@ class RepresentationSelector {
VisitLeaf<T>(node, MachineRepresentation::kTaggedSigned);
if (lower<T>()) {
intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
- Node* constant = InsertTypeGuardForVerifier(
+ Node* constant = InsertTypeOverrideForVerifier(
NodeProperties::GetType(node),
lowering->jsgraph()->IntPtrConstant(smi));
DeferReplacement(node, constant);
@@ -2934,7 +2962,9 @@ class RepresentationSelector {
is_asuintn ? Type::UnsignedBigInt64() : Type::SignedBigInt64());
if (lower<T>()) {
if (p.bits() == 0) {
- DeferReplacement(node, jsgraph_->ZeroConstant());
+ DeferReplacement(
+ node, InsertTypeOverrideForVerifier(Type::UnsignedBigInt63(),
+ jsgraph_->ZeroConstant()));
} else if (p.bits() == 64) {
DeferReplacement(node, node->InputAt(0));
} else {
@@ -3214,7 +3244,7 @@ class RepresentationSelector {
}
case IrOpcode::kStringCodePointAt: {
return VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::Word(),
- MachineRepresentation::kTaggedSigned);
+ MachineRepresentation::kWord32);
}
case IrOpcode::kStringFromSingleCharCode: {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
@@ -3549,7 +3579,9 @@ class RepresentationSelector {
case IrOpcode::kPlainPrimitiveToNumber: {
if (InputIs(node, Type::Boolean())) {
VisitUnop<T>(node, UseInfo::Bool(), MachineRepresentation::kWord32);
- if (lower<T>()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) {
+ ChangeToSemanticsHintForVerifier(node, node->op());
+ }
} else if (InputIs(node, Type::String())) {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTagged);
@@ -3560,7 +3592,9 @@ class RepresentationSelector {
if (InputIs(node, Type::NumberOrOddball())) {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) {
+ ChangeToSemanticsHintForVerifier(node, node->op());
+ }
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kWord32);
@@ -3572,7 +3606,9 @@ class RepresentationSelector {
if (InputIs(node, Type::NumberOrOddball())) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) {
+ ChangeToSemanticsHintForVerifier(node, node->op());
+ }
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kFloat64);
@@ -4025,7 +4061,8 @@ class RepresentationSelector {
ProcessInput<T>(node, 0, UseInfo::Any());
return SetOutput<T>(node, MachineRepresentation::kNone);
case IrOpcode::kStaticAssert:
- return VisitUnop<T>(node, UseInfo::Any(),
+ DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
+ return VisitUnop<T>(node, UseInfo::Bool(),
MachineRepresentation::kTagged);
case IrOpcode::kAssertType:
return VisitUnop<T>(node, UseInfo::AnyTagged(),
@@ -4085,16 +4122,27 @@ class RepresentationSelector {
NotifyNodeReplaced(node, replacement);
}
- Node* InsertTypeGuardForVerifier(const Type& type, Node* node) {
+ Node* InsertTypeOverrideForVerifier(const Type& type, Node* node) {
if (verification_enabled()) {
DCHECK(!type.IsInvalid());
- node = graph()->NewNode(common()->TypeGuard(type), node, graph()->start(),
- graph()->start());
- verifier_->RecordTypeGuard(node);
+ node = graph()->NewNode(common()->SLVerifierHint(nullptr, type), node);
+ verifier_->RecordHint(node);
}
return node;
}
+ void ChangeToSemanticsHintForVerifier(Node* node, const Operator* semantics) {
+ DCHECK_EQ(node->op()->ValueInputCount(), 1);
+ DCHECK_EQ(node->op()->EffectInputCount(), 0);
+ DCHECK_EQ(node->op()->ControlInputCount(), 0);
+ if (verification_enabled()) {
+ ChangeOp(node, common()->SLVerifierHint(semantics, base::nullopt));
+ verifier_->RecordHint(node);
+ } else {
+ DeferReplacement(node, node->InputAt(0));
+ }
+ }
+
private:
void ChangeOp(Node* node, const Operator* new_op) {
compiler::NodeProperties::ChangeOp(node, new_op);
@@ -4111,6 +4159,7 @@ class RepresentationSelector {
}
JSGraph* jsgraph_;
+ JSHeapBroker* broker_;
Zone* zone_; // Temporary zone.
// Map from node to its uses that might need to be revisited.
ZoneMap<Node*, ZoneVector<Node*>> might_need_revisit_;
@@ -4306,13 +4355,11 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
- Zone* zone,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- TickCounter* tick_counter,
- Linkage* linkage,
- ObserveNodeManager* observe_node_manager)
+SimplifiedLowering::SimplifiedLowering(
+ JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ TickCounter* tick_counter, Linkage* linkage, OptimizedCompilationInfo* info,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
@@ -4321,6 +4368,7 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
node_origins_(node_origins),
tick_counter_(tick_counter),
linkage_(linkage),
+ info_(info),
observe_node_manager_(observe_node_manager) {}
void SimplifiedLowering::LowerAllNodes() {
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index f60bc1a7e3..a8aa10c55e 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -30,8 +30,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
- NodeOriginTable* node_origins,
- TickCounter* tick_counter, Linkage* linkage,
+ NodeOriginTable* node_origins, TickCounter* tick_counter,
+ Linkage* linkage, OptimizedCompilationInfo* info,
ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
@@ -84,6 +84,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
TickCounter* const tick_counter_;
Linkage* const linkage_;
+ OptimizedCompilationInfo* info_;
ObserveNodeManager* const observe_node_manager_;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 33edd66b4f..a6d8505f97 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -4,10 +4,12 @@
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
@@ -33,10 +35,13 @@ Decision DecideObjectIsSmi(Node* const input) {
} // namespace
-SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
- JSGraph* jsgraph,
- JSHeapBroker* broker)
- : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
+ BranchSemantics branch_semantics)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ broker_(broker),
+ branch_semantics_(branch_semantics) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
@@ -277,7 +282,11 @@ Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
}
Reduction SimplifiedOperatorReducer::ReplaceBoolean(bool value) {
- return Replace(jsgraph()->BooleanConstant(value));
+ if (branch_semantics_ == BranchSemantics::kJS) {
+ return Replace(jsgraph()->BooleanConstant(value));
+ } else {
+ return ReplaceInt32(value);
+ }
}
Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 650de7fb55..ee6de7487e 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
@@ -27,7 +28,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* broker);
+ JSHeapBroker* broker,
+ BranchSemantics branch_semantics);
~SimplifiedOperatorReducer() final;
SimplifiedOperatorReducer(const SimplifiedOperatorReducer&) = delete;
SimplifiedOperatorReducer& operator=(const SimplifiedOperatorReducer&) =
@@ -60,6 +62,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
+ BranchSemantics branch_semantics_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 2daf637ebd..7227dffd05 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1031,6 +1031,7 @@ Type Typer::Visitor::TypeUnreachable(Node* node) { return Type::None(); }
Type Typer::Visitor::TypePlug(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStaticAssert(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeSLVerifierHint(Node* node) { UNREACHABLE(); }
// JS comparison operators.
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 2ee300d88e..b4f6193bce 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -155,6 +155,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// consumed as an effect input somewhere else.
// TODO(mvstanton): support this kind of verification for Wasm compiles, too.
if (code_type != kWasm && node->op()->EffectOutputCount() > 0) {
+#ifdef DEBUG
int effect_edges = 0;
for (Edge edge : node->use_edges()) {
if (all.IsLive(edge.from()) && NodeProperties::IsEffectEdge(edge)) {
@@ -162,6 +163,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
}
DCHECK_GT(effect_edges, 0);
+#endif
}
// Verify that frame state has been inserted for the nodes that need it.
@@ -1628,6 +1630,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_GE(value_count, 1);
CheckValueInputIs(node, 0, Type::Any()); // receiver
break;
+ case IrOpcode::kSLVerifierHint:
+ // SLVerifierHint is internal to SimplifiedLowering and should never be
+ // seen by the verifier.
+ UNREACHABLE();
#if V8_ENABLE_WEBASSEMBLY
case IrOpcode::kJSWasmCall:
CHECK_GE(value_count, 3);
@@ -2134,7 +2140,6 @@ void Verifier::VerifyNode(Node* node) {
bool check_no_control = node->op()->ControlOutputCount() == 0;
bool check_no_effect = node->op()->EffectOutputCount() == 0;
bool check_no_frame_state = node->opcode() != IrOpcode::kFrameState;
- int effect_edges = 0;
if (check_no_effect || check_no_control) {
for (Edge edge : node->use_edges()) {
Node* const user = edge.from();
@@ -2143,7 +2148,6 @@ void Verifier::VerifyNode(Node* node) {
DCHECK(!check_no_control);
} else if (NodeProperties::IsEffectEdge(edge)) {
DCHECK(!check_no_effect);
- effect_edges++;
} else if (NodeProperties::IsFrameStateEdge(edge)) {
DCHECK(!check_no_frame_state);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 0afc124ec8..6dda13e53c 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -482,8 +482,7 @@ class WasmGraphAssembler : public GraphAssembler {
mcgraph()->machine()->Is64() ? ChangeUint32ToUint64(index) : index;
return IntAdd(
IntPtrConstant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)),
- IntMul(index_intptr,
- IntPtrConstant(element_type.element_size_bytes())));
+ IntMul(index_intptr, IntPtrConstant(element_type.value_kind_size())));
}
Node* LoadWasmArrayLength(Node* array) {
@@ -1637,7 +1636,7 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
Node* result;
Node* value = node;
MachineOperatorBuilder* m = mcgraph()->machine();
- int valueSizeInBytes = wasmtype.element_size_bytes();
+ int valueSizeInBytes = wasmtype.value_kind_size();
int valueSizeInBits = 8 * valueSizeInBytes;
bool isFloat = false;
@@ -1671,7 +1670,7 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
// In case we store lower part of WasmI64 expression, we can truncate
// upper 32bits
value = gasm_->TruncateInt64ToInt32(value);
- valueSizeInBytes = wasm::kWasmI32.element_size_bytes();
+ valueSizeInBytes = wasm::kWasmI32.value_kind_size();
valueSizeInBits = 8 * valueSizeInBytes;
if (mem_rep == MachineRepresentation::kWord16) {
value = gasm_->Word32Shl(value, Int32Constant(16));
@@ -3885,7 +3884,7 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
wasm::ValueType type) {
- int alignment = offset % type.element_size_bytes();
+ int alignment = offset % type.value_kind_size();
MachineType mach_type = type.machine_type();
if (COMPRESS_POINTERS_BOOL && mach_type.IsTagged()) {
// We are loading tagged value from off-heap location, so we need to load
@@ -3901,7 +3900,7 @@ const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
wasm::ValueType type) {
- int alignment = offset % type.element_size_bytes();
+ int alignment = offset % type.value_kind_size();
MachineRepresentation rep = type.machine_representation();
if (COMPRESS_POINTERS_BOOL && IsAnyTagged(rep)) {
// We are storing tagged value to off-heap location, so we need to store
@@ -5638,9 +5637,9 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
// Do NOT mark this as Operator::kEliminatable, because that would cause the
// Call node to have no control inputs, which means it could get scheduled
// before the check/trap above.
- Node* a = gasm_->CallBuiltin(
- stub, Operator::kNoDeopt | Operator::kNoThrow, rtt, length,
- Int32Constant(element_type.element_size_bytes()));
+ Node* a =
+ gasm_->CallBuiltin(stub, Operator::kNoDeopt | Operator::kNoThrow, rtt,
+ length, Int32Constant(element_type.value_kind_size()));
if (initial_value != nullptr) {
// TODO(manoskouk): If the loop is ever removed here, we have to update
// ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
@@ -5649,7 +5648,7 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
auto done = gasm_->MakeLabel();
Node* start_offset =
Int32Constant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
- Node* element_size = Int32Constant(element_type.element_size_bytes());
+ Node* element_size = Int32Constant(element_type.value_kind_size());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
gasm_->Goto(&loop, start_offset);
@@ -5676,7 +5675,7 @@ Node* WasmGraphBuilder::ArrayInit(const wasm::ArrayType* type, Node* rtt,
gasm_->CallBuiltin(Builtin::kWasmAllocateArray_Uninitialized,
Operator::kNoDeopt | Operator::kNoThrow, rtt,
Int32Constant(static_cast<int32_t>(elements.size())),
- Int32Constant(element_type.element_size_bytes()));
+ Int32Constant(element_type.value_kind_size()));
for (int i = 0; i < static_cast<int>(elements.size()); i++) {
Node* offset =
gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
@@ -5795,15 +5794,6 @@ void WasmGraphBuilder::TypeCheck(
Node* map = gasm_->LoadMap(object);
- if (config.reference_kind == kFunction) {
- // Currently, the only way for a function to match an rtt is if its map
- // is equal to that rtt.
- callbacks.fail_if_not(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
- return;
- }
-
- DCHECK(config.reference_kind == kArrayOrStruct);
-
// First, check if types happen to be equal. This has been shown to give large
// speedups.
callbacks.succeed_if(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
@@ -7070,13 +7060,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* suspender = gasm_->Load(
MachineType::TaggedPointer(), api_function_ref,
wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kSuspenderOffset));
+ Node* native_context = gasm_->Load(
+ MachineType::TaggedPointer(), api_function_ref,
+ wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kNativeContextOffset));
auto* call_descriptor = GetBuiltinCallDescriptor(
Builtin::kWasmSuspend, zone_, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmSuspend, RelocInfo::WASM_STUB_CALL);
Node* args[] = {value, suspender};
- Node* chained_promise =
- BuildCallToRuntime(Runtime::kWasmCreateResumePromise, args, 2);
+ Node* chained_promise = BuildCallToRuntimeWithContext(
+ Runtime::kWasmCreateResumePromise, native_context, args, 2);
Node* resolved =
gasm_->Call(call_descriptor, call_target, chained_promise, suspender);
gasm_->Goto(&resume, resolved);
@@ -7267,11 +7260,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0;
for (wasm::ValueType type : sig_->parameters()) {
- param_bytes += type.element_size_bytes();
+ param_bytes += type.value_kind_size();
}
int return_bytes = 0;
for (wasm::ValueType type : sig_->returns()) {
- return_bytes += type.element_size_bytes();
+ return_bytes += type.value_kind_size();
}
int stack_slot_bytes = std::max(param_bytes, return_bytes);
@@ -7290,7 +7283,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), values,
Int32Constant(offset), Param(i + 1), effect(),
control()));
- offset += type.element_size_bytes();
+ offset += type.value_kind_size();
}
Node* function_node = gasm_->Load(
@@ -7355,7 +7348,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(GetSafeLoadOperator(offset, type), values,
Int32Constant(offset), effect(), control()));
returns[i] = val;
- offset += type.element_size_bytes();
+ offset += type.value_kind_size();
}
Return(base::VectorOf(returns));
}
@@ -7577,7 +7570,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(GetSafeLoadOperator(offset, type), arg_buffer,
Int32Constant(offset), effect(), control()));
args[pos++] = arg_load;
- offset += type.element_size_bytes();
+ offset += type.value_kind_size();
}
args[pos++] = effect();
@@ -7609,7 +7602,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
Int32Constant(offset), value, effect(),
control()));
- offset += type.element_size_bytes();
+ offset += type.value_kind_size();
pos++;
}
@@ -7661,7 +7654,7 @@ void BuildInlinedJSToWasmWrapper(
builder.BuildJSToWasmWrapper(false, js_wasm_call_data, frame_state);
}
-std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, const wasm::FunctionSig* sig,
const wasm::WasmModule* module, bool is_import,
const wasm::WasmFeatures& enabled_features) {
@@ -8256,7 +8249,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper);
// Run the compilation job synchronously.
- std::unique_ptr<OptimizedCompilationJob> job(
+ std::unique_ptr<TurbofanCompilationJob> job(
Pipeline::NewWasmHeapStubCompilationJob(
isolate, incoming, std::move(zone), graph,
CodeKind::WASM_TO_JS_FUNCTION, std::move(name_buffer),
@@ -8305,7 +8298,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
base::VectorOf(name_buffer.get(), kMaxNameLen) + kNamePrefixLen, sig);
// Run the compilation job synchronously.
- std::unique_ptr<OptimizedCompilationJob> job(
+ std::unique_ptr<TurbofanCompilationJob> job(
Pipeline::NewWasmHeapStubCompilationJob(
isolate, incoming, std::move(zone), graph,
CodeKind::JS_TO_JS_FUNCTION, std::move(name_buffer),
@@ -8362,7 +8355,7 @@ Handle<CodeT> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
base::VectorOf(name_buffer.get(), kMaxNameLen) + kNamePrefixLen, sig);
// Run the compilation job synchronously.
- std::unique_ptr<OptimizedCompilationJob> job(
+ std::unique_ptr<TurbofanCompilationJob> job(
Pipeline::NewWasmHeapStubCompilationJob(
isolate, incoming, std::move(zone), graph, CodeKind::C_WASM_ENTRY,
std::move(name_buffer), AssemblerOptions::Default(isolate)));
@@ -8417,10 +8410,12 @@ base::Vector<const char> GetDebugName(Zone* zone,
FLAG_trace_turbo_graph || FLAG_print_wasm_code)) {
wasm::WireBytesRef name = module->lazily_generated_names.LookupFunctionName(
module_bytes.value(), index);
- int name_len = name.length();
- char* index_name = zone->NewArray<char>(name_len);
- memcpy(index_name, module_bytes->start() + name.offset(), name.length());
- return base::Vector<const char>(index_name, name_len);
+ if (!name.is_empty()) {
+ int name_len = name.length();
+ char* index_name = zone->NewArray<char>(name_len);
+ memcpy(index_name, module_bytes->start() + name.offset(), name_len);
+ return base::Vector<const char>(index_name, name_len);
+ }
}
constexpr int kBufferLength = 24;
@@ -8440,6 +8435,10 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_byte_storage,
const wasm::FunctionBody& func_body, int func_index, Counters* counters,
wasm::WasmFeatures* detected) {
+ // Check that we do not accidentally compile a Wasm function to TurboFan if
+ // --liftoff-only is set.
+ DCHECK(!FLAG_liftoff_only);
+
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileTopTier", "func_index", func_index, "body_size",
func_body.end - func_body.start);
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 9fa017ef84..74a2f4840d 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -27,7 +27,7 @@
namespace v8 {
namespace internal {
struct AssemblerOptions;
-class OptimizedCompilationJob;
+class TurbofanCompilationJob;
namespace compiler {
// Forward declarations for some compiler data structures.
@@ -138,8 +138,8 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule*,
const wasm::FunctionSig*,
Handle<JSFunction> target);
-// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
-std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+// Returns a TurbofanCompilationJob object for a JS to Wasm wrapper.
+std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, const wasm::FunctionSig* sig,
const wasm::WasmModule* module, bool is_import,
const wasm::WasmFeatures& enabled_features);
@@ -223,13 +223,8 @@ class WasmGraphBuilder {
kWasmApiFunctionRefMode,
kNoSpecialParameterMode
};
- enum ReferenceKind : bool { // --
- kArrayOrStruct = true,
- kFunction = false
- };
struct ObjectReferenceKnowledge {
bool object_can_be_null;
- ReferenceKind reference_kind;
uint8_t rtt_depth;
};
enum EnforceBoundsCheck : bool { // --
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
index e8acab6739..7c3d7d56f6 100644
--- a/deps/v8/src/compiler/wasm-inlining.cc
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -29,7 +29,47 @@ Reduction WasmInliner::Reduce(Node* node) {
}
#define TRACE(...) \
- if (FLAG_trace_wasm_inlining) PrintF(__VA_ARGS__);
+ if (FLAG_trace_wasm_inlining) PrintF(__VA_ARGS__)
+
+void WasmInliner::Trace(Node* call, int inlinee, const char* decision) {
+ TRACE("[function %d: considering node %d, call to %d: %s]\n", function_index_,
+ call->id(), inlinee, decision);
+}
+
+uint32_t WasmInliner::FindOriginatingFunction(Node* call) {
+ DCHECK_EQ(inlined_functions_.size(), first_node_id_.size());
+ NodeId id = call->id();
+ if (inlined_functions_.size() == 0 || id < first_node_id_[0]) {
+ return function_index_;
+ }
+ for (size_t i = 1; i < first_node_id_.size(); i++) {
+ if (id < first_node_id_[i]) return inlined_functions_[i - 1];
+ }
+ DCHECK_GE(id, first_node_id_.back());
+ return inlined_functions_.back();
+}
+
+int WasmInliner::GetCallCount(Node* call) {
+ if (!FLAG_wasm_speculative_inlining) return 0;
+ base::MutexGuard guard(&module()->type_feedback.mutex);
+ wasm::WasmCodePosition position =
+ source_positions_->GetSourcePosition(call).ScriptOffset();
+ uint32_t func = FindOriginatingFunction(call);
+ auto maybe_feedback =
+ module()->type_feedback.feedback_for_function.find(func);
+ if (maybe_feedback == module()->type_feedback.feedback_for_function.end()) {
+ return 0;
+ }
+ wasm::FunctionTypeFeedback feedback = maybe_feedback->second;
+ // It's possible that we haven't processed the feedback yet. Currently,
+ // this can happen for targets of call_direct that haven't gotten hot yet,
+ // and for functions where Liftoff bailed out.
+ if (feedback.feedback_vector.size() == 0) return 0;
+ auto index_in_vector = feedback.positions.find(position);
+ if (index_in_vector == feedback.positions.end()) return 0;
+ return feedback.feedback_vector[index_in_vector->second]
+ .absolute_call_frequency;
+}
// TODO(12166): Save inlined frames for trap/--trace-wasm purposes. Consider
// tail calls.
@@ -55,68 +95,73 @@ Reduction WasmInliner::ReduceCall(Node* call) {
}
auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
uint32_t inlinee_index = static_cast<uint32_t>(info.value());
- TRACE("[function %d: considering node %d, call to %d... ", function_index_,
- call->id(), inlinee_index)
if (info.rmode() != RelocInfo::WASM_CALL) {
- TRACE("not a wasm call]\n")
+ Trace(call, inlinee_index, "not a wasm call");
return NoChange();
}
if (inlinee_index < module()->num_imported_functions) {
- TRACE("imported function]\n")
+ Trace(call, inlinee_index, "imported function");
return NoChange();
}
if (inlinee_index == function_index_) {
- TRACE("recursive call]\n")
+ Trace(call, inlinee_index, "recursive call");
return NoChange();
}
- TRACE("adding to inlining candidates!]\n")
-
- bool is_speculative_call_ref = false;
- int call_count = 0;
- if (FLAG_wasm_speculative_inlining) {
- base::MutexGuard guard(&module()->type_feedback.mutex);
- auto maybe_feedback =
- module()->type_feedback.feedback_for_function.find(function_index_);
- if (maybe_feedback != module()->type_feedback.feedback_for_function.end()) {
- wasm::FunctionTypeFeedback feedback = maybe_feedback->second;
- wasm::WasmCodePosition position =
- source_positions_->GetSourcePosition(call).ScriptOffset();
- DCHECK_NE(position, wasm::kNoCodePosition);
- auto index_in_feedback_vector = feedback.positions.find(position);
- if (index_in_feedback_vector != feedback.positions.end()) {
- is_speculative_call_ref = true;
- call_count = feedback.feedback_vector[index_in_feedback_vector->second]
- .absolute_call_frequency;
- }
- }
- }
+ Trace(call, inlinee_index, "adding to inlining candidates!");
+
+ int call_count = GetCallCount(call);
CHECK_LT(inlinee_index, module()->functions.size());
const wasm::WasmFunction* inlinee = &module()->functions[inlinee_index];
base::Vector<const byte> function_bytes = wire_bytes_->GetCode(inlinee->code);
- CandidateInfo candidate{call, inlinee_index, is_speculative_call_ref,
- call_count, function_bytes.length()};
+ CandidateInfo candidate{call, inlinee_index, call_count,
+ function_bytes.length()};
inlining_candidates_.push(candidate);
return NoChange();
}
+bool SmallEnoughToInline(size_t current_graph_size, uint32_t candidate_size) {
+ if (WasmInliner::graph_size_allows_inlining(current_graph_size)) {
+ return true;
+ }
+ // For truly tiny functions, let's be a bit more generous.
+ return candidate_size < 10 &&
+ WasmInliner::graph_size_allows_inlining(current_graph_size - 100);
+}
+
+void WasmInliner::Trace(const CandidateInfo& candidate, const char* decision) {
+ TRACE(
+ " [function %d: considering candidate {@%d, index=%d, count=%d, "
+ "size=%d}: %s]\n",
+ function_index_, candidate.node->id(), candidate.inlinee_index,
+ candidate.call_count, candidate.wire_byte_size, decision);
+}
+
void WasmInliner::Finalize() {
- TRACE("function %d: going though inlining candidates...\n", function_index_);
+ TRACE("function %d %s: going though inlining candidates...\n",
+ function_index_, debug_name_);
+ if (inlining_candidates_.empty()) return;
while (!inlining_candidates_.empty()) {
CandidateInfo candidate = inlining_candidates_.top();
inlining_candidates_.pop();
Node* call = candidate.node;
- TRACE(
- " [function %d: considering candidate {@%d, index=%d, type=%s, "
- "count=%d, size=%d}... ",
- function_index_, call->id(), candidate.inlinee_index,
- candidate.is_speculative_call_ref ? "ref" : "direct",
- candidate.call_count, candidate.wire_byte_size);
if (call->IsDead()) {
- TRACE("dead node]\n");
+ Trace(candidate, "dead node");
+ continue;
+ }
+ int min_count_for_inlining = candidate.wire_byte_size / 2;
+ if (candidate.call_count < min_count_for_inlining) {
+ Trace(candidate, "not called often enough");
+ continue;
+ }
+ // We could build the candidate's graph first and consider its node count,
+ // but it turns out that wire byte size and node count are quite strongly
+ // correlated, at about 1.16 nodes per wire byte (measured for J2Wasm).
+ if (!SmallEnoughToInline(current_graph_size_, candidate.wire_byte_size)) {
+ Trace(candidate, "not enough inlining budget");
continue;
}
const wasm::WasmFunction* inlinee =
@@ -126,37 +171,37 @@ void WasmInliner::Finalize() {
// We use the signature based on the real argument types stored in the call
// node. This is more specific than the callee's formal signature and might
// enable some optimizations.
- const wasm::FunctionSig* real_sig =
+ const wasm::FunctionSig* specialized_sig =
CallDescriptorOf(call->op())->wasm_sig();
#if DEBUG
// Check that the real signature is a subtype of the formal one.
const wasm::FunctionSig* formal_sig =
WasmGraphBuilder::Int64LoweredSig(zone(), inlinee->sig);
- CHECK_EQ(real_sig->parameter_count(), formal_sig->parameter_count());
- CHECK_EQ(real_sig->return_count(), formal_sig->return_count());
- for (size_t i = 0; i < real_sig->parameter_count(); i++) {
- CHECK(wasm::IsSubtypeOf(real_sig->GetParam(i), formal_sig->GetParam(i),
- module()));
+ CHECK_EQ(specialized_sig->parameter_count(), formal_sig->parameter_count());
+ CHECK_EQ(specialized_sig->return_count(), formal_sig->return_count());
+ for (size_t i = 0; i < specialized_sig->parameter_count(); i++) {
+ CHECK(wasm::IsSubtypeOf(specialized_sig->GetParam(i),
+ formal_sig->GetParam(i), module()));
}
- for (size_t i = 0; i < real_sig->return_count(); i++) {
- CHECK(wasm::IsSubtypeOf(formal_sig->GetReturn(i), real_sig->GetReturn(i),
- module()));
+ for (size_t i = 0; i < specialized_sig->return_count(); i++) {
+ CHECK(wasm::IsSubtypeOf(formal_sig->GetReturn(i),
+ specialized_sig->GetReturn(i), module()));
}
#endif
- const wasm::FunctionBody inlinee_body(real_sig, inlinee->code.offset(),
- function_bytes.begin(),
- function_bytes.end());
wasm::WasmFeatures detected;
- WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
- source_positions_);
std::vector<WasmLoopInfo> inlinee_loop_infos;
size_t subgraph_min_node_id = graph()->NodeCount();
Node* inlinee_start;
Node* inlinee_end;
- {
+ for (const wasm::FunctionSig* sig = specialized_sig;;) {
+ const wasm::FunctionBody inlinee_body(sig, inlinee->code.offset(),
+ function_bytes.begin(),
+ function_bytes.end());
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
+ source_positions_);
Graph::SubgraphScope scope(graph());
wasm::DecodeResult result = wasm::BuildTFGraph(
zone()->allocator(), env_->enabled_features, module(), &builder,
@@ -165,29 +210,31 @@ void WasmInliner::Finalize() {
NodeProperties::IsExceptionalCall(call)
? wasm::kInlinedHandledCall
: wasm::kInlinedNonHandledCall);
- if (result.failed()) {
- // This can happen if the inlinee has never been compiled before and is
- // invalid. Return, as there is no point to keep optimizing.
- TRACE("failed to compile]\n")
- return;
+ if (result.ok()) {
+ builder.LowerInt64(WasmGraphBuilder::kCalledFromWasm);
+ inlinee_start = graph()->start();
+ inlinee_end = graph()->end();
+ break;
}
-
- builder.LowerInt64(WasmGraphBuilder::kCalledFromWasm);
- inlinee_start = graph()->start();
- inlinee_end = graph()->end();
+ if (sig == specialized_sig) {
+ // One possible reason for failure is the opportunistic signature
+ // specialization. Try again without that.
+ sig = inlinee->sig;
+ inlinee_loop_infos.clear();
+ Trace(candidate, "retrying with original signature");
+ continue;
+ }
+ // Otherwise report failure.
+ Trace(candidate, "failed to compile");
+ return;
}
size_t additional_nodes = graph()->NodeCount() - subgraph_min_node_id;
- if (current_graph_size_ + additional_nodes >
- size_limit(initial_graph_size_)) {
- // This is not based on the accurate graph size, as it may have been
- // shrunk by other optimizations. We could recompute the accurate size
- // with a traversal, but it is most probably not worth the time.
- TRACE("not enough inlining budget]\n");
- continue;
- }
- TRACE("inlining!]\n");
+ Trace(candidate, "inlining!");
current_graph_size_ += additional_nodes;
+ inlined_functions_.push_back(candidate.inlinee_index);
+ static_assert(std::is_same_v<NodeId, uint32_t>);
+ first_node_id_.push_back(static_cast<uint32_t>(subgraph_min_node_id));
if (call->opcode() == IrOpcode::kCall) {
InlineCall(call, inlinee_start, inlinee_end, inlinee->sig,
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
index 0ded2ac0f4..bd41a95927 100644
--- a/deps/v8/src/compiler/wasm-inlining.h
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -39,7 +39,7 @@ class WasmInliner final : public AdvancedReducer {
uint32_t function_index, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, MachineGraph* mcgraph,
const wasm::WireBytesStorage* wire_bytes,
- std::vector<WasmLoopInfo>* loop_infos)
+ std::vector<WasmLoopInfo>* loop_infos, const char* debug_name)
: AdvancedReducer(editor),
env_(env),
function_index_(function_index),
@@ -48,6 +48,7 @@ class WasmInliner final : public AdvancedReducer {
mcgraph_(mcgraph),
wire_bytes_(wire_bytes),
loop_infos_(loop_infos),
+ debug_name_(debug_name),
initial_graph_size_(mcgraph->graph()->NodeCount()),
current_graph_size_(initial_graph_size_),
inlining_candidates_() {}
@@ -57,16 +58,14 @@ class WasmInliner final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
void Finalize() final;
- static bool any_inlining_impossible(size_t initial_graph_size) {
- return size_limit(initial_graph_size) - initial_graph_size <
- kMinimumFunctionNodeCount;
+ static bool graph_size_allows_inlining(size_t initial_graph_size) {
+ return initial_graph_size < 5000;
}
private:
struct CandidateInfo {
Node* node;
uint32_t inlinee_index;
- bool is_speculative_call_ref;
int call_count;
int wire_byte_size;
};
@@ -74,45 +73,13 @@ class WasmInliner final : public AdvancedReducer {
struct LexicographicOrdering {
// Returns if c1 should be prioritized less than c2.
bool operator()(CandidateInfo& c1, CandidateInfo& c2) {
- if (c1.is_speculative_call_ref && !c2.is_speculative_call_ref) {
- return false;
- }
- if (c2.is_speculative_call_ref && !c1.is_speculative_call_ref) {
- return true;
- }
if (c1.call_count > c2.call_count) return false;
if (c2.call_count > c1.call_count) return true;
return c1.wire_byte_size > c2.wire_byte_size;
}
};
- // TODO(manoskouk): This has not been found to be useful, but something
- // similar may be tried again in the future.
- // struct AdvancedOrdering {
- // // Returns if c1 should be prioritized less than c2.
- // bool operator()(CandidateInfo& c1, CandidateInfo& c2) {
- // if (c1.is_speculative_call_ref && c2.is_speculative_call_ref) {
- // if (c1.call_count > c2.call_count) return false;
- // if (c2.call_count > c1.call_count) return true;
- // return c1.wire_byte_size > c2.wire_byte_size;
- // }
- // if (!c1.is_speculative_call_ref && !c2.is_speculative_call_ref) {
- // return c1.wire_byte_size > c2.wire_byte_size;
- // }
- //
- // constexpr int kAssumedCallCountForDirectCalls = 3;
- //
- // int c1_call_count = c1.is_speculative_call_ref
- // ? c1.call_count
- // : kAssumedCallCountForDirectCalls;
- // int c2_call_count = c2.is_speculative_call_ref
- // ? c2.call_count
- // : kAssumedCallCountForDirectCalls;
- //
- // return static_cast<float>(c1_call_count) / c1.wire_byte_size <
- // static_cast<float>(c2_call_count) / c2.wire_byte_size;
- // }
- //};
+ uint32_t FindOriginatingFunction(Node* call);
Zone* zone() const { return mcgraph_->zone(); }
CommonOperatorBuilder* common() const { return mcgraph_->common(); }
@@ -120,17 +87,6 @@ class WasmInliner final : public AdvancedReducer {
MachineGraph* mcgraph() const { return mcgraph_; }
const wasm::WasmModule* module() const;
- // A limit to the size of the inlined graph as a function of its initial size.
- static size_t size_limit(size_t initial_graph_size) {
- return initial_graph_size +
- std::min(FLAG_wasm_inlining_max_size,
- FLAG_wasm_inlining_budget_factor / initial_graph_size);
- }
-
- // The smallest size in TF nodes any meaningful wasm function can have
- // (start, return, IntConstant(0), end).
- static constexpr size_t kMinimumFunctionNodeCount = 4;
-
Reduction ReduceCall(Node* call);
void InlineCall(Node* call, Node* callee_start, Node* callee_end,
const wasm::FunctionSig* inlinee_sig,
@@ -138,6 +94,11 @@ class WasmInliner final : public AdvancedReducer {
void InlineTailCall(Node* call, Node* callee_start, Node* callee_end);
void RewireFunctionEntry(Node* call, Node* callee_start);
+ int GetCallCount(Node* call);
+
+ void Trace(Node* call, int inlinee, const char* decision);
+ void Trace(const CandidateInfo& candidate, const char* decision);
+
wasm::CompilationEnv* const env_;
uint32_t function_index_;
SourcePositionTable* const source_positions_;
@@ -145,12 +106,18 @@ class WasmInliner final : public AdvancedReducer {
MachineGraph* const mcgraph_;
const wasm::WireBytesStorage* const wire_bytes_;
std::vector<WasmLoopInfo>* const loop_infos_;
+ const char* debug_name_;
const size_t initial_graph_size_;
size_t current_graph_size_;
std::priority_queue<CandidateInfo, std::vector<CandidateInfo>,
LexicographicOrdering>
inlining_candidates_;
std::unordered_set<Node*> seen_;
+ std::vector<uint32_t> inlined_functions_;
+ // Stores the graph size before an inlining was performed, to make it
+ // possible to map back from nodes to the function they came from.
+ // Guaranteed to have the same length as {inlined_functions_}.
+ std::vector<uint32_t> first_node_id_;
};
} // namespace compiler
diff --git a/deps/v8/src/d8/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index eed6e19cf3..897736f19f 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -8,13 +8,17 @@
#include "include/v8-local-handle.h"
#include "include/v8-primitive.h"
#include "include/v8-template.h"
+#include "src/api/api-inl.h"
+#include "src/api/api.h"
#include "src/d8/d8.h"
#include "src/execution/isolate-inl.h"
+#include "src/objects/managed-inl.h"
namespace v8 {
namespace {
-AsyncHooksWrap* UnwrapHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
+std::shared_ptr<AsyncHooksWrap> UnwrapHook(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
Local<Object> hook = args.This();
@@ -26,18 +30,17 @@ AsyncHooksWrap* UnwrapHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
return nullptr;
}
- Local<External> wrap = hook->GetInternalField(0).As<External>();
- void* ptr = wrap->Value();
- return static_cast<AsyncHooksWrap*>(ptr);
+ i::Handle<i::Object> handle = Utils::OpenHandle(*hook->GetInternalField(0));
+ return i::Handle<i::Managed<AsyncHooksWrap>>::cast(handle)->get();
}
void EnableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
- AsyncHooksWrap* wrap = UnwrapHook(args);
+ auto wrap = UnwrapHook(args);
if (wrap) wrap->Enable();
}
void DisableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
- AsyncHooksWrap* wrap = UnwrapHook(args);
+ auto wrap = UnwrapHook(args);
if (wrap) wrap->Disable();
}
@@ -126,8 +129,8 @@ Local<Object> AsyncHooks::CreateHook(
return Local<Object>();
}
- std::unique_ptr<AsyncHooksWrap> wrap =
- std::make_unique<AsyncHooksWrap>(isolate);
+ std::shared_ptr<AsyncHooksWrap> wrap =
+ std::make_shared<AsyncHooksWrap>(isolate);
Local<Object> fn_obj = args[0].As<Object>();
@@ -148,7 +151,9 @@ Local<Object> AsyncHooks::CreateHook(
Local<Object> obj = async_hooks_templ.Get(isolate)
->NewInstance(currentContext)
.ToLocalChecked();
- obj->SetInternalField(0, External::New(isolate, wrap.get()));
+ i::Handle<i::Object> managed = i::Managed<AsyncHooksWrap>::FromSharedPtr(
+ reinterpret_cast<i::Isolate*>(isolate), sizeof(AsyncHooksWrap), wrap);
+ obj->SetInternalField(0, Utils::ToLocal(managed));
{
base::RecursiveMutexGuard lock_guard(&async_wraps_mutex_);
diff --git a/deps/v8/src/d8/async-hooks-wrapper.h b/deps/v8/src/d8/async-hooks-wrapper.h
index cbc42a901d..e4c695b66d 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.h
+++ b/deps/v8/src/d8/async-hooks-wrapper.h
@@ -68,7 +68,7 @@ class AsyncHooks {
private:
base::RecursiveMutex async_wraps_mutex_;
- std::vector<std::unique_ptr<AsyncHooksWrap>> async_wraps_;
+ std::vector<std::shared_ptr<AsyncHooksWrap>> async_wraps_;
Isolate* isolate_;
Persistent<ObjectTemplate> async_hooks_templ;
Persistent<Private> async_id_smb;
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index dd55dfa004..6f38f7280e 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -1022,7 +1022,7 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
ModuleType module_type) {
DCHECK(IsAbsolutePath(file_name));
Isolate* isolate = context->GetIsolate();
- Local<String> source_text = ReadFile(isolate, file_name.c_str(), false);
+ MaybeLocal<String> source_text = ReadFile(isolate, file_name.c_str(), false);
if (source_text.IsEmpty() && options.fuzzy_module_file_extensions) {
std::string fallback_file_name = file_name + ".js";
source_text = ReadFile(isolate, fallback_file_name.c_str(), false);
@@ -1053,14 +1053,16 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
Local<Module> module;
if (module_type == ModuleType::kJavaScript) {
- ScriptCompiler::Source source(source_text, origin);
- if (!CompileString<Module>(isolate, context, source_text, origin)
+ ScriptCompiler::Source source(source_text.ToLocalChecked(), origin);
+ if (!CompileString<Module>(isolate, context, source_text.ToLocalChecked(),
+ origin)
.ToLocal(&module)) {
return MaybeLocal<Module>();
}
} else if (module_type == ModuleType::kJSON) {
Local<Value> parsed_json;
- if (!v8::JSON::Parse(context, source_text).ToLocal(&parsed_json)) {
+ if (!v8::JSON::Parse(context, source_text.ToLocalChecked())
+ .ToLocal(&parsed_json)) {
return MaybeLocal<Module>();
}
@@ -1441,9 +1443,12 @@ bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
if (length == 0) {
isolate->ThrowError("Could not read the web snapshot file");
} else {
- i::WebSnapshotDeserializer deserializer(isolate, snapshot_data.get(),
- static_cast<size_t>(length));
- success = deserializer.Deserialize();
+ for (int r = 0; r < DeserializationRunCount(); ++r) {
+ bool skip_exports = r > 0;
+ i::WebSnapshotDeserializer deserializer(isolate, snapshot_data.get(),
+ static_cast<size_t>(length));
+ success = deserializer.Deserialize({}, skip_exports);
+ }
}
if (!success) {
CHECK(try_catch.HasCaught());
@@ -1471,14 +1476,17 @@ bool Shell::LoadJSON(Isolate* isolate, const char* file_name) {
std::stringstream stream(data.get());
std::string line;
while (std::getline(stream, line, '\n')) {
- Local<String> source =
- String::NewFromUtf8(isolate, line.c_str()).ToLocalChecked();
- MaybeLocal<Value> maybe_value = JSON::Parse(realm, source);
- Local<Value> value;
- if (!maybe_value.ToLocal(&value)) {
- DCHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
- return false;
+ for (int r = 0; r < DeserializationRunCount(); ++r) {
+ Local<String> source =
+ String::NewFromUtf8(isolate, line.c_str()).ToLocalChecked();
+ MaybeLocal<Value> maybe_value = JSON::Parse(realm, source);
+
+ Local<Value> value;
+ if (!maybe_value.ToLocal(&value)) {
+ DCHECK(try_catch.HasCaught());
+ ReportException(isolate, &try_catch);
+ return false;
+ }
}
}
return true;
@@ -1783,8 +1791,8 @@ MaybeLocal<Context> Shell::CreateRealm(
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Local<Context> context =
Context::New(isolate, nullptr, global_template, global_object);
- DCHECK(!try_catch.HasCaught());
if (context.IsEmpty()) return MaybeLocal<Context>();
+ DCHECK(!try_catch.HasCaught());
InitializeModuleEmbedderData(context);
data->realms_[index].Reset(isolate, context);
data->realms_[index].AnnotateStrongRetainer(kGlobalHandleLabel);
@@ -2175,6 +2183,12 @@ void Shell::TestVerifySourcePositions(
}
}
+void Shell::InstallConditionalFeatures(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ isolate->InstallConditionalFeatures(isolate->GetCurrentContext());
+}
+
// async_hooks.createHook() registers functions to be called for different
// lifetime events of each async operation.
void Shell::AsyncHooksCreateHook(
@@ -2296,8 +2310,8 @@ void Shell::ReadFile(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
}
- Local<String> source = ReadFile(args.GetIsolate(), *file_name);
- if (source.IsEmpty()) return;
+ Local<String> source;
+ if (!ReadFile(args.GetIsolate(), *file_name).ToLocal(&source)) return;
args.GetReturnValue().Set(source);
}
@@ -2350,8 +2364,8 @@ void Shell::ExecuteFile(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::NewFromUtf8(isolate, oss.str().c_str()).ToLocalChecked());
return;
}
- Local<String> source = ReadFile(isolate, *file_name);
- if (source.IsEmpty()) return;
+ Local<String> source;
+ if (!ReadFile(isolate, *file_name).ToLocal(&source)) return;
if (!ExecuteString(
args.GetIsolate(), source,
String::NewFromUtf8(isolate, *file_name).ToLocalChecked(),
@@ -2491,8 +2505,9 @@ MaybeLocal<String> Shell::ReadSource(
return MaybeLocal<String>();
}
String::Utf8Value filename(isolate, args[index]);
- source = Shell::ReadFile(isolate, *filename);
- if (source.IsEmpty()) return MaybeLocal<String>();
+ if (!Shell::ReadFile(isolate, *filename).ToLocal(&source)) {
+ return MaybeLocal<String>();
+ };
break;
}
case CodeType::kString:
@@ -2629,7 +2644,6 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
int exit_code = (*args)[0]
->Int32Value(args->GetIsolate()->GetCurrentContext())
.FromMaybe(0);
- WaitForRunningWorkers();
Isolate* isolate = args->GetIsolate();
isolate->Exit();
@@ -3132,6 +3146,11 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
test_template->Set(isolate, "LeafInterfaceType",
Shell::CreateLeafInterfaceTypeTemplate(isolate));
}
+ // Allows testing code paths that are triggered when Origin Trials are
+ // added in the browser.
+ test_template->Set(
+ isolate, "installConditionalFeatures",
+ FunctionTemplate::New(isolate, Shell::InstallConditionalFeatures));
d8_template->Set(isolate, "test", test_template);
}
@@ -3265,7 +3284,7 @@ void Shell::Initialize(Isolate* isolate, D8Console* console,
Local<String> Shell::WasmLoadSourceMapCallback(Isolate* isolate,
const char* path) {
- return Shell::ReadFile(isolate, path, false);
+ return Shell::ReadFile(isolate, path, false).ToLocalChecked();
}
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
@@ -3275,7 +3294,8 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, nullptr, global_template);
- DCHECK(!context.IsEmpty());
+ DCHECK_IMPLIES(context.IsEmpty(), isolate->IsExecutionTerminating());
+ if (context.IsEmpty()) return {};
if (i::FLAG_perf_prof_annotate_wasm || i::FLAG_vtune_prof_annotate_wasm) {
isolate->SetWasmLoadSourceMapCallback(Shell::WasmLoadSourceMapCallback);
}
@@ -3563,32 +3583,12 @@ V8_NOINLINE void FuzzerMonitor::UseOfUninitializedValue() {
#endif
}
-static FILE* FOpen(const char* path, const char* mode) {
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
- FILE* result;
- if (fopen_s(&result, path, mode) == 0) {
- return result;
- } else {
- return nullptr;
- }
-#else
- FILE* file = base::Fopen(path, mode);
- if (file == nullptr) return nullptr;
- struct stat file_stat;
- if (fstat(fileno(file), &file_stat) != 0) return nullptr;
- bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
- if (is_regular_file) return file;
- base::Fclose(file);
- return nullptr;
-#endif
-}
-
char* Shell::ReadChars(const char* name, int* size_out) {
if (options.read_from_tcp_port >= 0) {
return ReadCharsFromTcpPort(name, size_out);
}
- FILE* file = FOpen(name, "rb");
+ FILE* file = base::OS::FOpen(name, "rb");
if (file == nullptr) return nullptr;
fseek(file, 0, SEEK_END);
@@ -3664,33 +3664,31 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// Reads a file into a v8 string.
-Local<String> Shell::ReadFile(Isolate* isolate, const char* name,
- bool should_throw) {
+MaybeLocal<String> Shell::ReadFile(Isolate* isolate, const char* name,
+ bool should_throw) {
std::unique_ptr<base::OS::MemoryMappedFile> file(
base::OS::MemoryMappedFile::open(
name, base::OS::MemoryMappedFile::FileMode::kReadOnly));
if (!file) {
if (should_throw) {
std::ostringstream oss;
- oss << "Error loading file: \"" << name << '"';
+ oss << "Error loading file: " << name;
isolate->ThrowError(
- v8::String::NewFromUtf8(isolate, oss.str().c_str()).ToLocalChecked());
+ v8::String::NewFromUtf8(
+ isolate, oss.str().substr(0, String::kMaxLength).c_str())
+ .ToLocalChecked());
}
- return Local<String>();
+ return MaybeLocal<String>();
}
int size = static_cast<int>(file->size());
char* chars = static_cast<char*>(file->memory());
- Local<String> result;
if (i::FLAG_use_external_strings && i::String::IsAscii(chars, size)) {
String::ExternalOneByteStringResource* resource =
new ExternalOwningOneByteStringResource(std::move(file));
- result = String::NewExternalOneByte(isolate, resource).ToLocalChecked();
- } else {
- result = String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
- .ToLocalChecked();
+ return String::NewExternalOneByte(isolate, resource);
}
- return result;
+ return String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size);
}
void Shell::WriteChars(const char* name, uint8_t* buffer, size_t buffer_size) {
@@ -3984,8 +3982,8 @@ bool SourceGroup::Execute(Isolate* isolate) {
HandleScope handle_scope(isolate);
Local<String> file_name =
String::NewFromUtf8(isolate, arg).ToLocalChecked();
- Local<String> source = Shell::ReadFile(isolate, arg);
- if (source.IsEmpty()) {
+ Local<String> source;
+ if (!Shell::ReadFile(isolate, arg).ToLocal(&source)) {
printf("Error reading '%s'\n", arg);
base::OS::ExitProcess(1);
}
@@ -4195,6 +4193,10 @@ void Worker::Terminate() {
std::unique_ptr<v8::Task> task(
new TerminateTask(task_manager_, shared_from_this()));
task_runner_->PostTask(std::move(task));
+ // Also schedule an interrupt in case the worker is running code and never
+ // returning to the event queue. Since we checked the state before, and we are
+ // holding the {worker_mutex_}, it's safe to access the isolate.
+ isolate_->TerminateExecution();
}
void Worker::ProcessMessage(std::unique_ptr<SerializationData> data) {
@@ -4254,12 +4256,15 @@ void Worker::ExecuteInThread() {
D8Console console(isolate_);
Shell::Initialize(isolate_, &console, false);
- {
+ // This is not really a loop, but the loop allows us to break out of this
+ // block easily.
+ for (bool execute = true; execute; execute = false) {
Isolate::Scope iscope(isolate_);
{
HandleScope scope(isolate_);
PerIsolateData data(isolate_);
Local<Context> context = Shell::CreateEvaluationContext(isolate_);
+ if (context.IsEmpty()) break;
context_.Reset(isolate_, context);
{
Context::Scope cscope(context);
@@ -4346,6 +4351,41 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
+#if V8_TARGET_OS_WIN
+// Enable support for unicode filename paths on Windows.
+// We first convert the ANSI-encoded argv[i] to UTF-16, and then convert
+// the UTF-16 string to UTF-8, setting argv[i] to the UTF-8-encoded arg.
+// We allocate memory for the UTF-8-encoded arg, and we free it and reset
+// it to nullptr after the filename path arg has been used. Because
+// Execute may be called multiple times, we free the allocated unicode
+// filenames on exit.
+
+// Save the allocated utf8 filenames, and we will free them on exit.
+std::vector<char*> utf8_filenames;
+#include <shellapi.h>
+// Convert a UTF-16 encoded string to UTF-8.
+char* ConvertUtf16StringToUtf8(const wchar_t* str) {
+ // On Windows wchar_t must be a 16-bit value.
+ static_assert(sizeof(wchar_t) == 2, "wrong wchar_t size");
+ int len =
+ WideCharToMultiByte(CP_UTF8, 0, str, -1, nullptr, 0, nullptr, FALSE);
+ DCHECK_LT(0, len);
+ char* utf8_str = new char[len];
+ utf8_filenames.push_back(utf8_str);
+ WideCharToMultiByte(CP_UTF8, 0, str, -1, utf8_str, len, nullptr, FALSE);
+ return utf8_str;
+}
+
+// Convert the ANSI encoded argv[i] to UTF-8.
+void PreProcessUnicodeFilenameArg(char* argv[], int i) {
+ int argc;
+ wchar_t** wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
+ argv[i] = ConvertUtf16StringToUtf8(wargv[i]);
+ LocalFree(wargv);
+}
+
+#endif
+
bool Shell::SetOptions(int argc, char* argv[]) {
bool logfile_per_isolate = false;
bool no_always_opt = false;
@@ -4518,6 +4558,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.cpu_profiler = true;
options.cpu_profiler_print = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--stress-deserialize") == 0) {
+ options.stress_deserialize = true;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--web-snapshot-config=", 22) == 0) {
options.web_snapshot_config = argv[i] + 22;
argv[i] = nullptr;
@@ -4569,6 +4612,10 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--expose-fast-api") == 0) {
options.expose_fast_api = true;
argv[i] = nullptr;
+ } else {
+#ifdef V8_TARGET_OS_WIN
+ PreProcessUnicodeFilenameArg(argv, i);
+#endif
}
}
@@ -5003,7 +5050,8 @@ class Serializer : public ValueSerializer::Delegate {
Local<ArrayBuffer> array_buffer =
Local<ArrayBuffer>::New(isolate_, global_array_buffer);
if (!array_buffer->IsDetachable()) {
- isolate_->ThrowError("ArrayBuffer could not be transferred");
+ isolate_->ThrowError(
+ "ArrayBuffer is not detachable and could not be transferred");
return Nothing<bool>();
}
@@ -5357,6 +5405,9 @@ int Shell::Main(int argc, char* argv[]) {
if (HasFlagThatRequiresSharedIsolate()) {
Isolate::CreateParams shared_create_params;
+ shared_create_params.constraints.ConfigureDefaults(
+ base::SysInfo::AmountOfPhysicalMemory(),
+ base::SysInfo::AmountOfVirtualMemory());
shared_create_params.array_buffer_allocator = Shell::array_buffer_allocator;
shared_isolate =
reinterpret_cast<Isolate*>(i::Isolate::NewShared(shared_create_params));
@@ -5549,6 +5600,16 @@ int Shell::Main(int argc, char* argv[]) {
tracing_controller->StopTracing();
}
g_platform.reset();
+
+#ifdef V8_TARGET_OS_WIN
+ // We need to free the allocated utf8 filenames in
+ // PreProcessUnicodeFilenameArg.
+ for (char* utf8_str : utf8_filenames) {
+ delete[] utf8_str;
+ }
+ utf8_filenames.clear();
+#endif
+
return result;
}
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 7c42c5e7e2..d0bb1c8fdf 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -259,8 +259,11 @@ class Worker : public std::enable_shared_from_this<Worker> {
// need locking, but accessing the Worker's data member does.)
base::Mutex worker_mutex_;
- // Only accessed by the worker thread.
+ // The isolate should only be accessed by the worker itself, or when holding
+ // the worker_mutex_ and after checking the worker state.
Isolate* isolate_ = nullptr;
+
+ // Only accessed by the worker thread.
v8::Persistent<v8::Context> context_;
};
@@ -464,6 +467,8 @@ class ShellOptions {
"web-snapshot-output", nullptr};
DisallowReassignment<bool> d8_web_snapshot_api = {
"experimental-d8-web-snapshot-api", false};
+ // Applies to web snapshot and JSON deserialization.
+ DisallowReassignment<bool> stress_deserialize = {"stress-deserialize", false};
DisallowReassignment<bool> compile_only = {"compile-only", false};
DisallowReassignment<int> repeat_compile = {"repeat-compile", 1};
#if V8_ENABLE_WEBASSEMBLY
@@ -495,8 +500,8 @@ class Shell : public i::AllStatic {
static void ReportException(Isolate* isolate, Local<Message> message,
Local<Value> exception);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
- static Local<String> ReadFile(Isolate* isolate, const char* name,
- bool should_throw = true);
+ static MaybeLocal<String> ReadFile(Isolate* isolate, const char* name,
+ bool should_throw = true);
static Local<String> WasmLoadSourceMapCallback(Isolate* isolate,
const char* name);
static Local<Context> CreateEvaluationContext(Isolate* isolate);
@@ -552,6 +557,9 @@ class Shell : public i::AllStatic {
static void TestVerifySourcePositions(
const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void InstallConditionalFeatures(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
static void AsyncHooksCreateHook(
const v8::FunctionCallbackInfo<v8::Value>& args);
static void AsyncHooksExecutionAsyncId(
@@ -691,6 +699,10 @@ class Shell : public i::AllStatic {
static Local<FunctionTemplate> CreateSnapshotTemplate(Isolate* isolate);
private:
+ static inline int DeserializationRunCount() {
+ return options.stress_deserialize ? 1000 : 1;
+ }
+
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
static Global<Function> stringify_function_;
diff --git a/deps/v8/src/date/date.cc b/deps/v8/src/date/date.cc
index 9b0665aba0..5749c9833e 100644
--- a/deps/v8/src/date/date.cc
+++ b/deps/v8/src/date/date.cc
@@ -10,6 +10,7 @@
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif
+#include "src/strings/string-stream.h"
namespace v8 {
namespace internal {
@@ -533,5 +534,65 @@ double MakeTime(double hour, double min, double sec, double ms) {
return std::numeric_limits<double>::quiet_NaN();
}
+namespace {
+
+const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
+ "Thu", "Fri", "Sat"};
+const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+
+template <class... Args>
+DateBuffer FormatDate(const char* format, Args... args) {
+ DateBuffer buffer;
+ SmallStringOptimizedAllocator<DateBuffer::kInlineSize> allocator(&buffer);
+ StringStream sstream(&allocator);
+ sstream.Add(format, args...);
+ buffer.resize_no_init(sstream.length());
+ return buffer;
+}
+
+} // namespace
+
+DateBuffer ToDateString(double time_val, DateCache* date_cache,
+ ToDateStringMode mode) {
+ if (std::isnan(time_val)) {
+ return FormatDate("Invalid Date");
+ }
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = mode != ToDateStringMode::kUTCDateAndTime
+ ? date_cache->ToLocal(time_ms)
+ : time_ms;
+ int year, month, day, weekday, hour, min, sec, ms;
+ date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
+ &min, &sec, &ms);
+ int timezone_offset = -date_cache->TimezoneOffset(time_ms);
+ int timezone_hour = std::abs(timezone_offset) / 60;
+ int timezone_min = std::abs(timezone_offset) % 60;
+ const char* local_timezone = date_cache->LocalTimezone(time_ms);
+ switch (mode) {
+ case ToDateStringMode::kLocalDate:
+ return FormatDate((year < 0) ? "%s %s %02d %05d" : "%s %s %02d %04d",
+ kShortWeekDays[weekday], kShortMonths[month], day,
+ year);
+ case ToDateStringMode::kLocalTime:
+ return FormatDate("%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
+ (timezone_offset < 0) ? '-' : '+', timezone_hour,
+ timezone_min, local_timezone);
+ case ToDateStringMode::kLocalDateAndTime:
+ return FormatDate(
+ (year < 0) ? "%s %s %02d %05d %02d:%02d:%02d GMT%c%02d%02d (%s)"
+ : "%s %s %02d %04d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ kShortWeekDays[weekday], kShortMonths[month], day, year, hour, min,
+ sec, (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
+ local_timezone);
+ case ToDateStringMode::kUTCDateAndTime:
+ return FormatDate((year < 0) ? "%s, %02d %s %05d %02d:%02d:%02d GMT"
+ : "%s, %02d %s %04d %02d:%02d:%02d GMT",
+ kShortWeekDays[weekday], day, kShortMonths[month], year,
+ hour, min, sec);
+ }
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/date/date.h b/deps/v8/src/date/date.h
index 734ab3a26f..703cd45e15 100644
--- a/deps/v8/src/date/date.h
+++ b/deps/v8/src/date/date.h
@@ -5,6 +5,7 @@
#ifndef V8_DATE_DATE_H_
#define V8_DATE_DATE_H_
+#include "src/base/small-vector.h"
#include "src/base/timezone-cache.h"
#include "src/common/globals.h"
#include "src/objects/smi.h"
@@ -247,6 +248,19 @@ double MakeDay(double year, double month, double date);
// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
double MakeTime(double hour, double min, double sec, double ms);
+using DateBuffer = base::SmallVector<char, 128>;
+
+enum class ToDateStringMode {
+ kLocalDate,
+ kLocalTime,
+ kLocalDateAndTime,
+ kUTCDateAndTime,
+};
+
+// ES6 section 20.3.4.41.1 ToDateString(tv)
+DateBuffer ToDateString(double time_val, DateCache* date_cache,
+ ToDateStringMode mode);
+
} // namespace internal
} // namespace v8
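ToDateString() above renders a broken-down time into one of four shapes (local date, local time, local date+time, UTC date+time) using the short week-day and month tables and printf-style format strings. The snippet below reproduces the kUTCDateAndTime shape for non-negative years as a standalone program; it uses std::gmtime instead of V8's DateCache, purely for illustration.

#include <cstdio>
#include <ctime>

int main() {
  const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
                                  "Thu", "Fri", "Sat"};
  const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
                                "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};

  // Break the current time down in UTC; the real code gets these fields from
  // DateCache::BreakDownTime().
  std::time_t now = std::time(nullptr);
  std::tm utc = *std::gmtime(&now);

  // Same format string as the kUTCDateAndTime branch for years >= 0.
  char buffer[64];
  std::snprintf(buffer, sizeof(buffer), "%s, %02d %s %04d %02d:%02d:%02d GMT",
                kShortWeekDays[utc.tm_wday], utc.tm_mday,
                kShortMonths[utc.tm_mon], utc.tm_year + 1900, utc.tm_hour,
                utc.tm_min, utc.tm_sec);
  std::printf("%s\n", buffer);  // e.g. "Thu, 21 Apr 2022 09:54:15 GMT"
}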
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 184929c80b..bb9183f61e 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -31,7 +31,7 @@ static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
Handle<String> source,
REPLMode repl_mode) {
ScriptDetails script_details(isolate->factory()->empty_string(),
- ScriptOriginOptions(false, true));
+ ScriptOriginOptions(true, true));
script_details.repl_mode = repl_mode;
return Compiler::GetSharedFunctionInfoForScript(
isolate, source, script_details, ScriptCompiler::kNoCompileOptions,
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 1a9be54893..469d4a74c5 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,6 +7,7 @@
#include <vector>
+#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
@@ -23,9 +24,9 @@ class FrameInspector;
class DebugEvaluate : public AllStatic {
public:
- static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
- debug::EvaluateGlobalMode mode,
- REPLMode repl_mode = REPLMode::kNo);
+ static V8_EXPORT_PRIVATE MaybeHandle<Object> Global(
+ Isolate* isolate, Handle<String> source, debug::EvaluateGlobalMode mode,
+ REPLMode repl_mode = REPLMode::kNo);
static V8_EXPORT_PRIVATE MaybeHandle<Object> Global(
Isolate* isolate, Handle<JSFunction> function,
@@ -39,10 +40,11 @@ class DebugEvaluate : public AllStatic {
// The stack frame can be either a JavaScript stack frame or a Wasm
// stack frame. In the latter case, a special Debug Proxy API is
// provided to peek into the Wasm state.
- static MaybeHandle<Object> Local(Isolate* isolate, StackFrameId frame_id,
- int inlined_jsframe_index,
- Handle<String> source,
- bool throw_on_side_effect);
+ static V8_EXPORT_PRIVATE MaybeHandle<Object> Local(Isolate* isolate,
+ StackFrameId frame_id,
+ int inlined_jsframe_index,
+ Handle<String> source,
+ bool throw_on_side_effect);
// This is used for break-at-entry for builtins and API functions.
// Evaluate a piece of JavaScript in the native context, but with the
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index a09a21252a..8869c97f4a 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -9,6 +9,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compiler.h"
#include "src/codegen/script-details.h"
+#include "src/date/date.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-property-iterator.h"
@@ -49,6 +50,51 @@ v8_inspector::V8Inspector* GetInspector(Isolate* isolate) {
return reinterpret_cast<i::Isolate*>(isolate)->inspector();
}
+Local<String> GetBigIntDescription(Isolate* isolate, Local<BigInt> bigint) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::BigInt> i_bigint = Utils::OpenHandle(*bigint);
+ // For large BigInts computing the decimal string representation
+ // can take a long time, so we go with hexadecimal in that case.
+ int radix = (i_bigint->Words64Count() > 100 * 1000) ? 16 : 10;
+ i::Handle<i::String> string =
+ i::BigInt::ToString(i_isolate, i_bigint, radix, i::kDontThrow)
+ .ToHandleChecked();
+ if (radix == 16) {
+ if (i_bigint->IsNegative()) {
+ string = i_isolate->factory()
+ ->NewConsString(
+ i_isolate->factory()->NewStringFromAsciiChecked("-0x"),
+ i_isolate->factory()->NewProperSubString(
+ string, 1, string->length() - 1))
+ .ToHandleChecked();
+ } else {
+ string =
+ i_isolate->factory()
+ ->NewConsString(
+ i_isolate->factory()->NewStringFromAsciiChecked("0x"), string)
+ .ToHandleChecked();
+ }
+ }
+ i::Handle<i::String> description =
+ i_isolate->factory()
+ ->NewConsString(
+ string,
+ i_isolate->factory()->LookupSingleCharacterStringFromCode('n'))
+ .ToHandleChecked();
+ return Utils::ToLocal(description);
+}
+
+Local<String> GetDateDescription(Local<Date> date) {
+ auto receiver = Utils::OpenHandle(*date);
+ i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(receiver);
+ i::Isolate* isolate = jsdate->GetIsolate();
+ auto buffer = i::ToDateString(jsdate->value().Number(), isolate->date_cache(),
+ i::ToDateStringMode::kLocalDateAndTime);
+ return Utils::ToLocal(isolate->factory()
+ ->NewStringFromUtf8(base::VectorOf(buffer))
+ .ToHandleChecked());
+}
+
Local<String> GetFunctionDescription(Local<Function> function) {
auto receiver = Utils::OpenHandle(*function);
if (receiver->IsJSBoundFunction()) {
@@ -137,7 +183,7 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> object,
std::vector<Local<Value>>* names_out,
std::vector<Local<Value>>* values_out) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- LOG_API(isolate, debug, GetPrivateMembers);
+ API_RCS_SCOPE(isolate, debug, GetPrivateMembers);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::JSReceiver> receiver = Utils::OpenHandle(*object);
i::Handle<i::JSArray> names;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index e099cb699a..3681ab0764 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -8,6 +8,7 @@
#include <memory>
#include "include/v8-callbacks.h"
+#include "include/v8-date.h"
#include "include/v8-debug.h"
#include "include/v8-embedder-heap.h"
#include "include/v8-local-handle.h"
@@ -49,6 +50,12 @@ int GetContextId(Local<Context> context);
void SetInspector(Isolate* isolate, v8_inspector::V8Inspector*);
v8_inspector::V8Inspector* GetInspector(Isolate* isolate);
+// Returns a debug string representation of the bigint.
+Local<String> GetBigIntDescription(Isolate* isolate, Local<BigInt> bigint);
+
+// Returns a debug string representation of the date.
+Local<String> GetDateDescription(Local<Date> date);
+
// Returns a debug string representation of the function.
Local<String> GetFunctionDescription(Local<Function> function);
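GetBigIntDescription() above avoids the slow decimal conversion for huge BigInts: past 100,000 64-bit words it switches to radix 16, splices in the "0x"/"-0x" prefix, and always appends the BigInt literal suffix "n". The sketch below mirrors that policy over plain strings; DescribeBigInt and its parameters are hypothetical, and `digits` stands in for the output of BigInt::ToString (which already carries a leading '-' for negative values).

#include <cstdint>
#include <iostream>
#include <string>

std::string DescribeBigInt(uint64_t words64_count, bool negative,
                           const std::string& digits) {
  const int radix = (words64_count > 100 * 1000) ? 16 : 10;
  std::string description = digits;
  if (radix == 16) {
    // Re-insert "0x" between the sign and the hex digits, as the
    // debug-interface code does with ConsStrings.
    description = negative ? "-0x" + digits.substr(1) : "0x" + digits;
  }
  return description + "n";  // BigInt descriptions end with the 'n' suffix
}

int main() {
  std::cout << DescribeBigInt(1, false, "12345") << "\n";        // 12345n
  std::cout << DescribeBigInt(200000, true, "-abcdef") << "\n";  // -0xabcdefn
}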
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index d19cd0712e..079e60e9d4 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -390,6 +390,7 @@ void Debug::ThreadInit() {
static_cast<base::AtomicWord>(0));
thread_local_.break_on_next_function_call_ = false;
UpdateHookOnFunctionCall();
+ thread_local_.promise_stack_ = Smi::zero();
}
char* Debug::ArchiveDebug(char* storage) {
@@ -448,6 +449,8 @@ void Debug::Iterate(RootVisitor* v, ThreadLocal* thread_local_data) {
v->VisitRootPointer(
Root::kDebug, nullptr,
FullObjectSlot(&thread_local_data->ignore_step_into_function_));
+ v->VisitRootPointer(Root::kDebug, nullptr,
+ FullObjectSlot(&thread_local_data->promise_stack_));
}
DebugInfoListNode::DebugInfoListNode(Isolate* isolate, DebugInfo debug_info)
@@ -469,7 +472,6 @@ void Debug::Unload() {
ClearStepping();
RemoveAllCoverageInfos();
ClearAllDebuggerHints();
- ClearGlobalPromiseStack();
debug_delegate_ = nullptr;
}
@@ -2067,11 +2069,6 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
delete node;
}
-void Debug::ClearGlobalPromiseStack() {
- while (isolate_->PopPromise()) {
- }
-}
-
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
HandleScope scope(isolate_);
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 2747dab566..ce3d52ab58 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -490,8 +490,6 @@ class V8_EXPORT_PRIVATE Debug {
DebugInfoListNode** curr);
void FreeDebugInfoListNode(DebugInfoListNode* prev, DebugInfoListNode* node);
- void ClearGlobalPromiseStack();
-
void SetTemporaryObjectTrackingDisabled(bool disabled);
bool GetTemporaryObjectTrackingDisabled() const;
@@ -569,6 +567,10 @@ class V8_EXPORT_PRIVATE Debug {
// This flag is true when SetBreakOnNextFunctionCall is called and it forces
// debugger to break on next function call.
bool break_on_next_function_call_;
+
+ // Throwing an exception may cause a Promise rejection. For this purpose
+ // we keep track of a stack of nested promises.
+ Object promise_stack_;
};
static void Iterate(RootVisitor* v, ThreadLocal* thread_local_data);
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index 0337d2d291..de83ef1275 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -16,11 +16,9 @@ namespace internal {
0x1000)
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
#undef ASSERT_OFFSET
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index c695347a0b..06d5f035e6 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -9,8 +9,7 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = kInstrSize;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
#else
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index e30df04a64..ed78a06b35 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -95,7 +95,7 @@ class FrameWriter {
const char* debug_hint = "") {
Object obj = iterator->GetRawValue();
PushRawObject(obj, debug_hint);
- if (trace_scope_) {
+ if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index());
}
deoptimizer_->QueueValueForMaterialization(output_address(top_offset_), obj,
@@ -178,7 +178,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Isolate* isolate = isolate_;
- NativeContext native_context = function_.context().native_context();
+ NativeContext native_context = function_.native_context();
Object element = native_context.DeoptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = FromCodeT(CodeT::cast(element));
@@ -193,11 +193,11 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
- unsigned deopt_exit_index, Address from,
- int fp_to_sp_delta, Isolate* isolate) {
+ Address from, int fp_to_sp_delta,
+ Isolate* isolate) {
JSFunction function = JSFunction::cast(Object(raw_function));
- Deoptimizer* deoptimizer = new Deoptimizer(
- isolate, function, kind, deopt_exit_index, from, fp_to_sp_delta);
+ Deoptimizer* deoptimizer =
+ new Deoptimizer(isolate, function, kind, from, fp_to_sp_delta);
isolate->set_current_deoptimizer(deoptimizer);
return deoptimizer;
}
@@ -377,8 +377,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
isolate->heap()->InvalidateCodeDeoptimizationData(code);
}
- native_context.GetOSROptimizedCodeCache().EvictMarkedCode(
- native_context.GetIsolate());
+ native_context.osr_code_cache().EvictDeoptimizedCode(isolate);
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
@@ -393,7 +392,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
while (!context.IsUndefined(isolate)) {
NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context);
- OSROptimizedCodeCache::Clear(native_context);
+ OSROptimizedCodeCache::Clear(isolate, native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
}
@@ -442,17 +441,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
// be different from the code on the function - evict it if necessary.
function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
function.shared(), "unlinking code marked for deopt");
- if (!code.deopt_already_counted()) {
- code.set_deopt_already_counted(true);
- }
- DeoptimizeMarkedCodeForContext(function.context().native_context());
+ DeoptimizeMarkedCodeForContext(function.native_context());
// TODO(mythria): Ideally EvictMarkCode should compact the cache without
// having to explicitly call this. We don't do this currently because
// compacting causes GC and DeoptimizeMarkedCodeForContext uses raw
// pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove
// this call from here.
OSROptimizedCodeCache::Compact(
- Handle<NativeContext>(function.context().native_context(), isolate));
+ isolate, Handle<NativeContext>(function.native_context(), isolate));
}
}
@@ -464,19 +460,16 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return "deopt-eager";
- case DeoptimizeKind::kSoft:
- return "deopt-soft";
case DeoptimizeKind::kLazy:
return "deopt-lazy";
}
}
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
- DeoptimizeKind kind, unsigned deopt_exit_index,
- Address from, int fp_to_sp_delta)
+ DeoptimizeKind kind, Address from, int fp_to_sp_delta)
: isolate_(isolate),
function_(function),
- deopt_exit_index_(deopt_exit_index),
+ deopt_exit_index_(kFixedExitSizeMarker),
deopt_kind_(kind),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
@@ -500,9 +493,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
deoptimizing_throw_ = true;
}
- DCHECK(deopt_exit_index_ == kFixedExitSizeMarker ||
- deopt_exit_index_ < kMaxNumberOfEntries);
-
DCHECK_NE(from, kNullAddress);
compiled_code_ = FindOptimizedCode();
DCHECK(!compiled_code_.is_null());
@@ -513,11 +503,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
disallow_garbage_collection_ = new DisallowGarbageCollection();
#endif // DEBUG
CHECK(CodeKindCanDeoptimize(compiled_code_.kind()));
- if (!compiled_code_.deopt_already_counted() &&
- deopt_kind_ == DeoptimizeKind::kSoft) {
- isolate->counters()->soft_deopts_executed()->Increment();
- }
- compiled_code_.set_deopt_already_counted(true);
{
HandleScope scope(isolate_);
PROFILE(isolate_, CodeDeoptEvent(handle(compiled_code_, isolate_), kind,
@@ -528,38 +513,35 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
function.shared().internal_formal_parameter_count_with_receiver();
input_ = new (size) FrameDescription(size, parameter_count);
- if (kSupportsFixedDeoptExitSizes) {
- DCHECK_EQ(deopt_exit_index_, kFixedExitSizeMarker);
- // Calculate the deopt exit index from return address.
- DCHECK_GT(kNonLazyDeoptExitSize, 0);
- DCHECK_GT(kLazyDeoptExitSize, 0);
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(compiled_code_.deoptimization_data());
- Address deopt_start = compiled_code_.raw_instruction_start() +
- deopt_data.DeoptExitStart().value();
- int non_lazy_deopt_count = deopt_data.NonLazyDeoptCount().value();
- Address lazy_deopt_start =
- deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
- // The deoptimization exits are sorted so that lazy deopt exits appear after
- // eager deopts.
- static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
- static_cast<int>(kLastDeoptimizeKind),
- "lazy deopts are expected to be emitted last");
- // from_ is the value of the link register after the call to the
- // deoptimizer, so for the last lazy deopt, from_ points to the first
- // non-lazy deopt, so we use <=, similarly for the last non-lazy deopt and
- // the first deopt with resume entry.
- if (from_ <= lazy_deopt_start) {
- int offset =
- static_cast<int>(from_ - kNonLazyDeoptExitSize - deopt_start);
- DCHECK_EQ(0, offset % kNonLazyDeoptExitSize);
- deopt_exit_index_ = offset / kNonLazyDeoptExitSize;
- } else {
- int offset =
- static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
- DCHECK_EQ(0, offset % kLazyDeoptExitSize);
- deopt_exit_index_ = non_lazy_deopt_count + (offset / kLazyDeoptExitSize);
- }
+ DCHECK_EQ(deopt_exit_index_, kFixedExitSizeMarker);
+ // Calculate the deopt exit index from return address.
+ DCHECK_GT(kEagerDeoptExitSize, 0);
+ DCHECK_GT(kLazyDeoptExitSize, 0);
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(compiled_code_.deoptimization_data());
+ Address deopt_start = compiled_code_.raw_instruction_start() +
+ deopt_data.DeoptExitStart().value();
+ int eager_deopt_count = deopt_data.EagerDeoptCount().value();
+ Address lazy_deopt_start =
+ deopt_start + eager_deopt_count * kEagerDeoptExitSize;
+ // The deoptimization exits are sorted so that lazy deopt exits appear after
+ // eager deopts.
+ static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
+ static_cast<int>(kLastDeoptimizeKind),
+ "lazy deopts are expected to be emitted last");
+ // from_ is the value of the link register after the call to the
+ // deoptimizer, so for the last lazy deopt, from_ points to the first
+ // non-lazy deopt, so we use <=, similarly for the last non-lazy deopt and
+ // the first deopt with resume entry.
+ if (from_ <= lazy_deopt_start) {
+ int offset = static_cast<int>(from_ - kEagerDeoptExitSize - deopt_start);
+ DCHECK_EQ(0, offset % kEagerDeoptExitSize);
+ deopt_exit_index_ = offset / kEagerDeoptExitSize;
+ } else {
+ int offset =
+ static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
+ DCHECK_EQ(0, offset % kLazyDeoptExitSize);
+ deopt_exit_index_ = eager_deopt_count + (offset / kLazyDeoptExitSize);
}
}
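With fixed-size deopt exits now assumed on every architecture, the constructor above recovers deopt_exit_index_ purely from the return address: eager exits are emitted first, lazy exits after them, and the index is the offset into the respective region divided by the per-kind exit size. A standalone sketch of that arithmetic with made-up addresses and sizes (on x64 both sizes happen to be 4 bytes):

#include <cassert>
#include <cstdint>

int DeoptExitIndex(uintptr_t from, uintptr_t deopt_start, int eager_count,
                   int eager_size, int lazy_size) {
  uintptr_t lazy_start = deopt_start + eager_count * eager_size;
  // `from` is the address after the call, so step back one exit size first.
  if (from <= lazy_start) {
    int offset = static_cast<int>(from - eager_size - deopt_start);
    assert(offset % eager_size == 0);
    return offset / eager_size;
  }
  int offset = static_cast<int>(from - lazy_size - lazy_start);
  assert(offset % lazy_size == 0);
  return eager_count + offset / lazy_size;
}

int main() {
  // Three 4-byte eager exits starting at 0x1000, then 4-byte lazy exits.
  assert(DeoptExitIndex(0x1004, 0x1000, 3, 4, 4) == 0);  // first eager exit
  assert(DeoptExitIndex(0x100c, 0x1000, 3, 4, 4) == 2);  // last eager exit
  assert(DeoptExitIndex(0x1010, 0x1000, 3, 4, 4) == 3);  // first lazy exit
}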
@@ -580,6 +562,7 @@ Handle<Code> Deoptimizer::compiled_code() const {
Deoptimizer::~Deoptimizer() {
DCHECK(input_ == nullptr && output_ == nullptr);
DCHECK_NULL(disallow_garbage_collection_);
+ delete trace_scope_;
}
void Deoptimizer::DeleteFrameDescriptions() {
@@ -602,8 +585,6 @@ Builtin Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return Builtin::kDeoptimizationEntry_Eager;
- case DeoptimizeKind::kSoft:
- return Builtin::kDeoptimizationEntry_Soft;
case DeoptimizeKind::kLazy:
return Builtin::kDeoptimizationEntry_Lazy;
}
@@ -618,9 +599,6 @@ bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
case Builtin::kDeoptimizationEntry_Eager:
*type_out = DeoptimizeKind::kEager;
return true;
- case Builtin::kDeoptimizationEntry_Soft:
- *type_out = DeoptimizeKind::kSoft;
- return true;
case Builtin::kDeoptimizationEntry_Lazy:
*type_out = DeoptimizeKind::kLazy;
return true;
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 36e85480be..47c05b491e 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -56,8 +56,7 @@ class Deoptimizer : public Malloced {
DeoptimizeKind deopt_kind() const { return deopt_kind_; }
static Deoptimizer* New(Address raw_function, DeoptimizeKind kind,
- unsigned deopt_exit_index, Address from,
- int fp_to_sp_delta, Isolate* isolate);
+ Address from, int fp_to_sp_delta, Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
// The returned object with information on the optimized frame needs to be
@@ -118,21 +117,12 @@ class Deoptimizer : public Malloced {
static constexpr int kMaxNumberOfEntries = 16384;
// This marker is passed to Deoptimizer::New as {deopt_exit_index} on
- // platforms that have fixed deopt sizes (see also
- // kSupportsFixedDeoptExitSizes). The actual deoptimization id is then
+ // platforms that have fixed deopt sizes. The actual deoptimization id is then
// calculated from the return address.
static constexpr unsigned kFixedExitSizeMarker = kMaxUInt32;
- // Set to true when the architecture supports deoptimization exit sequences
- // of a fixed size, that can be sorted so that the deoptimization index is
- // deduced from the address of the deoptimization exit.
- // TODO(jgruber): Remove this, and support for variable deopt exit sizes,
- // once all architectures use fixed exit sizes.
- V8_EXPORT_PRIVATE static const bool kSupportsFixedDeoptExitSizes;
-
- // Size of deoptimization exit sequence. This is only meaningful when
- // kSupportsFixedDeoptExitSizes is true.
- V8_EXPORT_PRIVATE static const int kNonLazyDeoptExitSize;
+ // Size of deoptimization exit sequence.
+ V8_EXPORT_PRIVATE static const int kEagerDeoptExitSize;
V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
// Tracing.
@@ -145,7 +135,7 @@ class Deoptimizer : public Malloced {
const TranslatedFrame::iterator& iterator);
Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
- unsigned deopt_exit_index, Address from, int fp_to_sp_delta);
+ Address from, int fp_to_sp_delta);
Code FindOptimizedCode();
void DeleteFrameDescriptions();
@@ -182,11 +172,11 @@ class Deoptimizer : public Malloced {
Code FindDeoptimizingCode(Address addr);
// Tracing.
- bool tracing_enabled() const { return static_cast<bool>(trace_scope_); }
+ bool tracing_enabled() const { return trace_scope_ != nullptr; }
bool verbose_tracing_enabled() const {
- return FLAG_trace_deopt_verbose && trace_scope_;
+ return FLAG_trace_deopt_verbose && tracing_enabled();
}
- CodeTracer::Scope* trace_scope() const { return trace_scope_.get(); }
+ CodeTracer::Scope* trace_scope() const { return trace_scope_; }
CodeTracer::Scope* verbose_trace_scope() const {
return FLAG_trace_deopt_verbose ? trace_scope() : nullptr;
}
@@ -239,7 +229,9 @@ class Deoptimizer : public Malloced {
DisallowGarbageCollection* disallow_garbage_collection_;
#endif // DEBUG
- std::unique_ptr<CodeTracer::Scope> trace_scope_;
+ // Note: This is intentionally not a unique_ptr s.t. the Deoptimizer
+ // satisfies is_standard_layout, needed for offsetof().
+ CodeTracer::Scope* const trace_scope_;
friend class DeoptimizedFrameInfo;
friend class FrameDescription;
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 4fcb22c209..7507ba279f 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -9,8 +9,7 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 5;
+const int Deoptimizer::kEagerDeoptExitSize = 5;
const int Deoptimizer::kLazyDeoptExitSize = 5;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
index 73d71036ed..f513e3fbab 100644
--- a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
+++ b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
@@ -7,8 +7,7 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index c20b5c5ecf..62a7f45788 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -7,8 +7,7 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index c20b5c5ecf..62a7f45788 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -7,8 +7,7 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index c315743111..5a2557c24d 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -16,11 +16,9 @@ namespace internal {
0x1000)
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
#undef ASSERT_OFFSET
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc b/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
index 73d71036ed..f513e3fbab 100644
--- a/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
+++ b/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
@@ -7,8 +7,7 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 9db7bf722f..95028c0e65 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -16,11 +16,9 @@ namespace internal {
0x1000)
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
#undef ASSERT_OFFSET
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 6 + 2;
+const int Deoptimizer::kEagerDeoptExitSize = 6 + 2;
const int Deoptimizer::kLazyDeoptExitSize = 6 + 2;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 1fba0c6e2d..29c56e61ac 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -18,11 +18,9 @@ namespace internal {
0x7F)
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Eager);
ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
-ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Soft);
#undef ASSERT_OFFSET
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 4;
+const int Deoptimizer::kEagerDeoptExitSize = 4;
const int Deoptimizer::kLazyDeoptExitSize = 4;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 3e4dc1b477..42e9dff7e3 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -369,7 +369,7 @@ void VerifyJSObjectElements(Isolate* isolate, JSObject object) {
if (object.IsJSTypedArray()) {
// TODO(bmeurer,v8:4153): Fix CreateTypedArray to either not instantiate
// the object or properly initialize it on errors during construction.
- /* CHECK(object->HasTypedArrayElements()); */
+ /* CHECK(object->HasTypedArrayOrRabGsabTypedArrayElements()); */
return;
}
CHECK(!object.elements().IsByteArray());
@@ -990,9 +990,23 @@ void JSGlobalObject::JSGlobalObjectVerify(Isolate* isolate) {
}
void Oddball::OddballVerify(Isolate* isolate) {
- TorqueGeneratedOddball::OddballVerify(isolate);
+ PrimitiveHeapObjectVerify(isolate);
+ CHECK(IsOddball(isolate));
+
Heap* heap = isolate->heap();
+ Object string = to_string();
+ VerifyPointer(isolate, string);
+ CHECK(string.IsString());
+ Object type = type_of();
+ VerifyPointer(isolate, type);
+ CHECK(type.IsString());
+ Object kind_value = TaggedField<Object>::load(*this, kKindOffset);
+ VerifyPointer(isolate, kind_value);
+ CHECK(kind_value.IsSmi());
+
Object number = to_number();
+ VerifyPointer(isolate, number);
+ CHECK(number.IsSmi() || number.IsHeapNumber());
if (number.IsHeapObject()) {
CHECK(number == ReadOnlyRoots(heap).nan_value() ||
number == ReadOnlyRoots(heap).hole_nan_value());
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index d89f9b4723..2ec89d5896 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -1204,7 +1204,7 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) {
} else {
os << "\n - no optimized code";
}
- os << "\n - optimization marker: " << optimization_marker();
+ os << "\n - tiering state: " << tiering_state();
os << "\n - maybe has optimized code: " << maybe_has_optimized_code();
os << "\n - invocation count: " << invocation_count();
os << "\n - profiler ticks: " << profiler_ticks();
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 4bd99cbaca..d222c11336 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -217,6 +217,7 @@ void PerfJitLogger::LogRecordedBuffer(
if (FLAG_perf_basic_prof_only_functions &&
(abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
abstract_code->kind() != CodeKind::TURBOFAN &&
+ abstract_code->kind() != CodeKind::MAGLEV &&
abstract_code->kind() != CodeKind::BASELINE)) {
return;
}
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index 03868d5357..012e2ae400 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -61,6 +61,16 @@ class Decoder {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(byte* instruction);
+ // Prefixed instructions.
+ enum PrefixType { not_prefixed, is_prefixed };
+  // These are static so the prefix state is retained across Decoder instances.
+ static PrefixType PrefixStatus;
+ static uint64_t PrefixValue;
+ uint64_t GetPrefixValue();
+ void SetAsPrefixed(uint64_t v);
+ void ResetPrefix();
+ bool IsPrefixed();
+
private:
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
@@ -82,6 +92,7 @@ class Decoder {
void Unknown(Instruction* instr);
void UnknownFormat(Instruction* instr, const char* opcname);
+ void DecodeExtP(Instruction* instr);
void DecodeExt0(Instruction* instr);
void DecodeExt1(Instruction* instr);
void DecodeExt2(Instruction* instr);
@@ -95,6 +106,25 @@ class Decoder {
int out_buffer_pos_;
};
+// Define Prefix functions and values.
+// static
+Decoder::PrefixType Decoder::PrefixStatus = not_prefixed;
+uint64_t Decoder::PrefixValue = 0;
+
+uint64_t Decoder::GetPrefixValue() { return PrefixValue; }
+
+void Decoder::SetAsPrefixed(uint64_t v) {
+ PrefixStatus = is_prefixed;
+ PrefixValue = v;
+}
+
+void Decoder::ResetPrefix() {
+ PrefixStatus = not_prefixed;
+ PrefixValue = 0;
+}
+
+bool Decoder::IsPrefixed() { return PrefixStatus == is_prefixed; }
+
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
@@ -255,9 +285,16 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return FormatVectorRegister(instr, format);
}
case 'i': { // int16
- int32_t value = (instr->Bits(15, 0) << 16) >> 16;
+ int64_t value;
+ uint32_t imm_value = instr->Bits(15, 0);
+ if (IsPrefixed()) {
+ uint64_t prefix_value = GetPrefixValue();
+ value = SIGN_EXT_IMM34((prefix_value << 16) | imm_value);
+ } else {
+ value = (static_cast<int64_t>(imm_value) << 48) >> 48;
+ }
out_buffer_pos_ +=
- base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "%ld", value);
return 5;
}
case 'I': { // IMM8
@@ -425,6 +462,83 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
Format(instr, buffer);
}
+void Decoder::DecodeExtP(Instruction* instr) {
+ switch (EXTP | (instr->BitField(25, 25))) {
+ case PLOAD_STORE_8LS:
+ case PLOAD_STORE_MLS: {
+ // TODO(miladfarca): Decode the R bit.
+ DCHECK_NE(instr->Bit(20), 1);
+ // Read prefix.
+ SetAsPrefixed(instr->Bits(17, 0));
+ // Read suffix (next instruction).
+ Instruction* next_instr =
+ bit_cast<Instruction*>(bit_cast<intptr_t>(instr) + kInstrSize);
+ switch (next_instr->OpcodeBase()) {
+ // Prefixed ADDI.
+ case (ADDI): {
+ if (next_instr->RAValue() == 0) {
+ // This is load immediate prefixed.
+ Format(instr, "pli");
+ Format(next_instr, " 'rt, ");
+ } else {
+ Format(instr, "paddi");
+ Format(next_instr, " 'rt, 'ra, ");
+ }
+ Format(next_instr, "'int34");
+ break;
+ }
+ // Prefixed LBZ.
+ case LBZ: {
+ Format(next_instr, "plbz 'rt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LHZ.
+ case LHZ: {
+ Format(next_instr, "plhz 'rt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LHA.
+ case LHA: {
+ Format(next_instr, "plha 'rt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LWZ.
+ case LWZ: {
+ Format(next_instr, "plwz 'rt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LWA.
+ case PPLWA: {
+ Format(next_instr, "plwa 'rt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LD.
+ case PPLD: {
+ Format(next_instr, "pld 'rt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LFS.
+ case LFS: {
+ Format(next_instr, "plfs 'Dt, 'int34('ra)");
+ break;
+ }
+ // Prefixed LFD.
+ case LFD: {
+ Format(next_instr, "plfd 'Dt, 'int34('ra)");
+ break;
+ }
+ default: {
+ Unknown(instr);
+ }
+ }
+ break;
+ }
+ default: {
+ Unknown(instr);
+ }
+ }
+}
+
void Decoder::DecodeExt0(Instruction* instr) {
// Some encodings have integers hard coded in the middle, handle those first.
switch (EXT0 | (instr->BitField(20, 16)) | (instr->BitField(10, 0))) {
@@ -1432,9 +1546,21 @@ void Decoder::DecodeExt6(Instruction* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
+
+ uint32_t opcode = instr->OpcodeValue() << 26;
// Print raw instruction bytes.
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ", instr->InstructionBits());
+ if (opcode != EXTP) {
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ", instr->InstructionBits());
+ } else {
+ // Prefixed instructions have a 4-byte prefix and a 4-byte suffix. Print
+ // both on the same line.
+ Instruction* next_instr =
+ bit_cast<Instruction*>(bit_cast<intptr_t>(instr) + kInstrSize);
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "%08x|%08x ",
+ instr->InstructionBits(), next_instr->InstructionBits());
+ }
if (ABI_USES_FUNCTION_DESCRIPTORS && instr->InstructionBits() == 0) {
// The first field will be identified as a jump table entry. We
@@ -1443,7 +1569,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
return kInstrSize;
}
- uint32_t opcode = instr->OpcodeValue() << 26;
switch (opcode) {
case TWI: {
PrintSoftwareInterrupt(instr->SvcValue());
@@ -1563,6 +1688,10 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
Format(instr, "b'l'a 'target26");
break;
}
+ case EXTP: {
+ DecodeExtP(instr);
+ break;
+ }
case EXT0: {
DecodeExt0(instr);
break;
@@ -1753,6 +1882,13 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
}
+ if (IsPrefixed()) {
+ // The next instruction (suffix) should have already been decoded as part of
+ // prefix decoding.
+ ResetPrefix();
+ return 2 * kInstrSize;
+ }
+
return kInstrSize;
}
} // namespace internal
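Both the disassembler change above and the matching simulator change further below handle Power10 prefixed instructions, whose 34-bit immediate is split across 18 prefix bits and the 16-bit field of the following (suffix) instruction and then sign-extended via SIGN_EXT_IMM34. The sketch below reassembles such an immediate; SignExtImm34 is a portable stand-in for what the V8 macro is expected to compute, not its actual definition.

#include <cassert>
#include <cstdint>

// Sign-extend a value held in the low 34 bits of a 64-bit word.
int64_t SignExtImm34(uint64_t value) {
  const uint64_t kSignBit = uint64_t{1} << 33;
  value &= (uint64_t{1} << 34) - 1;  // keep the low 34 bits
  return static_cast<int64_t>(value ^ kSignBit) - static_cast<int64_t>(kSignBit);
}

// Combine Bits(17, 0) of the prefix word with Bits(15, 0) of the suffix.
int64_t PrefixedImmediate(uint32_t prefix_bits18, uint32_t suffix_bits16) {
  uint64_t combined =
      (static_cast<uint64_t>(prefix_bits18) << 16) | suffix_bits16;
  return SignExtImm34(combined);
}

int main() {
  assert(PrefixedImmediate(0x00000, 0x0001) == 1);
  assert(PrefixedImmediate(0x3FFFF, 0xFFFF) == -1);  // all 34 bits set
  assert(PrefixedImmediate(0x20000, 0x0000) == -(int64_t{1} << 33));
}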
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2a0cf4ff02..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -22,36 +22,6 @@
// This has to come after windows.h.
#include <versionhelpers.h> // For IsWindows8OrGreater().
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 12695fa39c..9cad7b8717 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -1158,6 +1158,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vcvtdq2pd %s,", NameOfAVXRegister(regop));
current += PrintRightAVXOperand(current);
break;
+ case 0xC2:
+ AppendToBuffer("vcmpss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
+ current += 1;
+ break;
default:
UnimplementedInstruction();
}
@@ -1213,6 +1220,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfAVXRegister(vvvv));
current += PrintRightAVXOperand(current);
break;
+ case 0xC2:
+ AppendToBuffer("vcmpsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
+ current += 1;
+ break;
#define DISASM_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
case 0x##opcode: \
AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
@@ -2296,6 +2310,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movsxb";
case 0xBF:
return "movsxw";
+ case 0xC2:
+ return "cmpss";
default:
return nullptr;
}
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index f8a02da402..0759741060 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -1139,9 +1139,9 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return reinterpret_cast<uintptr_t>(get_sp());
}
- // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // Otherwise the limit is the JS stack. Leave a safety margin of 4 KiB
// to prevent overrunning the stack when pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 1024;
+ return reinterpret_cast<uintptr_t>(stack_) + 4 * KB;
}
// Unsupported instructions use Format to print an error and stop execution.
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 6299cb2141..da2acd7d0c 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -313,9 +313,9 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return get_sp();
}
- // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // Otherwise the limit is the JS stack. Leave a safety margin of 4 KiB
// to prevent overrunning the stack when pushing values.
- return stack_limit_ + 1024;
+ return stack_limit_ + 4 * KB;
}
void Simulator::SetRedirectInstruction(Instruction* instruction) {
diff --git a/deps/v8/src/execution/clobber-registers.cc b/deps/v8/src/execution/clobber-registers.cc
index 0aea68dbe4..8f7fba765f 100644
--- a/deps/v8/src/execution/clobber-registers.cc
+++ b/deps/v8/src/execution/clobber-registers.cc
@@ -13,6 +13,12 @@
#include "src/codegen/ia32/register-ia32.h"
#elif V8_HOST_ARCH_X64
#include "src/codegen/x64/register-x64.h"
+#elif V8_HOST_ARCH_LOONG64
+#include "src/codegen/loong64/register-loong64.h"
+#elif V8_HOST_ARCH_MIPS
+#include "src/codegen/mips/register-mips.h"
+#elif V8_HOST_ARCH_MIPS64
+#include "src/codegen/mips64/register-mips64.h"
#endif
namespace v8 {
@@ -39,18 +45,33 @@ namespace internal {
#elif V8_HOST_ARCH_ARM64
#define CLOBBER_REGISTER(R) __asm__ volatile("fmov " #R ",xzr" :::);
-#endif // V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM64
+#elif V8_HOST_ARCH_LOONG64
+#define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);
+
+#elif V8_HOST_ARCH_MIPS
+#define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);
+
+#elif V8_HOST_ARCH_MIPS64
+#define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);
+
+#endif // V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM64 ||
+ // V8_HOST_ARCH_LOONG64 || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#endif // V8_CC_MSVC
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
// clobber all double registers
-#ifdef CLOBBER_REGISTER
+#if defined(CLOBBER_REGISTER)
DOUBLE_REGISTERS(CLOBBER_REGISTER)
#undef CLOBBER_REGISTER
return 0;
+#elif defined(CLOBBER_USE_REGISTER)
+ DOUBLE_USE_REGISTERS(CLOBBER_USE_REGISTER)
+#undef CLOBBER_USE_REGISTER
+ return 0;
+
#else
// TODO(v8:11798): This clobbers only a subset of registers depending on
// compiler. Rewrite this in assembly to really clobber all registers. GCC for
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 0fdc7e6311..c40c6e6857 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -23,7 +23,7 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/visitors.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/strings/string-stream.h"
#include "src/zone/zone-containers.h"
@@ -1608,7 +1608,7 @@ Handle<Object> FrameSummary::JavaScriptFrameSummary::script() const {
}
Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
- return handle(function_->context().native_context(), isolate());
+ return handle(function_->native_context(), isolate());
}
Handle<StackFrameInfo>
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 44de6b52c8..57ccf3515a 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -42,7 +42,8 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
#if defined V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object)->heap();
-#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+#elif defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE) && \
+ !defined(V8_EXTERNAL_CODE_SPACE)
Isolate* isolate =
Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
@@ -57,7 +58,8 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object);
-#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+#elif defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE) && \
+ !defined(V8_EXTERNAL_CODE_SPACE)
Isolate* isolate =
Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 2f643ef627..fb0939c03e 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -96,7 +96,7 @@
#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/snapshot/embedded/embedded-file-writer-interface.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/shared-heap-deserializer.h"
@@ -989,6 +989,25 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
PromiseCapability::cast(context->get(index)), isolate);
if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
+ } else if (IsBuiltinFunction(
+ isolate, reaction->fulfill_handler(),
+ Builtin::kPromiseAllSettledResolveElementClosure)) {
+ Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
+ isolate);
+ Handle<Context> context(function->context(), isolate);
+ Handle<JSFunction> combinator(
+ context->native_context().promise_all_settled(), isolate);
+ builder->AppendPromiseCombinatorFrame(function, combinator);
+
+    // Now peek into the Promise.allSettled() resolve element context to
+ // find the promise capability that's being resolved when all
+ // the concurrent promises resolve.
+ int const index =
+ PromiseBuiltins::kPromiseAllResolveElementCapabilitySlot;
+ Handle<PromiseCapability> capability(
+ PromiseCapability::cast(context->get(index)), isolate);
+ if (!capability->promise().IsJSPromise()) return;
+ promise = handle(JSPromise::cast(capability->promise()), isolate);
} else if (IsBuiltinFunction(isolate, reaction->reject_handler(),
Builtin::kPromiseAnyRejectElementClosure)) {
Handle<JSFunction> function(JSFunction::cast(reaction->reject_handler()),
@@ -1509,8 +1528,6 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
data = handle(access_check_info.data(), this);
}
- LOG(this, ApiSecurityCheck());
-
{
// Leaving JavaScript.
VMState<EXTERNAL> state(this);
@@ -2600,21 +2617,22 @@ bool Isolate::OptionalRescheduleException(bool clear_exception) {
}
void Isolate::PushPromise(Handle<JSObject> promise) {
- ThreadLocalTop* tltop = thread_local_top();
- PromiseOnStack* prev = tltop->promise_on_stack_;
- Handle<JSObject> global_promise = global_handles()->Create(*promise);
- tltop->promise_on_stack_ = new PromiseOnStack(global_promise, prev);
-}
-
-bool Isolate::PopPromise() {
- ThreadLocalTop* tltop = thread_local_top();
- if (tltop->promise_on_stack_ == nullptr) return false;
- PromiseOnStack* prev = tltop->promise_on_stack_->prev();
- Handle<Object> global_promise = tltop->promise_on_stack_->promise();
- delete tltop->promise_on_stack_;
- tltop->promise_on_stack_ = prev;
- global_handles()->Destroy(global_promise.location());
- return true;
+ Handle<Object> promise_on_stack(debug()->thread_local_.promise_stack_, this);
+ promise_on_stack = factory()->NewPromiseOnStack(promise_on_stack, promise);
+ debug()->thread_local_.promise_stack_ = *promise_on_stack;
+}
+
+void Isolate::PopPromise() {
+ if (!IsPromiseStackEmpty()) {
+ debug()->thread_local_.promise_stack_ =
+ PromiseOnStack::cast(debug()->thread_local_.promise_stack_).prev();
+ }
+}
+
+bool Isolate::IsPromiseStackEmpty() const {
+ DCHECK_IMPLIES(!debug()->thread_local_.promise_stack_.IsSmi(),
+ debug()->thread_local_.promise_stack_.IsPromiseOnStack());
+ return debug()->thread_local_.promise_stack_.IsSmi();
}
namespace {
@@ -2688,15 +2706,14 @@ bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<JSPromise> promise) {
Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<Object> undefined = factory()->undefined_value();
- ThreadLocalTop* tltop = thread_local_top();
- if (tltop->promise_on_stack_ == nullptr) return undefined;
+ if (IsPromiseStackEmpty()) return undefined;
// Find the top-most try-catch or try-finally handler.
CatchType prediction = PredictExceptionCatcher();
if (prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) {
return undefined;
}
Handle<Object> retval = undefined;
- PromiseOnStack* promise_on_stack = tltop->promise_on_stack_;
+ Handle<Object> promise_stack(debug()->thread_local_.promise_stack_, this);
for (StackFrameIterator it(this); !it.done(); it.Advance()) {
StackFrame* frame = it.frame();
HandlerTable::CatchPrediction catch_prediction;
@@ -2728,10 +2745,16 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<JSPromise>::cast(retval)->set_handled_hint(true);
}
return retval;
- case HandlerTable::PROMISE:
- return promise_on_stack
- ? Handle<Object>::cast(promise_on_stack->promise())
- : undefined;
+ case HandlerTable::PROMISE: {
+ Handle<JSObject> promise;
+ if (promise_stack->IsPromiseOnStack() &&
+ PromiseOnStack::GetPromise(
+ Handle<PromiseOnStack>::cast(promise_stack))
+ .ToHandle(&promise)) {
+ return promise;
+ }
+ return undefined;
+ }
case HandlerTable::UNCAUGHT_ASYNC_AWAIT:
case HandlerTable::ASYNC_AWAIT: {
// If in the initial portion of async/await, continue the loop to pop up
@@ -2739,15 +2762,21 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
// dependents is found, or a non-async stack frame is encountered, in
// order to handle the synchronous async/await catch prediction case:
// assume that async function calls are awaited.
- if (!promise_on_stack) return retval;
- retval = promise_on_stack->promise();
+ if (!promise_stack->IsPromiseOnStack()) {
+ return retval;
+ }
+ Handle<PromiseOnStack> promise_on_stack =
+ Handle<PromiseOnStack>::cast(promise_stack);
+ if (!PromiseOnStack::GetPromise(promise_on_stack).ToHandle(&retval)) {
+ return retval;
+ }
if (retval->IsJSPromise()) {
if (PromiseHasUserDefinedRejectHandler(
Handle<JSPromise>::cast(retval))) {
return retval;
}
}
- promise_on_stack = promise_on_stack->prev();
+ promise_stack = handle(promise_on_stack->prev(), this);
continue;
}
}
@@ -3787,10 +3816,14 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
heap()->read_only_space()->FirstPageAddress();
add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
AddressToString(ro_space_firstpage_address));
- const uintptr_t map_space_firstpage_address =
- heap()->map_space()->FirstPageAddress();
- add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
- AddressToString(map_space_firstpage_address));
+
+ if (heap()->map_space()) {
+ const uintptr_t map_space_firstpage_address =
+ heap()->map_space()->FirstPageAddress();
+ add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
+ AddressToString(map_space_firstpage_address));
+ }
+
const uintptr_t code_space_firstpage_address =
heap()->code_space()->FirstPageAddress();
add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
@@ -4088,7 +4121,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
setup_delegate_ = nullptr;
Builtins::InitializeIsolateDataTables(this);
- Builtins::EmitCodeCreateEvents(this);
+
+ // Extra steps in the logger after the heap has been set up.
+ logger_->LateSetup(this);
#ifdef DEBUG
// Verify that the current heap state (usually deserialized from the snapshot)
@@ -4325,8 +4360,8 @@ CodeTracer* Isolate::GetCodeTracer() {
bool Isolate::use_optimizer() {
// TODO(v8:7700): Update this predicate for a world with multiple tiers.
- return FLAG_opt && !serializer_enabled_ && CpuFeatures::SupportsOptimizer() &&
- !is_precise_count_code_coverage();
+ return (FLAG_opt || FLAG_maglev) && !serializer_enabled_ &&
+ CpuFeatures::SupportsOptimizer() && !is_precise_count_code_coverage();
}
void Isolate::IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code) {
@@ -5039,8 +5074,7 @@ void Isolate::OnTerminationDuringRunMicrotasks() {
heap()->set_current_microtask(ReadOnlyRoots(this).undefined_value());
// Empty the promise stack.
- while (PopPromise()) {
- }
+ debug()->thread_local_.promise_stack_ = Smi::zero();
if (current_microtask->IsPromiseReactionJobTask()) {
Handle<PromiseReactionJobTask> promise_reaction_job_task =
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index a75a78de80..6284da66ba 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -382,6 +382,14 @@ class StackMemory;
#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
+#define MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
+ do { \
+ if (!(call).To(&dst)) { \
+ DCHECK((isolate)->has_pending_exception()); \
+ return value; \
+ } \
+ } while (false)
+
#define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
do { \
Isolate* __isolate__ = (isolate); \
@@ -859,7 +867,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Push and pop a promise and the current try-catch handler.
void PushPromise(Handle<JSObject> promise);
- bool PopPromise();
+ void PopPromise();
+ bool IsPromiseStackEmpty() const;
// Return the relevant Promise that a throw/rejection pertains to, based
// on the contents of the Promise stack
@@ -2410,18 +2419,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR
-class PromiseOnStack {
- public:
- PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
- : promise_(promise), prev_(prev) {}
- Handle<JSObject> promise() { return promise_; }
- PromiseOnStack* prev() { return prev_; }
-
- private:
- Handle<JSObject> promise_;
- PromiseOnStack* prev_;
-};
-
// SaveContext scopes save the current context on the Isolate on creation, and
// restore it on destruction.
class V8_EXPORT_PRIVATE SaveContext {
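The promise stack used for catch prediction now lives on the V8 heap: Debug::ThreadLocal keeps a promise_stack_ slot that is either Smi::zero() (empty) or the head of a chain of PromiseOnStack objects, each holding a promise and a prev link, and Debug::Iterate visits it as a root. The old C++ PromiseOnStack linked list with global handles is removed above. Below is a minimal sketch of the same push/pop shape in plain C++; PromiseNode and PromiseStack are hypothetical stand-ins, not V8's heap object layout.

#include <cassert>
#include <memory>

struct PromiseNode {
  int promise;                        // stands in for the tracked promise
  std::shared_ptr<PromiseNode> prev;  // analogue of PromiseOnStack::prev()
};

struct PromiseStack {
  std::shared_ptr<PromiseNode> top;  // nullptr plays the role of Smi::zero()

  bool IsEmpty() const { return top == nullptr; }
  void Push(int promise) {
    top = std::make_shared<PromiseNode>(PromiseNode{promise, top});
  }
  void Pop() {
    if (!IsEmpty()) top = top->prev;  // PopPromise() is a no-op when empty
  }
};

int main() {
  PromiseStack stack;
  stack.Push(1);
  stack.Push(2);
  assert(stack.top->promise == 2);
  stack.Pop();
  assert(stack.top->promise == 1);
  stack.Pop();
  assert(stack.IsEmpty());
  stack.Pop();  // popping the empty stack keeps it empty
}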
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc
index a71f39aad3..ddcf86435e 100644
--- a/deps/v8/src/execution/loong64/simulator-loong64.cc
+++ b/deps/v8/src/execution/loong64/simulator-loong64.cc
@@ -4272,22 +4272,14 @@ void Simulator::DecodeTypeOp17() {
FPURegisters::Name(fd_reg()), fd_float(),
FPURegisters::Name(fj_reg()), fj_float(),
FPURegisters::Name(fk_reg()), fk_float());
- SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
- [](float lhs, float rhs) {
- return std::copysign(lhs, rhs);
- },
- fj_float(), fk_float()));
+ SetFPUFloatResult(fd_reg(), std::copysign(fj_float(), fk_float()));
} break;
case FCOPYSIGN_D: {
printf_instr("FCOPYSIGN_d\t %s: %016f, %s, %016f, %s, %016f\n",
FPURegisters::Name(fd_reg()), fd_double(),
FPURegisters::Name(fj_reg()), fj_double(),
FPURegisters::Name(fk_reg()), fk_double());
- SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
- [](double lhs, double rhs) {
- return std::copysign(lhs, rhs);
- },
- fj_double(), fk_double()));
+ SetFPUDoubleResult(fd_reg(), std::copysign(fj_double(), fk_double()));
} break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index ea81e6b1c0..c31a2c6d33 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -642,6 +642,18 @@ static bool AllOnOnePage(uintptr_t start, int size) {
return start_page == end_page;
}
+static bool is_snan(float input) {
+ uint32_t kQuietNanFPBit = 1 << 22;
+ uint32_t InputAsUint = bit_cast<uint32_t>(input);
+ return isnan(input) && ((InputAsUint & kQuietNanFPBit) == 0);
+}
+
+static bool is_snan(double input) {
+ uint64_t kQuietNanDPBit = 1L << 51;
+ uint64_t InputAsUint = bit_cast<uint64_t>(input);
+ return isnan(input) && ((InputAsUint & kQuietNanDPBit) == 0);
+}
+
void Simulator::set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
@@ -1529,6 +1541,122 @@ float VMXFPMax(float x, float y) {
void Simulator::ExecuteGeneric(Instruction* instr) {
uint32_t opcode = instr->OpcodeBase();
switch (opcode) {
+ // Prefixed instructions.
+ case PLOAD_STORE_8LS:
+ case PLOAD_STORE_MLS: {
+ // TODO(miladfarca): Simulate PC-relative capability indicated by the R
+ // bit.
+ DCHECK_NE(instr->Bit(20), 1);
+ // Read prefix value.
+ uint64_t prefix_value = instr->Bits(17, 0);
+ // Read suffix (next instruction).
+ Instruction* next_instr = bit_cast<Instruction*>(get_pc() + kInstrSize);
+ uint16_t suffix_value = next_instr->Bits(15, 0);
+ int64_t im_val = SIGN_EXT_IMM34((prefix_value << 16) | suffix_value);
+ switch (next_instr->OpcodeBase()) {
+ // Prefixed ADDI.
+ case ADDI: {
+ int rt = next_instr->RTValue();
+ int ra = next_instr->RAValue();
+ intptr_t alu_out;
+ if (ra == 0) {
+ alu_out = im_val;
+ } else {
+ intptr_t ra_val = get_register(ra);
+ alu_out = ra_val + im_val;
+ }
+ set_register(rt, alu_out);
+ break;
+ }
+ // Prefixed LBZ.
+ case LBZ: {
+ int ra = next_instr->RAValue();
+ int rt = next_instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ set_register(rt, ReadB(ra_val + im_val) & 0xFF);
+ break;
+ }
+ // Prefixed LHZ.
+ case LHZ: {
+ int ra = next_instr->RAValue();
+ int rt = next_instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ uintptr_t result = ReadHU(ra_val + im_val) & 0xFFFF;
+ set_register(rt, result);
+ break;
+ }
+ // Prefixed LHA.
+ case LHA: {
+ int ra = next_instr->RAValue();
+ int rt = next_instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t result = ReadH(ra_val + im_val);
+ set_register(rt, result);
+ break;
+ }
+ // Prefixed LWZ.
+ case LWZ: {
+ int ra = next_instr->RAValue();
+ int rt = next_instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ set_register(rt, ReadWU(ra_val + im_val));
+ break;
+ }
+ // Prefixed LWA.
+ case PPLWA: {
+ int ra = next_instr->RAValue();
+ int rt = next_instr->RTValue();
+ int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+ set_register(rt, ReadW(ra_val + im_val));
+ break;
+ }
+ // Prefixed LD.
+ case PPLD: {
+ int ra = next_instr->RAValue();
+ int rt = next_instr->RTValue();
+ int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+ set_register(rt, ReadDW(ra_val + im_val));
+ break;
+ }
+ // Prefixed LFS.
+ case LFS: {
+ int frt = next_instr->RTValue();
+ int ra = next_instr->RAValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t val = ReadW(ra_val + im_val);
+ float* fptr = reinterpret_cast<float*>(&val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+          // Conversion using double changes sNaN to qNaN on ia32/x64
+ if ((val & 0x7F800000) == 0x7F800000) {
+ int64_t dval = static_cast<int64_t>(val);
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29) |
+ 0x0;
+ set_d_register(frt, dval);
+ } else {
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+ }
+#else
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+#endif
+ break;
+ }
+ // Prefixed LFD.
+ case LFD: {
+ int frt = next_instr->RTValue();
+ int ra = next_instr->RAValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int64_t dptr = ReadDW(ra_val + im_val);
+ set_d_register(frt, dptr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+      // We have now executed the instructions at both this pc and the next one.
+ set_pc(get_pc() + (2 * kInstrSize));
+ break;
+ }
case SUBFIC: {
int rt = instr->RTValue();
int ra = instr->RAValue();
@@ -4763,7 +4891,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit_cast<float, uint32_t>(static_cast<uint32_t>(double_bits >> 32));
double_bits = bit_cast<uint64_t, double>(static_cast<double>(f));
// Preserve snan.
- if (issignaling(f)) {
+ if (is_snan(f)) {
double_bits &= 0xFFF7FFFFFFFFFFFFU; // Clear bit 51.
}
set_d_register(t, double_bits);
@@ -4776,7 +4904,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
uint64_t float_bits = static_cast<uint64_t>(
bit_cast<uint32_t, float>(static_cast<float>(b_val)));
// Preserve snan.
- if (issignaling(b_val)) {
+ if (is_snan(b_val)) {
float_bits &= 0xFFBFFFFFU; // Clear bit 22.
}
// fp result is placed in both 32bit halfs of the dst.
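
The is_snan helpers introduced for the PPC simulator classify a NaN as signaling when its quiet bit (mantissa bit 22 for binary32, bit 51 for binary64) is clear. A standalone sketch of the same test, independent of the simulator (illustrative names, not taken from the patch):

#include <cmath>
#include <cstdint>
#include <cstring>

// A NaN whose quiet bit (bit 22 of the binary32 encoding) is zero signals.
static bool IsSignalingNan32(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // type pun without UB
  return std::isnan(f) && ((bits & (1u << 22)) == 0);
}

// Same idea for binary64, where the quiet bit is bit 51.
static bool IsSignalingNan64(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return std::isnan(d) && ((bits & (uint64_t{1} << 51)) == 0);
}

The conversion paths above then clear exactly that quiet bit after the float/double conversion, so a NaN that was signaling beforehand stays signaling afterwards.
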
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 7b2aff765d..f4159dae0d 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -155,6 +155,7 @@ static inline bool is_overlapped_widen(const int astart, int asize,
// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+#ifdef CAN_USE_RVV_INSTRUCTIONS
template <uint64_t N>
struct type_usew_t;
template <>
@@ -1431,6 +1432,7 @@ inline Dst unsigned_saturation(Src v, uint n) {
} \
RVV_VI_LOOP_END \
rvv_trace_vd();
+#endif
namespace v8 {
namespace internal {
@@ -1488,7 +1490,9 @@ class RiscvDebugger {
int64_t GetFPURegisterValue(int regnum);
float GetFPURegisterValueFloat(int regnum);
double GetFPURegisterValueDouble(int regnum);
+#ifdef CAN_USE_RVV_INSTRUCTIONS
__int128_t GetVRegisterValue(int regnum);
+#endif
bool GetValue(const char* desc, int64_t* value);
};
@@ -1529,6 +1533,7 @@ double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
}
}
+#ifdef CAN_USE_RVV_INSTRUCTIONS
__int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
if (regnum == kNumVRegisters) {
return sim_->get_pc();
@@ -1536,6 +1541,7 @@ __int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
return sim_->get_vregister(regnum);
}
}
+#endif
bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
int regnum = Registers::Number(desc);
@@ -1695,8 +1701,9 @@ void RiscvDebugger::Debug() {
} else {
int regnum = Registers::Number(arg1);
int fpuregnum = FPURegisters::Number(arg1);
+#ifdef CAN_USE_RVV_INSTRUCTIONS
int vregnum = VRegisters::Number(arg1);
-
+#endif
if (regnum != kInvalidRegister) {
value = GetRegisterValue(regnum);
PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value,
@@ -1706,11 +1713,13 @@ void RiscvDebugger::Debug() {
dvalue = GetFPURegisterValueDouble(fpuregnum);
PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
FPURegisters::Name(fpuregnum), value, dvalue);
+#ifdef CAN_USE_RVV_INSTRUCTIONS
} else if (vregnum != kInvalidVRegister) {
__int128_t v = GetVRegisterValue(vregnum);
PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
VRegisters::Name(vregnum), (uint64_t)(v >> 64),
(uint64_t)v);
+#endif
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -2346,10 +2355,12 @@ double Simulator::get_fpu_register_double(int fpureg) const {
return *bit_cast<double*>(&FPUregisters_[fpureg]);
}
+#ifdef CAN_USE_RVV_INSTRUCTIONS
__int128_t Simulator::get_vregister(int vreg) const {
DCHECK((vreg >= 0) && (vreg < kNumVRegisters));
return Vregister_[vreg];
}
+#endif
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here,
@@ -2707,9 +2718,9 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return reinterpret_cast<uintptr_t>(get_sp());
}
- // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // Otherwise the limit is the JS stack. Leave a safety margin of 4 KiB
// to prevent overrunning the stack when pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 1024;
+ return reinterpret_cast<uintptr_t>(stack_) + 4 * KB;
}
// Unsupported instructions use Format to print an error and stop execution.
@@ -4255,6 +4266,7 @@ void Simulator::DecodeRVR4Type() {
}
}
+#ifdef CAN_USE_RVV_INSTRUCTIONS
bool Simulator::DecodeRvvVL() {
uint32_t instr_temp =
instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
@@ -4381,6 +4393,7 @@ bool Simulator::DecodeRvvVS() {
return false;
}
}
+#endif
Builtin Simulator::LookUp(Address pc) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
@@ -4618,9 +4631,13 @@ void Simulator::DecodeRVIType() {
break;
}
default: {
+#ifdef CAN_USE_RVV_INSTRUCTIONS
if (!DecodeRvvVL()) {
UNSUPPORTED();
}
+#else
+ UNSUPPORTED();
+#endif
break;
}
}
@@ -4655,9 +4672,13 @@ void Simulator::DecodeRVSType() {
break;
}
default:
+#ifdef CAN_USE_RVV_INSTRUCTIONS
if (!DecodeRvvVS()) {
UNSUPPORTED();
}
+#else
+ UNSUPPORTED();
+#endif
break;
}
}
@@ -5036,6 +5057,7 @@ T sat_subu(T x, T y, bool& sat) {
return res;
}
+#ifdef CAN_USE_RVV_INSTRUCTIONS
void Simulator::DecodeRvvIVV() {
DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
switch (instr_.InstructionBits() & kVTypeMask) {
@@ -6839,6 +6861,8 @@ void Simulator::DecodeVType() {
FATAL("Error: Unsupport on FILE:%s:%d.", __FILE__, __LINE__);
}
}
+#endif
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -6909,9 +6933,11 @@ void Simulator::InstructionDecode(Instruction* instr) {
case Instruction::kCSType:
DecodeCSType();
break;
+#ifdef CAN_USE_RVV_INSTRUCTIONS
case Instruction::kVType:
DecodeVType();
break;
+#endif
default:
if (1) {
std::cout << "Unrecognized instruction [@pc=0x" << std::hex
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
index bf1dda48e9..532a9eb51e 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -380,6 +380,7 @@ class Simulator : public SimulatorBase {
void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+#ifdef CAN_USE_RVV_INSTRUCTIONS
// RVV CSR
__int128_t get_vregister(int vreg) const;
inline uint64_t rvv_vlen() const { return kRvvVLEN; }
@@ -439,6 +440,7 @@ class Simulator : public SimulatorBase {
return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
}
}
+#endif
inline uint32_t get_dynamic_rounding_mode();
inline bool test_fflags_bits(uint32_t mask);
@@ -652,6 +654,7 @@ class Simulator : public SimulatorBase {
}
}
+#ifdef CAN_USE_RVV_INSTRUCTIONS
inline void rvv_trace_vd() {
if (::v8::internal::FLAG_trace_sim) {
__int128_t value = Vregister_[rvv_vd_reg()];
@@ -746,6 +749,7 @@ class Simulator : public SimulatorBase {
inline void set_rvv_vlenb(uint64_t value, bool trace = true) {
vlenb_ = value;
}
+#endif
template <typename T, typename Func>
inline T CanonicalizeFPUOpFMA(Func fn, T dst, T src1, T src2) {
@@ -862,6 +866,7 @@ class Simulator : public SimulatorBase {
void DecodeCSType();
void DecodeCJType();
void DecodeCBType();
+#ifdef CAN_USE_RVV_INSTRUCTIONS
void DecodeVType();
void DecodeRvvIVV();
void DecodeRvvIVI();
@@ -872,6 +877,7 @@ class Simulator : public SimulatorBase {
void DecodeRvvFVF();
bool DecodeRvvVL();
bool DecodeRvvVS();
+#endif
// Used for breakpoints and traps.
void SoftwareInterrupt();
@@ -938,10 +944,12 @@ class Simulator : public SimulatorBase {
// Floating-point control and status register.
uint32_t FCSR_;
+#ifdef CAN_USE_RVV_INSTRUCTIONS
// RVV registers
__int128_t Vregister_[kNumVRegisters];
static_assert(sizeof(__int128_t) == kRvvVLEN / 8, "unmatch vlen");
uint64_t vstart_, vxsat_, vxrm_, vcsr_, vtype_, vl_, vlenb_;
+#endif
// Simulator support.
// Allocate 1MB for stack.
size_t stack_size_;
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index ddd6b7f153..90e46ea793 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -15,6 +15,10 @@
#include "src/tracing/trace-event.h"
#include "src/utils/memcopy.h"
+#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+#endif // V8_ENABLE_MAGLEV
+
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
#endif // V8_ENABLE_WEBASSEMBLY
@@ -325,6 +329,14 @@ Object StackGuard::HandleInterrupts() {
isolate_->baseline_batch_compiler()->InstallBatch();
}
+#ifdef V8_ENABLE_MAGLEV
+ if (TestAndClear(&interrupt_flags, INSTALL_MAGLEV_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeMaglevConcurrentCompilation");
+ isolate_->maglev_concurrent_dispatcher()->FinalizeFinishedJobs();
+ }
+#endif // V8_ENABLE_MAGLEV
+
if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
// Callbacks must be invoked outside of ExecutionAccess lock.
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
index 69f1485370..b115b9b743 100644
--- a/deps/v8/src/execution/stack-guard.h
+++ b/deps/v8/src/execution/stack-guard.h
@@ -54,7 +54,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5) \
V(GROW_SHARED_MEMORY, GrowSharedMemory, 6) \
V(LOG_WASM_CODE, LogWasmCode, 7) \
- V(WASM_CODE_GC, WasmCodeGC, 8)
+ V(WASM_CODE_GC, WasmCodeGC, 8) \
+ V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 302ad9a7b1..87887802e0 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -27,7 +27,6 @@ void ThreadLocalTop::Clear() {
c_entry_fp_ = kNullAddress;
handler_ = kNullAddress;
c_function_ = kNullAddress;
- promise_on_stack_ = nullptr;
simulator_ = nullptr;
js_entry_sp_ = kNullAddress;
external_callback_scope_ = nullptr;
@@ -57,10 +56,7 @@ void ThreadLocalTop::Initialize(Isolate* isolate) {
#endif
}
-void ThreadLocalTop::Free() {
- // Match unmatched PopPromise calls.
- while (promise_on_stack_) isolate_->PopPromise();
-}
+void ThreadLocalTop::Free() {}
#if defined(USE_SIMULATOR)
void ThreadLocalTop::StoreCurrentStackPosition() {
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index 98c4f1b60f..8ac0602255 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -26,7 +26,6 @@ namespace internal {
class EmbedderState;
class ExternalCallbackScope;
class Isolate;
-class PromiseOnStack;
class Simulator;
class ThreadLocalTop {
@@ -35,9 +34,9 @@ class ThreadLocalTop {
// refactor this to really consist of just Addresses and 32-bit
// integer fields.
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- static constexpr uint32_t kSizeInBytes = 27 * kSystemPointerSize;
-#else
static constexpr uint32_t kSizeInBytes = 26 * kSystemPointerSize;
+#else
+ static constexpr uint32_t kSizeInBytes = 25 * kSystemPointerSize;
#endif
// Does early low-level initialization that does not depend on the
@@ -140,11 +139,6 @@ class ThreadLocalTop {
// C function that was called at c entry.
Address c_function_;
- // Throwing an exception may cause a Promise rejection. For this purpose
- // we keep track of a stack of nested promises and the corresponding
- // try-catch handlers.
- PromiseOnStack* promise_on_stack_;
-
// Simulator field is always present to get predictable layout.
Simulator* simulator_;
diff --git a/deps/v8/src/execution/tiering-manager.cc b/deps/v8/src/execution/tiering-manager.cc
index e87b170a60..2b519ff4be 100644
--- a/deps/v8/src/execution/tiering-manager.cc
+++ b/deps/v8/src/execution/tiering-manager.cc
@@ -17,6 +17,7 @@
#include "src/handles/global-handles.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
+#include "src/objects/code.h"
#include "src/tracing/trace-event.h"
namespace v8 {
@@ -58,9 +59,8 @@ class OptimizationDecision {
public:
static constexpr OptimizationDecision Maglev() {
// TODO(v8:7700): Consider using another reason here.
- // TODO(v8:7700): Support concurrency.
return {OptimizationReason::kHotAndStable, CodeKind::MAGLEV,
- ConcurrencyMode::kNotConcurrent};
+ ConcurrencyMode::kConcurrent};
}
static constexpr OptimizationDecision TurbofanHotAndStable() {
return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN,
@@ -100,19 +100,17 @@ namespace {
void TraceInOptimizationQueue(JSFunction function) {
if (FLAG_trace_opt_verbose) {
- PrintF("[not marking function ");
- function.PrintName();
- PrintF(" for optimization: already queued]\n");
+ PrintF("[not marking function %s for optimization: already queued]\n",
+ function.DebugNameCStr().get());
}
}
void TraceHeuristicOptimizationDisallowed(JSFunction function) {
if (FLAG_trace_opt_verbose) {
- PrintF("[not marking function ");
- function.PrintName();
PrintF(
- " for optimization: marked with "
- "%%PrepareFunctionForOptimization for manual optimization]\n");
+ "[not marking function %s for optimization: marked with "
+ "%%PrepareFunctionForOptimization for manual optimization]\n",
+ function.DebugNameCStr().get());
}
}
@@ -148,33 +146,31 @@ void TieringManager::Optimize(JSFunction function, CodeKind code_kind,
function.MarkForOptimization(isolate_, d.code_kind, d.concurrency_mode);
}
-void TieringManager::AttemptOnStackReplacement(UnoptimizedFrame* frame,
- int loop_nesting_levels) {
- JSFunction function = frame->function();
- SharedFunctionInfo shared = function.shared();
- if (!FLAG_use_osr || !shared.IsUserJavaScript()) {
- return;
- }
-
- // If the code is not optimizable, don't try OSR.
- if (shared.optimization_disabled()) return;
+namespace {
- // We're using on-stack replacement: Store new loop nesting level in
- // BytecodeArray header so that certain back edges in any interpreter frame
- // for this bytecode will trigger on-stack replacement for that frame.
- if (FLAG_trace_osr) {
- CodeTracer::Scope scope(isolate_->GetCodeTracer());
- PrintF(scope.file(), "[OSR - arming back edges in ");
- function.PrintName(scope.file());
- PrintF(scope.file(), "]\n");
+bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame,
+ int* osr_urgency_out) {
+ JSFunction function = frame->function();
+ const int current_offset = frame->GetBytecodeOffset();
+ OSROptimizedCodeCache cache = function.native_context().osr_code_cache();
+ interpreter::BytecodeArrayIterator iterator(
+ handle(frame->GetBytecodeArray(), frame->isolate()));
+ for (BytecodeOffset osr_offset : cache.OsrOffsetsFor(function.shared())) {
+ DCHECK(!osr_offset.IsNone());
+ iterator.SetOffset(osr_offset.ToInt());
+ if (base::IsInRange(current_offset, iterator.GetJumpTargetOffset(),
+ osr_offset.ToInt())) {
+ int loop_depth = iterator.GetImmediateOperand(1);
+ // `+ 1` because osr_urgency is an exclusive upper limit on the depth.
+ *osr_urgency_out = loop_depth + 1;
+ return true;
+ }
}
-
- DCHECK(frame->is_unoptimized());
- int level = frame->GetBytecodeArray().osr_loop_nesting_level();
- frame->GetBytecodeArray().set_osr_loop_nesting_level(std::min(
- {level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker}));
+ return false;
}
+} // namespace
+
namespace {
bool TiersUpToMaglev(CodeKind code_kind) {
@@ -209,15 +205,83 @@ int TieringManager::InitialInterruptBudget() {
: FLAG_interrupt_budget;
}
+namespace {
+
+bool SmallEnoughForOSR(Isolate* isolate, JSFunction function) {
+ return function.shared().GetBytecodeArray(isolate).length() <=
+ kOSRBytecodeSizeAllowanceBase +
+ function.feedback_vector().profiler_ticks() *
+ kOSRBytecodeSizeAllowancePerTick;
+}
+
+void TrySetOsrUrgency(Isolate* isolate, JSFunction function, int osr_urgency) {
+ SharedFunctionInfo shared = function.shared();
+
+ if (V8_UNLIKELY(!FLAG_use_osr)) return;
+ if (V8_UNLIKELY(!shared.IsUserJavaScript())) return;
+ if (V8_UNLIKELY(shared.optimization_disabled())) return;
+
+ // We've passed all checks - bump the OSR urgency.
+
+ BytecodeArray bytecode = shared.GetBytecodeArray(isolate);
+ if (V8_UNLIKELY(FLAG_trace_osr)) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - setting osr urgency. function: %s, old urgency: %d, new "
+ "urgency: %d]\n",
+ function.DebugNameCStr().get(), bytecode.osr_urgency(), osr_urgency);
+ }
+
+ DCHECK_GE(osr_urgency, bytecode.osr_urgency()); // Never lower urgency here.
+ bytecode.set_osr_urgency(osr_urgency);
+}
+
+void TryIncrementOsrUrgency(Isolate* isolate, JSFunction function) {
+ int old_urgency = function.shared().GetBytecodeArray(isolate).osr_urgency();
+ int new_urgency = std::min(old_urgency + 1, BytecodeArray::kMaxOsrUrgency);
+ TrySetOsrUrgency(isolate, function, new_urgency);
+}
+
+void TryRequestOsrAtNextOpportunity(Isolate* isolate, JSFunction function) {
+ TrySetOsrUrgency(isolate, function, BytecodeArray::kMaxOsrUrgency);
+}
+
+void TryRequestOsrForCachedOsrCode(Isolate* isolate, JSFunction function,
+ int osr_urgency_for_cached_osr_code) {
+ DCHECK_LE(osr_urgency_for_cached_osr_code, BytecodeArray::kMaxOsrUrgency);
+ int old_urgency = function.shared().GetBytecodeArray(isolate).osr_urgency();
+ // Make sure not to decrease the existing urgency.
+ int new_urgency = std::max(old_urgency, osr_urgency_for_cached_osr_code);
+ TrySetOsrUrgency(isolate, function, new_urgency);
+}
+
+bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
+ return !any_ic_changed &&
+ bytecode_size < FLAG_max_bytecode_size_for_early_opt;
+}
+
+} // namespace
+
+void TieringManager::RequestOsrAtNextOpportunity(JSFunction function) {
+ DisallowGarbageCollection no_gc;
+ TryRequestOsrAtNextOpportunity(isolate_, function);
+}
+
void TieringManager::MaybeOptimizeFrame(JSFunction function,
- JavaScriptFrame* frame,
+ UnoptimizedFrame* frame,
CodeKind code_kind) {
- if (function.IsInOptimizationQueue()) {
+ const TieringState tiering_state = function.feedback_vector().tiering_state();
+ const TieringState osr_tiering_state =
+ function.feedback_vector().osr_tiering_state();
+ if (V8_UNLIKELY(IsInProgress(tiering_state)) ||
+ V8_UNLIKELY(IsInProgress(osr_tiering_state))) {
+ // Note: This effectively disables OSR for the function while it is being
+ // compiled.
TraceInOptimizationQueue(function);
return;
}
- if (FLAG_testing_d8_test_runner &&
+ if (V8_UNLIKELY(FLAG_testing_d8_test_runner) &&
!PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
function)) {
TraceHeuristicOptimizationDisallowed(function);
@@ -225,84 +289,55 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
}
// TODO(v8:7700): Consider splitting this up for Maglev/Turbofan.
- if (function.shared().optimization_disabled()) return;
-
- if (frame->is_unoptimized()) {
- if (V8_UNLIKELY(FLAG_always_osr)) {
- AttemptOnStackReplacement(UnoptimizedFrame::cast(frame),
- AbstractCode::kMaxLoopNestingMarker);
- // Fall through and do a normal optimized compile as well.
- } else if (MaybeOSR(function, UnoptimizedFrame::cast(frame))) {
- return;
- }
+ if (V8_UNLIKELY(function.shared().optimization_disabled())) return;
+
+ if (V8_UNLIKELY(FLAG_always_osr)) {
+ TryRequestOsrAtNextOpportunity(isolate_, function);
+ // Continue below and do a normal optimized compile as well.
}
- OptimizationDecision d = ShouldOptimize(function, code_kind, frame);
- if (d.should_optimize()) Optimize(function, code_kind, d);
-}
+ // If we have matching cached OSR'd code, request OSR at the next opportunity.
+ int osr_urgency_for_cached_osr_code;
+ if (HaveCachedOSRCodeForCurrentBytecodeOffset(
+ frame, &osr_urgency_for_cached_osr_code)) {
+ TryRequestOsrForCachedOsrCode(isolate_, function,
+ osr_urgency_for_cached_osr_code);
+ }
-bool TieringManager::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
- int ticks = function.feedback_vector().profiler_ticks();
- if (function.IsMarkedForOptimization() ||
- function.IsMarkedForConcurrentOptimization() ||
- function.HasAvailableOptimizedCode()) {
- int64_t allowance = kOSRBytecodeSizeAllowanceBase +
- ticks * kOSRBytecodeSizeAllowancePerTick;
- if (function.shared().GetBytecodeArray(isolate_).length() <= allowance) {
- AttemptOnStackReplacement(frame);
+ const bool is_marked_for_any_optimization =
+ (static_cast<uint32_t>(tiering_state) & kNoneOrInProgressMask) != 0;
+ if (is_marked_for_any_optimization || function.HasAvailableOptimizedCode()) {
+ // OSR kicks in only once we've previously decided to tier up, but we are
+ // still in the unoptimized frame (this implies a long-running loop).
+ if (SmallEnoughForOSR(isolate_, function)) {
+ TryIncrementOsrUrgency(isolate_, function);
}
- return true;
- }
- return false;
-}
-namespace {
+ // Return unconditionally and don't run through the optimization decision
+ // again; we've already decided to tier up previously.
+ return;
+ }
-bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
- return !any_ic_changed &&
- bytecode_size < FLAG_max_bytecode_size_for_early_opt;
+ DCHECK(!is_marked_for_any_optimization &&
+ !function.HasAvailableOptimizedCode());
+ OptimizationDecision d = ShouldOptimize(function, code_kind, frame);
+ if (d.should_optimize()) Optimize(function, code_kind, d);
}
-} // namespace
-
OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
CodeKind code_kind,
JavaScriptFrame* frame) {
DCHECK_EQ(code_kind, function.GetActiveTier().value());
- if (TiersUpToMaglev(code_kind)) {
+ if (TiersUpToMaglev(code_kind) &&
+ !function.shared(isolate_).maglev_compilation_failed()) {
return OptimizationDecision::Maglev();
} else if (code_kind == CodeKind::TURBOFAN) {
// Already in the top tier.
return OptimizationDecision::DoNotOptimize();
}
- // If function's SFI has OSR cache, once enter loop range of OSR cache, set
- // OSR loop nesting level for matching condition of OSR (loop_depth <
- // osr_level), soon later OSR will be triggered when executing bytecode
- // JumpLoop which is entry of the OSR cache, then hit the OSR cache.
BytecodeArray bytecode = function.shared().GetBytecodeArray(isolate_);
- if (V8_UNLIKELY(function.shared().osr_code_cache_state() > kNotCached) &&
- frame->is_unoptimized()) {
- int current_offset =
- static_cast<UnoptimizedFrame*>(frame)->GetBytecodeOffset();
- OSROptimizedCodeCache cache =
- function.context().native_context().GetOSROptimizedCodeCache();
- std::vector<int> bytecode_offsets =
- cache.GetBytecodeOffsetsFromSFI(function.shared());
- interpreter::BytecodeArrayIterator iterator(
- Handle<BytecodeArray>(bytecode, isolate_));
- for (int jump_offset : bytecode_offsets) {
- iterator.SetOffset(jump_offset);
- int jump_target_offset = iterator.GetJumpTargetOffset();
- if (jump_offset >= current_offset &&
- current_offset >= jump_target_offset) {
- bytecode.set_osr_loop_nesting_level(iterator.GetImmediateOperand(1) +
- 1);
- return OptimizationDecision::TurbofanHotAndStable();
- }
- }
- }
const int ticks = function.feedback_vector().profiler_ticks();
const int ticks_for_optimization =
FLAG_ticks_before_optimization +
@@ -315,9 +350,8 @@ OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
// small, optimistically optimize it now.
return OptimizationDecision::TurbofanSmallFunction();
} else if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function.PrintName();
- PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
+ PrintF("[not yet optimizing %s, not enough ticks: %d/%d and ",
+ function.DebugNameCStr().get(), ticks, ticks_for_optimization);
if (any_ic_changed_) {
PrintF("ICs changed]\n");
} else {
@@ -325,6 +359,7 @@ OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
}
}
+
return OptimizationDecision::DoNotOptimize();
}
@@ -368,7 +403,7 @@ void TieringManager::OnInterruptTick(Handle<JSFunction> function) {
DCHECK(function->shared().HasBytecodeArray());
// TODO(jgruber): Consider integrating this into a linear tiering system
- // controlled by OptimizationMarker in which the order is always
+ // controlled by TieringState in which the order is always
// Ignition-Sparkplug-Turbofan, and only a single tierup is requested at
// once.
// It's unclear whether this is possible and/or makes sense - for example,
@@ -403,9 +438,9 @@ void TieringManager::OnInterruptTick(Handle<JSFunction> function) {
function_obj.feedback_vector().SaturatingIncrementProfilerTicks();
JavaScriptFrameIterator it(isolate_);
- DCHECK(it.frame()->is_unoptimized());
+ UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
const CodeKind code_kind = function_obj.GetActiveTier().value();
- MaybeOptimizeFrame(function_obj, it.frame(), code_kind);
+ MaybeOptimizeFrame(function_obj, frame, code_kind);
}
} // namespace internal
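
In the new tiering code, osr_urgency acts as an exclusive upper bound on the loop depth that may trigger on-stack replacement, which is why HaveCachedOSRCodeForCurrentBytecodeOffset reports loop_depth + 1. A small illustrative sketch of that relation (not V8 API):

#include <cassert>

// A JumpLoop at loop_depth may trigger OSR only while loop_depth < osr_urgency.
static bool LoopMayTriggerOsr(int loop_depth, int osr_urgency) {
  return loop_depth < osr_urgency;
}

int main() {
  // A cached OSR entry found at loop depth 3 therefore requests urgency 4.
  assert(!LoopMayTriggerOsr(3, 3));
  assert(LoopMayTriggerOsr(3, 4));
  return 0;
}
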
diff --git a/deps/v8/src/execution/tiering-manager.h b/deps/v8/src/execution/tiering-manager.h
index ce1cde3613..329c7d27d0 100644
--- a/deps/v8/src/execution/tiering-manager.h
+++ b/deps/v8/src/execution/tiering-manager.h
@@ -32,8 +32,8 @@ class TieringManager {
void NotifyICChanged() { any_ic_changed_ = true; }
- void AttemptOnStackReplacement(UnoptimizedFrame* frame,
- int nesting_levels = 1);
+ // After this request, the next JumpLoop will perform OSR.
+ void RequestOsrAtNextOpportunity(JSFunction function);
// For use when a JSFunction is available.
static int InterruptBudgetFor(Isolate* isolate, JSFunction function);
@@ -43,12 +43,10 @@ class TieringManager {
private:
// Make the decision whether to optimize the given function, and mark it for
// optimization if the decision was 'yes'.
- void MaybeOptimizeFrame(JSFunction function, JavaScriptFrame* frame,
+ // This function is also responsible for bumping the OSR urgency.
+ void MaybeOptimizeFrame(JSFunction function, UnoptimizedFrame* frame,
CodeKind code_kind);
- // Potentially attempts OSR from and returns whether no other
- // optimization attempts should be made.
- bool MaybeOSR(JSFunction function, UnoptimizedFrame* frame);
OptimizationDecision ShouldOptimize(JSFunction function, CodeKind code_kind,
JavaScriptFrame* frame);
void Optimize(JSFunction function, CodeKind code_kind,
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 17cb7a9ef1..0127d2a805 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -99,11 +99,21 @@ void StatisticsExtension::GetCounters(
const char* name;
};
+ size_t new_space_size = 0;
+ size_t new_space_available = 0;
+ size_t new_space_committed_memory = 0;
+
+ if (heap->new_space()) {
+ new_space_size = heap->new_space()->Size();
+ new_space_available = heap->new_space()->Available();
+ new_space_committed_memory = heap->new_space()->CommittedMemory();
+ }
+
const StatisticNumber numbers[] = {
{heap->memory_allocator()->Size(), "total_committed_bytes"},
- {heap->new_space()->Size(), "new_space_live_bytes"},
- {heap->new_space()->Available(), "new_space_available_bytes"},
- {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
+ {new_space_size, "new_space_live_bytes"},
+ {new_space_available, "new_space_available_bytes"},
+ {new_space_committed_memory, "new_space_commited_bytes"},
{heap->old_space()->Size(), "old_space_live_bytes"},
{heap->old_space()->Available(), "old_space_available_bytes"},
{heap->old_space()->CommittedMemory(), "old_space_commited_bytes"},
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 7ed7cccbcc..f02ef309d6 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -195,7 +195,8 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
-#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+ (V8_TARGET_ARCH_S390X && COMPRESS_POINTERS_BOOL)
// TODO(v8:11421): Enable Sparkplug for these architectures.
#define ENABLE_SPARKPLUG false
#else
@@ -568,7 +569,8 @@ DEFINE_BOOL(assert_types, false,
// TODO(tebbi): Support allocating types from background thread.
DEFINE_NEG_IMPLICATION(assert_types, concurrent_recompilation)
-DEFINE_BOOL(verify_simplified_lowering, false,
+// Enable verification of SimplifiedLowering in debug builds.
+DEFINE_BOOL(verify_simplified_lowering, DEBUG_BOOL,
"verify graph generated by simplified lowering")
DEFINE_BOOL(trace_compilation_dependencies, false, "trace code dependencies")
@@ -847,8 +849,6 @@ DEFINE_BOOL(turbo_stats_nvp, false,
DEFINE_BOOL(turbo_stats_wasm, false,
"print TurboFan statistics of wasm compilations")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
-DEFINE_BOOL(function_context_specialization, false,
- "enable function context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_INT(max_inlined_bytecode_size, 460,
"maximum size of bytecode for a single inlining")
@@ -880,6 +880,8 @@ DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(turbo_inline_array_builtins, true,
"inline array builtins in TurboFan code")
DEFINE_BOOL(use_osr, true, "use on-stack replacement")
+DEFINE_BOOL(concurrent_osr, false, "enable concurrent OSR")
+DEFINE_WEAK_IMPLICATION(future, concurrent_osr)
DEFINE_BOOL(trace_osr, false, "trace on-stack replacement")
DEFINE_BOOL(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
@@ -982,10 +984,9 @@ DEFINE_UINT(wasm_max_code_space, v8::internal::kMaxWasmCodeMB,
DEFINE_BOOL(wasm_tier_up, true,
"enable tier up to the optimizing compiler (requires --liftoff to "
"have an effect)")
-DEFINE_BOOL(wasm_dynamic_tiering, false,
+DEFINE_BOOL(wasm_dynamic_tiering, true,
"enable dynamic tier up to the optimizing compiler")
DEFINE_NEG_NEG_IMPLICATION(liftoff, wasm_dynamic_tiering)
-DEFINE_WEAK_IMPLICATION(future, wasm_dynamic_tiering)
DEFINE_INT(wasm_tiering_budget, 1800000,
"budget for dynamic tiering (rough approximation of bytes executed")
DEFINE_INT(
@@ -1008,6 +1009,7 @@ DEFINE_BOOL(liftoff_only, false,
"disallow TurboFan compilation for WebAssembly (for testing)")
DEFINE_IMPLICATION(liftoff_only, liftoff)
DEFINE_NEG_IMPLICATION(liftoff_only, wasm_tier_up)
+DEFINE_NEG_IMPLICATION(liftoff_only, wasm_dynamic_tiering)
DEFINE_NEG_IMPLICATION(fuzzing, liftoff_only)
DEFINE_DEBUG_BOOL(
enable_testing_opcode_in_wasm, false,
@@ -1093,10 +1095,13 @@ DEFINE_BOOL(wasm_speculative_inlining, false,
DEFINE_BOOL(trace_wasm_inlining, false, "trace wasm inlining")
DEFINE_BOOL(trace_wasm_speculative_inlining, false,
"trace wasm speculative inlining")
-DEFINE_IMPLICATION(wasm_speculative_inlining, experimental_wasm_typed_funcref)
+DEFINE_BOOL(wasm_type_canonicalization, false,
+ "apply isorecursive canonicalization on wasm types")
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_dynamic_tiering)
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining)
DEFINE_WEAK_IMPLICATION(experimental_wasm_gc, wasm_speculative_inlining)
+DEFINE_WEAK_IMPLICATION(experimental_wasm_typed_funcref,
+ wasm_type_canonicalization)
// Speculative inlining needs type feedback from Liftoff and compilation in
// Turbofan.
DEFINE_NEG_NEG_IMPLICATION(liftoff, wasm_speculative_inlining)
@@ -1179,6 +1184,8 @@ DEFINE_BOOL(huge_max_old_generation_size, true,
"Increase max size of the old space to 4 GB for x64 systems with"
"the physical memory bigger than 16 GB")
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_BOOL(separate_gc_phases, false,
+            "young and full garbage collection phases do not overlap")
DEFINE_BOOL(global_gc_scheduling, true,
"enable GC scheduling based on global memory")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
@@ -1667,7 +1674,7 @@ DEFINE_INT(max_valid_polymorphic_map_count, 4,
DEFINE_BOOL(native_code_counters, DEBUG_BOOL,
"generate extra code for manipulating stats counters")
-DEFINE_BOOL(super_ic, false, "use an IC for super property loads")
+DEFINE_BOOL(super_ic, true, "use an IC for super property loads")
DEFINE_BOOL(enable_mega_dom_ic, false, "use MegaDOM IC state for API objects")
@@ -2003,14 +2010,11 @@ DEFINE_BOOL(log, false,
"Minimal logging (no API, code, GC, suspect, or handles samples).")
DEFINE_BOOL(log_all, false, "Log all events to the log file.")
-DEFINE_BOOL(log_api, false, "Log API events to the log file.")
DEFINE_BOOL(log_code, false,
"Log code events to the log file without profiling.")
DEFINE_BOOL(log_code_disassemble, false,
"Log all disassembled code to the log file.")
DEFINE_IMPLICATION(log_code_disassemble, log_code)
-DEFINE_BOOL(log_handles, false, "Log global handle events.")
-DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
DEFINE_BOOL(log_source_code, false, "Log source code.")
DEFINE_BOOL(log_function_events, false,
"Log function events "
@@ -2125,7 +2129,8 @@ DEFINE_BOOL(interpreted_frames_native_stack, false,
DEFINE_BOOL(enable_system_instrumentation, false,
"Enable platform-specific profiling.")
-
+// Don't move code objects.
+DEFINE_NEG_IMPLICATION(enable_system_instrumentation, compact_code_space)
#ifndef V8_TARGET_ARCH_ARM
DEFINE_IMPLICATION(enable_system_instrumentation,
interpreted_frames_native_stack)
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 41e640c9f7..38113c008c 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -5,14 +5,18 @@
#include "src/handles/global-handles.h"
#include <algorithm>
+#include <atomic>
+#include <climits>
#include <cstdint>
#include <map>
#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
+#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h"
#include "src/common/allow-deprecated.h"
+#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/base/stack.h"
#include "src/heap/embedder-tracing.h"
@@ -69,7 +73,38 @@ class GlobalHandles::NodeBlock final {
NodeBlock* next() const { return next_; }
NodeBlock* next_used() const { return next_used_; }
+ void set_markbit(size_t index) {
+ const auto [cell, bit] = CellAndBit(index);
+ reinterpret_cast<std::atomic<CellType>&>(mark_bits_[cell])
+ .fetch_or(CellType{1} << bit, std::memory_order_relaxed);
+ }
+
+ void clear_markbit(size_t index) {
+ const auto [cell, bit] = CellAndBit(index);
+ mark_bits_[cell] &= ~(CellType{1} << bit);
+ }
+
+ bool markbit(size_t index) const {
+ const auto [cell, bit] = CellAndBit(index);
+ return mark_bits_[cell] & CellType{1} << bit;
+ }
+
private:
+ using CellType = uint32_t;
+
+ std::tuple<CellType, CellType> CellAndBit(size_t index) const {
+ static constexpr CellType kMarkBitCellSizeLog2 = 5;
+ static_assert(base::bits::IsPowerOfTwo(kBlockSize),
+ "Block size must be power of two.");
+ static_assert(
+ sizeof(CellType) * CHAR_BIT == (CellType{1} << kMarkBitCellSizeLog2),
+ "Markbit CellType not matching defined log2 size.");
+ static constexpr CellType kCellMask =
+ (CellType{1} << kMarkBitCellSizeLog2) - 1;
+ return {static_cast<CellType>(index >> kMarkBitCellSizeLog2),
+ index & kCellMask};
+ }
+
NodeType nodes_[kBlockSize];
NodeBlock* const next_;
GlobalHandles* const global_handles_;
@@ -77,6 +112,7 @@ class GlobalHandles::NodeBlock final {
NodeBlock* next_used_ = nullptr;
NodeBlock* prev_used_ = nullptr;
uint32_t used_nodes_ = 0;
+ CellType mark_bits_[kBlockSize / (sizeof(CellType) * CHAR_BIT)] = {0};
};
template <class NodeType>
@@ -298,7 +334,8 @@ class NodeBase {
void Acquire(Object object) {
DCHECK(!AsChild()->IsInUse());
CheckFieldsAreCleared();
- object_ = object.ptr();
+ reinterpret_cast<std::atomic<Address>*>(&object_)->store(
+ object.ptr(), std::memory_order_relaxed);
AsChild()->MarkAsUsed();
data_.parameter = nullptr;
DCHECK(AsChild()->IsInUse());
@@ -419,12 +456,6 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
Node(const Node&) = delete;
Node& operator=(const Node&) = delete;
- void Zap() {
- DCHECK(IsInUse());
- // Zap the values for eager trapping.
- object_ = kGlobalHandleZapValue;
- }
-
const char* label() const {
return state() == NORMAL ? reinterpret_cast<char*>(data_.parameter)
: nullptr;
@@ -655,16 +686,29 @@ class GlobalHandles::TracedNode final
bool is_root() const { return IsRoot::decode(flags_); }
void set_root(bool v) { flags_ = IsRoot::update(flags_, v); }
- bool markbit() const { return Markbit::decode(flags_); }
- void clear_markbit() { flags_ = Markbit::update(flags_, false); }
- void set_markbit() { flags_ = Markbit::update(flags_, true); }
+ void set_markbit() {
+ NodeBlock<TracedNode>::From(this)->set_markbit(index());
+ }
+
+ bool markbit() const {
+ return NodeBlock<TracedNode>::From(this)->markbit(index());
+ }
+ void clear_markbit() {
+ NodeBlock<TracedNode>::From(this)->clear_markbit(index());
+ }
bool is_on_stack() const { return IsOnStack::decode(flags_); }
void set_is_on_stack(bool v) { flags_ = IsOnStack::update(flags_, v); }
- void clear_object() { object_ = kNullAddress; }
+ void clear_object() {
+ reinterpret_cast<std::atomic<Address>*>(&object_)->store(
+ kNullAddress, std::memory_order_relaxed);
+ }
- void CopyObjectReference(const TracedNode& other) { object_ = other.object_; }
+ void CopyObjectReference(const TracedNode& other) {
+ reinterpret_cast<std::atomic<Address>*>(&object_)->store(
+ other.object_, std::memory_order_relaxed);
+ }
void ResetPhantomHandle() {
DCHECK(IsInUse());
@@ -675,23 +719,18 @@ class GlobalHandles::TracedNode final
static void Verify(GlobalHandles* global_handles, const Address* const* slot);
protected:
+ // Various state is managed in a bit field. Mark bits are used concurrently
+ // and held externally in a NodeBlock.
using NodeState = base::BitField8<State, 0, 2>;
using IsInYoungList = NodeState::Next<bool, 1>;
using IsRoot = IsInYoungList::Next<bool, 1>;
- using Markbit = IsRoot::Next<bool, 1>;
- using IsOnStack = Markbit::Next<bool, 1>;
-
+ using IsOnStack = IsRoot::Next<bool, 1>;
void ClearImplFields() {
set_root(true);
- // Nodes are black allocated for simplicity.
- set_markbit();
set_is_on_stack(false);
}
- void CheckImplFieldsAreCleared() const {
- DCHECK(is_root());
- DCHECK(markbit());
- }
+ void CheckImplFieldsAreCleared() const { DCHECK(is_root()); }
friend class NodeBase<GlobalHandles::TracedNode>;
};
@@ -938,6 +977,8 @@ Handle<Object> GlobalHandles::CreateTraced(Object value, Address* slot,
traced_young_nodes_.push_back(result);
result->set_in_young_list(true);
}
+ // Nodes are black allocated for simplicity.
+ result->set_markbit();
if (store_mode != GlobalHandleStoreMode::kInitializingStore) {
WriteBarrier::MarkingFromGlobalHandle(value);
}
@@ -1042,7 +1083,7 @@ void GlobalHandles::MoveTracedReference(Address** from, Address** to) {
GlobalHandleStoreMode::kAssigningStore, to_on_stack);
SetSlotThreadSafe(to, o.location());
to_node = TracedNode::FromLocation(*to);
- DCHECK(to_node->markbit());
+ DCHECK_IMPLIES(!to_node->is_on_stack(), to_node->markbit());
} else {
DCHECK(to_node->IsInUse());
to_node->CopyObjectReference(*from_node);
@@ -1081,8 +1122,9 @@ GlobalHandles* GlobalHandles::From(const TracedNode* node) {
void GlobalHandles::MarkTraced(Address* location) {
TracedNode* node = TracedNode::FromLocation(location);
- node->set_markbit();
DCHECK(node->IsInUse());
+ if (node->is_on_stack()) return;
+ node->set_markbit();
}
void GlobalHandles::Destroy(Address* location) {
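
CellAndBit in the NodeBlock changes above splits a node index into a 32-bit mark-bit cell and a bit position: the index shifted right by five selects the cell, the low five bits select the bit inside it. A quick standalone check of that arithmetic (illustrative only, not part of the patch):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const std::size_t index = 70;                       // node index inside a block
  const std::uint32_t cell = index >> 5;              // 70 / 32 == 2
  const std::uint32_t bit = index & ((1u << 5) - 1);  // 70 % 32 == 6
  assert(cell == 2 && bit == 6);
  // set_markbit() then ORs (1u << bit) into mark_bits_[cell] with a relaxed
  // atomic fetch_or, while clear_markbit() is a plain read-modify-write.
  return 0;
}
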
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 8fdf858c50..940da8eb95 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -10,12 +10,15 @@
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/maybe-handles.h"
-#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/objects-inl.h"
#include "src/roots/roots-inl.h"
#include "src/utils/address-map.h"
#include "src/utils/identity-map.h"
+#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+#endif // V8_ENABLE_MAGLEV
+
#ifdef DEBUG
// For GetIsolateFromWritableHeapObject.
#include "src/heap/heap-write-barrier-inl.h"
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index cdab2a9aab..21c22b5440 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -7,8 +7,10 @@
#include <atomic>
#include <memory>
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/objects/js-array-buffer.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"
diff --git a/deps/v8/src/heap/barrier.h b/deps/v8/src/heap/barrier.h
deleted file mode 100644
index a5a4b51263..0000000000
--- a/deps/v8/src/heap/barrier.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_BARRIER_H_
-#define V8_HEAP_BARRIER_H_
-
-#include "src/base/platform/condition-variable.h"
-#include "src/base/platform/mutex.h"
-#include "src/base/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-// Barrier that can be used once to synchronize a dynamic number of tasks
-// working concurrently.
-//
-// The barrier takes a timeout which is used to avoid waiting for too long. If
-// any of the users ever reach the timeout they will disable the barrier and
-// signal others to fall through.
-//
-// Usage:
-// void RunConcurrently(OneShotBarrier* shared_barrier) {
-// shared_barrier->Start();
-// do {
-// {
-// /* process work and create new work */
-// barrier->NotifyAll();
-// /* process work and create new work */
-// }
-// } while(!shared_barrier->Wait());
-// }
-//
-// Note: If Start() is not called in time, e.g., because the first concurrent
-// task is already done processing all work, then Done() will return true
-// immediately.
-class OneshotBarrier {
- public:
- explicit OneshotBarrier(base::TimeDelta timeout) : timeout_(timeout) {}
-
- void Start() {
- base::MutexGuard guard(&mutex_);
- tasks_++;
- }
-
- void NotifyAll() {
- base::MutexGuard guard(&mutex_);
- if (waiting_ > 0) condition_.NotifyAll();
- }
-
- bool Wait() {
- base::MutexGuard guard(&mutex_);
- if (done_) return true;
-
- DCHECK_LE(waiting_, tasks_);
- waiting_++;
- if (waiting_ == tasks_) {
- done_ = true;
- condition_.NotifyAll();
- } else {
- // Spurious wakeup is ok here.
- if (!condition_.WaitFor(&mutex_, timeout_)) {
- // If predefined timeout was reached, Stop waiting and signal being done
- // also to other tasks.
- done_ = true;
- }
- }
- waiting_--;
- return done_;
- }
-
- // Only valid to be called in a sequential setting.
- bool DoneForTesting() const { return done_; }
-
- private:
- base::ConditionVariable condition_;
- base::Mutex mutex_;
- base::TimeDelta timeout_;
- int tasks_ = 0;
- int waiting_ = 0;
- bool done_ = false;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_BARRIER_H_
diff --git a/deps/v8/src/heap/base-space.h b/deps/v8/src/heap/base-space.h
index f9ce4fe9b7..a992d75d5c 100644
--- a/deps/v8/src/heap/base-space.h
+++ b/deps/v8/src/heap/base-space.h
@@ -29,12 +29,12 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
return heap_;
}
- AllocationSpace identity() { return id_; }
+ AllocationSpace identity() const { return id_; }
// Returns name of the space.
static const char* GetSpaceName(AllocationSpace space);
- const char* name() { return GetSpaceName(id_); }
+ const char* name() const { return GetSpaceName(id_); }
void AccountCommitted(size_t bytes) {
DCHECK_GE(committed_ + bytes, committed_);
@@ -51,15 +51,15 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
// Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers.
- virtual size_t CommittedMemory() { return committed_; }
+ virtual size_t CommittedMemory() const { return committed_; }
- virtual size_t MaximumCommittedMemory() { return max_committed_; }
+ virtual size_t MaximumCommittedMemory() const { return max_committed_; }
// Approximate amount of physical memory committed for this space.
- virtual size_t CommittedPhysicalMemory() = 0;
+ virtual size_t CommittedPhysicalMemory() const = 0;
// Returns allocated size.
- virtual size_t Size() = 0;
+ virtual size_t Size() const = 0;
protected:
BaseSpace(Heap* heap, AllocationSpace id)
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
index c62fa7f49d..a456d98cd7 100644
--- a/deps/v8/src/heap/basic-memory-chunk.cc
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -9,6 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/objects/heap-object.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -49,32 +50,19 @@ constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
constexpr BasicMemoryChunk::MainThreadFlags
BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;
-BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
- Address area_end) {
- size_ = size;
- area_start_ = area_start;
- area_end_ = area_end;
-}
-
-// static
-BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
- size_t size, Address area_start,
- Address area_end,
- BaseSpace* owner,
- VirtualMemory reservation) {
- BasicMemoryChunk* chunk = FromAddress(base);
- DCHECK_EQ(base, chunk->address());
- new (chunk) BasicMemoryChunk(size, area_start, area_end);
-
- chunk->heap_ = heap;
- chunk->set_owner(owner);
- chunk->reservation_ = std::move(reservation);
- chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
- chunk->allocated_bytes_ = chunk->area_size();
- chunk->wasted_memory_ = 0;
- chunk->marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
-
- return chunk;
+BasicMemoryChunk::BasicMemoryChunk(Heap* heap, BaseSpace* space,
+ size_t chunk_size, Address area_start,
+ Address area_end, VirtualMemory reservation)
+ : size_(chunk_size),
+ heap_(heap),
+ area_start_(area_start),
+ area_end_(area_end),
+ allocated_bytes_(area_end - area_start),
+ wasted_memory_(0),
+ high_water_mark_(area_start - reinterpret_cast<Address>(this)),
+ owner_(space),
+ reservation_(std::move(reservation)) {
+ marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
}
bool BasicMemoryChunk::InOldSpace() const {
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index 98a7109f97..66477e89b1 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -129,14 +129,18 @@ class BasicMemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
- BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+ BasicMemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
+ Address area_start, Address area_end,
+ VirtualMemory reservation);
static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
Address address() const { return reinterpret_cast<Address>(this); }
// Returns the offset of a given address to this page.
- inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
+ inline size_t Offset(Address a) const {
+ return static_cast<size_t>(a - address());
+ }
// Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses.
@@ -178,7 +182,7 @@ class BasicMemoryChunk {
void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
// Set or clear multiple flags at a time. `mask` indicates which flags are
// should be replaced with new `flags`.
- void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
+ void SetFlags(MainThreadFlags flags, MainThreadFlags mask = kAllFlagsMask) {
main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
}
@@ -198,11 +202,11 @@ class BasicMemoryChunk {
return IsFlagSet(READ_ONLY_HEAP);
}
- bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+ bool NeverEvacuate() const { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
- bool CanAllocate() {
+ bool CanAllocate() const {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
@@ -217,7 +221,7 @@ class BasicMemoryChunk {
((flags & COMPACTION_WAS_ABORTED) == 0);
}
- Executability executable() {
+ Executability executable() const {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
@@ -254,11 +258,6 @@ class BasicMemoryChunk {
return addr >= area_start() && addr <= area_end();
}
- static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- BaseSpace* owner,
- VirtualMemory reservation);
-
size_t wasted_memory() const { return wasted_memory_; }
void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
size_t allocated_bytes() const { return allocated_bytes_; }
@@ -291,7 +290,7 @@ class BasicMemoryChunk {
Bitmap::FromAddress(address() + kMarkingBitmapOffset));
}
- Address HighWaterMark() { return address() + high_water_mark_; }
+ Address HighWaterMark() const { return address() + high_water_mark_; }
static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return;
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index 08b3c15148..bdb09771c3 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -191,6 +191,7 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
}
const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
+ const size_t kCommitPageSize = page_allocator()->CommitPageSize();
size_t allocate_code_size =
RoundUp(embedded_blob_code_size, kAllocatePageSize);
@@ -207,8 +208,31 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
isolate, "Can't allocate space for re-embedded builtins");
}
- size_t code_size =
- RoundUp(embedded_blob_code_size, page_allocator()->CommitPageSize());
+ size_t code_size = RoundUp(embedded_blob_code_size, kCommitPageSize);
+ if constexpr (base::OS::IsRemapPageSupported()) {
+ // By default, the embedded builtins are not remapped, but copied. This
+ // costs memory, since builtins become private dirty anonymous memory,
+ // rather than shared, clean, file-backed memory for the embedded version.
+ // If the OS supports it, we can remap the builtins *on top* of the space
+ // allocated in the code range, making the "copy" shared, clean, file-backed
+ // memory, and thus saving sizeof(builtins).
+ //
+ // Builtins should start at a page boundary, see
+ // platform-embedded-file-writer-mac.cc. If it's not the case (e.g. if the
+ // embedded builtins are not coming from the binary), fall back to copying.
+ if (IsAligned(reinterpret_cast<uintptr_t>(embedded_blob_code),
+ kCommitPageSize)) {
+ bool ok = base::OS::RemapPages(embedded_blob_code, code_size,
+ embedded_blob_code_copy,
+ base::OS::MemoryPermission::kReadExecute);
+
+ if (ok) {
+ embedded_blob_code_copy_.store(embedded_blob_code_copy,
+ std::memory_order_release);
+ return embedded_blob_code_copy;
+ }
+ }
+ }
if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
PageAllocator::kReadWrite)) {
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
index 3cf8f41c43..9486c234b0 100644
--- a/deps/v8/src/heap/collection-barrier.cc
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -9,7 +9,6 @@
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
-#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 1863eb5a22..7629e572ec 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -10,6 +10,7 @@
#include "include/v8config.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 2a8feffc59..d893a0d39d 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -75,7 +75,9 @@ class V8ToCppGCReferencesVisitor final
const internal::JSObject js_object =
*reinterpret_cast<const internal::JSObject* const&>(value);
- if (!js_object.ptr() || !js_object.MayHaveEmbedderFields()) return;
+ if (!js_object.ptr() || js_object.IsSmi() ||
+ !js_object.MayHaveEmbedderFields())
+ return;
internal::LocalEmbedderHeapTracer::WrapperInfo info;
if (!internal::LocalEmbedderHeapTracer::ExtractWrappableInfo(
@@ -143,15 +145,13 @@ void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
void CppHeap::CollectGarbageForTesting(cppgc::EmbedderStackState stack_state) {
return internal::CppHeap::From(this)->CollectGarbageForTesting(
- cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
- stack_state);
+ internal::CppHeap::CollectionType::kMajor, stack_state);
}
void CppHeap::CollectGarbageInYoungGenerationForTesting(
cppgc::EmbedderStackState stack_state) {
return internal::CppHeap::From(this)->CollectGarbageForTesting(
- cppgc::internal::GarbageCollector::Config::CollectionType::kMinor,
- stack_state);
+ internal::CppHeap::CollectionType::kMinor, stack_state);
}
namespace internal {
@@ -204,27 +204,27 @@ class UnifiedHeapConcurrentMarker
: public cppgc::internal::ConcurrentMarkerBase {
public:
UnifiedHeapConcurrentMarker(
- cppgc::internal::HeapBase& heap,
+ cppgc::internal::HeapBase& heap, Heap* v8_heap,
cppgc::internal::MarkingWorklists& marking_worklists,
cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform,
UnifiedHeapMarkingState& unified_heap_marking_state)
: cppgc::internal::ConcurrentMarkerBase(
heap, marking_worklists, incremental_marking_schedule, platform),
- unified_heap_marking_state_(unified_heap_marking_state) {}
+ v8_heap_(v8_heap) {}
std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState&) const final;
private:
- UnifiedHeapMarkingState& unified_heap_marking_state_;
+ Heap* const v8_heap_;
};
std::unique_ptr<cppgc::Visitor>
UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState& marking_state) const {
- return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
- heap(), marking_state, unified_heap_marking_state_);
+ return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(heap(), v8_heap_,
+ marking_state);
}
void FatalOutOfMemoryHandlerImpl(const std::string& reason,
@@ -253,6 +253,10 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
marking_visitor_->marking_state_);
}
+ UnifiedHeapMarkingState& GetMutatorUnifiedHeapMarkingState() {
+ return mutator_unified_heap_marking_state_;
+ }
+
protected:
cppgc::Visitor& visitor() final { return *marking_visitor_; }
cppgc::internal::ConservativeTracingVisitor& conservative_visitor() final {
@@ -263,7 +267,7 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
}
private:
- UnifiedHeapMarkingState unified_heap_marking_state_;
+ UnifiedHeapMarkingState mutator_unified_heap_marking_state_;
std::unique_ptr<MutatorUnifiedHeapMarkingVisitor> marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
@@ -273,19 +277,19 @@ UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::Platform* platform,
MarkingConfig config)
: cppgc::internal::MarkerBase(heap, platform, config),
- unified_heap_marking_state_(v8_heap),
- marking_visitor_(
- config.collection_type == cppgc::internal::GarbageCollector::Config::
- CollectionType::kMajor
- ? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
- heap, mutator_marking_state_, unified_heap_marking_state_)
- : std::make_unique<MutatorMinorGCMarkingVisitor>(
- heap, mutator_marking_state_, unified_heap_marking_state_)),
+ mutator_unified_heap_marking_state_(v8_heap, nullptr),
+ marking_visitor_(config.collection_type == CppHeap::CollectionType::kMajor
+ ? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
+ heap, mutator_marking_state_,
+ mutator_unified_heap_marking_state_)
+ : std::make_unique<MutatorMinorGCMarkingVisitor>(
+ heap, mutator_marking_state_,
+ mutator_unified_heap_marking_state_)),
conservative_marking_visitor_(heap, mutator_marking_state_,
*marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
- heap_, marking_worklists_, schedule_, platform_,
- unified_heap_marking_state_);
+ heap_, v8_heap, marking_worklists_, schedule_, platform_,
+ mutator_unified_heap_marking_state_);
}
void UnifiedHeapMarker::AddObject(void* object) {
@@ -294,10 +298,17 @@ void UnifiedHeapMarker::AddObject(void* object) {
}
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
- const FullCycle& cppgc_event) {
- DCHECK(!last_full_gc_event_.has_value());
- last_full_gc_event_ = cppgc_event;
- GetIsolate()->heap()->tracer()->NotifyCppGCCompleted();
+ const GCCycle& cppgc_event) {
+ auto* tracer = GetIsolate()->heap()->tracer();
+ if (cppgc_event.type == MetricRecorder::GCCycle::Type::kMinor) {
+ DCHECK(!last_young_gc_event_);
+ last_young_gc_event_ = cppgc_event;
+ tracer->NotifyYoungCppGCCompleted();
+ } else {
+ DCHECK(!last_full_gc_event_);
+ last_full_gc_event_ = cppgc_event;
+ tracer->NotifyFullCppGCCompleted();
+ }
}
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
@@ -317,7 +328,6 @@ void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
incremental_mark_batched_events_.events.emplace_back();
incremental_mark_batched_events_.events.back().cpp_wall_clock_duration_in_us =
cppgc_event.duration_us;
- // TODO(chromium:1154636): Populate event.wall_clock_duration_in_us.
if (incremental_mark_batched_events_.events.size() == kMaxBatchedEvents) {
recorder->AddMainThreadEvent(std::move(incremental_mark_batched_events_),
GetContextId());
@@ -336,7 +346,6 @@ void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
incremental_sweep_batched_events_.events.emplace_back();
incremental_sweep_batched_events_.events.back()
.cpp_wall_clock_duration_in_us = cppgc_event.duration_us;
- // TODO(chromium:1154636): Populate event.wall_clock_duration_in_us.
if (incremental_sweep_batched_events_.events.size() == kMaxBatchedEvents) {
recorder->AddMainThreadEvent(std::move(incremental_sweep_batched_events_),
GetContextId());
@@ -360,17 +369,28 @@ void CppHeap::MetricRecorderAdapter::FlushBatchedIncrementalEvents() {
}
}
-bool CppHeap::MetricRecorderAdapter::MetricsReportPending() const {
+bool CppHeap::MetricRecorderAdapter::FullGCMetricsReportPending() const {
return last_full_gc_event_.has_value();
}
-const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
+bool CppHeap::MetricRecorderAdapter::YoungGCMetricsReportPending() const {
+ return last_young_gc_event_.has_value();
+}
+
+const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
CppHeap::MetricRecorderAdapter::ExtractLastFullGcEvent() {
auto res = std::move(last_full_gc_event_);
last_full_gc_event_.reset();
return res;
}
+const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
+CppHeap::MetricRecorderAdapter::ExtractLastYoungGcEvent() {
+ auto res = std::move(last_young_gc_event_);
+ last_young_gc_event_.reset();
+ return res;
+}
+
const base::Optional<cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
CppHeap::MetricRecorderAdapter::ExtractLastIncrementalMarkEvent() {
auto res = std::move(last_incremental_mark_event_);
@@ -383,6 +403,7 @@ void CppHeap::MetricRecorderAdapter::ClearCachedEvents() {
incremental_sweep_batched_events_.events.clear();
last_incremental_mark_event_.reset();
last_full_gc_event_.reset();
+ last_young_gc_event_.reset();
}
Isolate* CppHeap::MetricRecorderAdapter::GetIsolate() const {
@@ -493,6 +514,9 @@ bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) {
} // namespace
CppHeap::MarkingType CppHeap::SelectMarkingType() const {
+ // For now, force atomic marking for minor collections.
+ if (*collection_type_ == CollectionType::kMinor) return MarkingType::kAtomic;
+
if (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
return MarkingType::kAtomic;
@@ -505,29 +529,31 @@ CppHeap::SweepingType CppHeap::SelectSweepingType() const {
return sweeping_support();
}
-void CppHeap::InitializeTracing(
- cppgc::internal::GarbageCollector::Config::CollectionType collection_type,
- GarbageCollectionFlags gc_flags) {
+void CppHeap::InitializeTracing(CollectionType collection_type,
+ GarbageCollectionFlags gc_flags) {
CHECK(!sweeper_.IsSweepingInProgress());
- // Check that previous cycle metrics have been reported.
- DCHECK_IMPLIES(GetMetricRecorder(),
- !GetMetricRecorder()->MetricsReportPending());
+ // Check that previous cycle metrics for the same collection type have been
+ // reported.
+ if (GetMetricRecorder()) {
+ if (collection_type == CollectionType::kMajor)
+ DCHECK(!GetMetricRecorder()->FullGCMetricsReportPending());
+ else
+ DCHECK(!GetMetricRecorder()->YoungGCMetricsReportPending());
+ }
DCHECK(!collection_type_);
collection_type_ = collection_type;
#if defined(CPPGC_YOUNG_GENERATION)
- if (*collection_type_ ==
- cppgc::internal::GarbageCollector::Config::CollectionType::kMajor)
+ if (*collection_type_ == CollectionType::kMajor)
cppgc::internal::SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
current_gc_flags_ = gc_flags;
const UnifiedHeapMarker::MarkingConfig marking_config{
- *collection_type_, cppgc::Heap::StackState::kNoHeapPointers,
- SelectMarkingType(),
+ *collection_type_, StackState::kNoHeapPointers, SelectMarkingType(),
IsForceGC(current_gc_flags_)
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
@@ -546,6 +572,16 @@ void CppHeap::InitializeTracing(
}
void CppHeap::StartTracing() {
+ if (isolate_) {
+ // Reuse the same local worklist for the mutator marking state, so that
+ // objects are processed directly by the JS marking logic. This also avoids
+ // publishing local objects.
+ static_cast<UnifiedHeapMarker*>(marker_.get())
+ ->GetMutatorUnifiedHeapMarkingState()
+ .Update(isolate_->heap()
+ ->mark_compact_collector()
+ ->local_marking_worklists());
+ }
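(A sketch of the worklist-sharing and join-then-redrain patterns used in this file follows the end of the cpp-heap.cc hunks below.)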
marker_->StartMarking();
marking_done_ = false;
}
@@ -576,9 +612,7 @@ void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
in_atomic_pause_ = true;
marker_->EnterAtomicPause(stack_state);
- if (isolate_ &&
- *collection_type_ ==
- cppgc::internal::GarbageCollector::Config::CollectionType::kMinor) {
+ if (isolate_ && *collection_type_ == CollectionType::kMinor) {
// Visit V8 -> cppgc references.
TraceV8ToCppGCReferences(isolate_,
static_cast<UnifiedHeapMarker*>(marker_.get())
@@ -588,6 +622,10 @@ void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
compactor_.CancelIfShouldNotCompact(MarkingType::kAtomic, stack_state);
}
+bool CppHeap::FinishConcurrentMarkingIfNeeded() {
+ return marker_->JoinConcurrentMarkingIfNeeded();
+}
+
void CppHeap::TraceEpilogue() {
CHECK(in_atomic_pause_);
CHECK(marking_done_);
@@ -609,9 +647,9 @@ void CppHeap::TraceEpilogue() {
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
UnifiedHeapMarkingVerifier verifier(*this, *collection_type_);
- verifier.Run(
- stack_state_of_prev_gc(), stack_end_of_current_gc(),
- stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+ verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ stats_collector()->marked_bytes_on_current_cycle() +
+ bytes_allocated_in_prefinalizers);
#endif // CPPGC_VERIFY_HEAP
USE(bytes_allocated_in_prefinalizers);
@@ -639,26 +677,32 @@ void CppHeap::TraceEpilogue() {
sweeper().NotifyDoneIfNeeded();
}
-void CppHeap::RunMinorGC() {
-#if defined(CPPGC_YOUNG_GENERATION)
+void CppHeap::RunMinorGC(StackState stack_state) {
+ DCHECK(!sweeper_.IsSweepingInProgress());
+
if (in_no_gc_scope()) return;
// Minor GC does not support nesting in full GCs.
if (IsMarking()) return;
- // Finish sweeping in case it is still running.
- sweeper().FinishIfRunning();
+ // Minor GCs with the stack are currently not supported.
+ if (stack_state == StackState::kMayContainHeapPointers) return;
+
+ // Notify GC tracer that CppGC started young GC cycle.
+ isolate_->heap()->tracer()->NotifyYoungCppGCRunning();
SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
// Perform an atomic GC, with starting incremental/concurrent marking and
// immediately finalizing the garbage collection.
- InitializeTracing(
- cppgc::internal::GarbageCollector::Config::CollectionType::kMinor,
- GarbageCollectionFlagValues::kForced);
+ InitializeTracing(CollectionType::kMinor,
+ GarbageCollectionFlagValues::kNoFlags);
StartTracing();
+ // TODO(chromium:1029379): Should be safe to run without stack.
EnterFinalPause(cppgc::EmbedderStackState::kMayContainHeapPointers);
- AdvanceTracing(std::numeric_limits<double>::infinity());
+ CHECK(AdvanceTracing(std::numeric_limits<double>::infinity()));
+ if (FinishConcurrentMarkingIfNeeded()) {
+ CHECK(AdvanceTracing(std::numeric_limits<double>::infinity()));
+ }
TraceEpilogue();
-#endif // defined(CPPGC_YOUNG_GENERATION)
}
void CppHeap::AllocatedObjectSizeIncreased(size_t bytes) {
@@ -695,9 +739,8 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
}
}
-void CppHeap::CollectGarbageForTesting(
- cppgc::internal::GarbageCollector::Config::CollectionType collection_type,
- cppgc::internal::GarbageCollector::Config::StackState stack_state) {
+void CppHeap::CollectGarbageForTesting(CollectionType collection_type,
+ StackState stack_state) {
if (in_no_gc_scope()) return;
// Finish sweeping in case it is still running.
@@ -717,7 +760,10 @@ void CppHeap::CollectGarbageForTesting(
StartTracing();
}
EnterFinalPause(stack_state);
- AdvanceTracing(std::numeric_limits<double>::infinity());
+ CHECK(AdvanceTracing(std::numeric_limits<double>::infinity()));
+ if (FinishConcurrentMarkingIfNeeded()) {
+ CHECK(AdvanceTracing(std::numeric_limits<double>::infinity()));
+ }
TraceEpilogue();
}
}
@@ -736,9 +782,8 @@ void CppHeap::StartIncrementalGarbageCollectionForTesting() {
DCHECK_NULL(isolate_);
if (IsMarking()) return;
force_incremental_marking_for_testing_ = true;
- InitializeTracing(
- cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollectionFlagValues::kForced);
+ InitializeTracing(CollectionType::kMajor,
+ GarbageCollectionFlagValues::kForced);
StartTracing();
force_incremental_marking_for_testing_ = false;
}
@@ -749,9 +794,7 @@ void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
DCHECK_NULL(isolate_);
DCHECK(IsMarking());
if (IsMarking()) {
- CollectGarbageForTesting(
- cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
- stack_state);
+ CollectGarbageForTesting(CollectionType::kMajor, stack_state);
}
sweeper_.FinishIfRunning();
}
@@ -859,5 +902,12 @@ CppHeap::CreateCppMarkingStateForMutatorThread() {
static_cast<UnifiedHeapMarker*>(marker())->GetMutatorMarkingState());
}
+CppHeap::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
+ CppHeap* cpp_heap) {
+ if (cpp_heap && cpp_heap->marker()) {
+ pause_scope_.emplace(*cpp_heap->marker());
+ }
+}
+
} // namespace internal
} // namespace v8
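Both CollectGarbageForTesting and RunMinorGC above follow the same shape: drain marking to completion, join the concurrent marking job if one ran, then drain once more to pick up whatever the concurrent workers published on exit. A toy illustration of that join-then-redrain pattern with std::thread and a mutex-protected worklist (assumed names, not the cppgc classes):

    #include <deque>
    #include <mutex>
    #include <thread>

    struct Worklist {
      std::mutex mu;
      std::deque<int> items;
      bool Pop(int* out) {
        std::lock_guard<std::mutex> lock(mu);
        if (items.empty()) return false;
        *out = items.front();
        items.pop_front();
        return true;
      }
      void Push(int v) {
        std::lock_guard<std::mutex> lock(mu);
        items.push_back(v);
      }
    };

    void DrainOnMainThread(Worklist& wl) {
      int item;
      while (wl.Pop(&item)) { /* mark `item` */ }
    }

    void FinalizeMarking(Worklist& wl, std::thread& concurrent_marker) {
      DrainOnMainThread(wl);             // AdvanceTracing(...)
      if (concurrent_marker.joinable()) {
        concurrent_marker.join();        // FinishConcurrentMarkingIfNeeded()
        DrainOnMainThread(wl);           // re-drain leftovers published on exit
      }
    }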
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 70958b2b6d..9be1e63d61 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -17,6 +17,7 @@ static_assert(
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/logging/metrics.h"
@@ -41,6 +42,9 @@ class V8_EXPORT_PRIVATE CppHeap final
};
using GarbageCollectionFlags = base::Flags<GarbageCollectionFlagValues>;
+ using StackState = cppgc::internal::GarbageCollector::Config::StackState;
+ using CollectionType =
+ cppgc::internal::GarbageCollector::Config::CollectionType;
class MetricRecorderAdapter final : public cppgc::internal::MetricRecorder {
public:
@@ -48,19 +52,22 @@ class V8_EXPORT_PRIVATE CppHeap final
explicit MetricRecorderAdapter(CppHeap& cpp_heap) : cpp_heap_(cpp_heap) {}
- void AddMainThreadEvent(const FullCycle& cppgc_event) final;
+ void AddMainThreadEvent(const GCCycle& cppgc_event) final;
void AddMainThreadEvent(const MainThreadIncrementalMark& cppgc_event) final;
void AddMainThreadEvent(
const MainThreadIncrementalSweep& cppgc_event) final;
void FlushBatchedIncrementalEvents();
- // The following 3 methods are only used for reporting nested cpp events
+ // The following methods are only used for reporting nested cpp events
// through V8. Standalone events are reported directly.
- bool MetricsReportPending() const;
+ bool FullGCMetricsReportPending() const;
+ bool YoungGCMetricsReportPending() const;
- const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
+ const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
ExtractLastFullGcEvent();
+ const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
+ ExtractLastYoungGcEvent();
const base::Optional<
cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
ExtractLastIncrementalMarkEvent();
@@ -77,12 +84,23 @@ class V8_EXPORT_PRIVATE CppHeap final
incremental_mark_batched_events_;
v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalSweep
incremental_sweep_batched_events_;
- base::Optional<cppgc::internal::MetricRecorder::FullCycle>
+ base::Optional<cppgc::internal::MetricRecorder::GCCycle>
last_full_gc_event_;
+ base::Optional<cppgc::internal::MetricRecorder::GCCycle>
+ last_young_gc_event_;
base::Optional<cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
last_incremental_mark_event_;
};
+ class PauseConcurrentMarkingScope final {
+ public:
+ explicit PauseConcurrentMarkingScope(CppHeap*);
+
+ private:
+ base::Optional<cppgc::internal::MarkerBase::PauseConcurrentMarkingScope>
+ pause_scope_;
+ };
+
static CppHeap* From(v8::CppHeap* heap) {
return static_cast<CppHeap*>(heap);
}
@@ -109,9 +127,7 @@ class V8_EXPORT_PRIVATE CppHeap final
void EnableDetachedGarbageCollectionsForTesting();
- void CollectGarbageForTesting(
- cppgc::internal::GarbageCollector::Config::CollectionType,
- cppgc::internal::GarbageCollector::Config::StackState);
+ void CollectGarbageForTesting(CollectionType, StackState);
void CollectCustomSpaceStatisticsAtLastGC(
std::vector<cppgc::CustomSpaceIndex>,
@@ -128,8 +144,9 @@ class V8_EXPORT_PRIVATE CppHeap final
bool IsTracingDone();
void TraceEpilogue();
void EnterFinalPause(cppgc::EmbedderStackState stack_state);
+ bool FinishConcurrentMarkingIfNeeded();
- void RunMinorGC();
+ void RunMinorGC(StackState);
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
new file mode 100644
index 0000000000..2dea2a9d1e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
@@ -0,0 +1,59 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
+#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
+
+#include "include/v8-traced-handle.h"
+#include "src/base/logging.h"
+#include "src/handles/global-handles.h"
+#include "src/heap/cppgc-js/unified-heap-marking-state.h"
+#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/marking-worklist-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class BasicTracedReferenceExtractor {
+ public:
+ static Object GetObjectForMarking(const TracedReferenceBase& ref) {
+ Address* global_handle_location = const_cast<Address*>(
+ reinterpret_cast<const Address*>(ref.GetSlotThreadSafe()));
+ // We cannot assume that the reference is non-null as we may get here by
+ // tracing an ephemeron which doesn't have early bailouts, see
+ // `cppgc::Visitor::TraceEphemeron()` for non-Member values.
+ if (!global_handle_location) return Object();
+
+ GlobalHandles::MarkTraced(global_handle_location);
+ return Object(
+ reinterpret_cast<std::atomic<Address>*>(global_handle_location)
+ ->load(std::memory_order_relaxed));
+ }
+};
+
+void UnifiedHeapMarkingState::MarkAndPush(
+ const TracedReferenceBase& reference) {
+ // The following code will crash with a null pointer dereference if it finds
+ // a non-empty `TracedReferenceBase` while `CppHeap` is in detached mode.
+
+ Object object = BasicTracedReferenceExtractor::GetObjectForMarking(reference);
+ if (!object.IsHeapObject()) {
+ // The embedder is not aware of whether numbers are materialized as heap
+ // objects or just passed around as Smis.
+ return;
+ }
+ HeapObject heap_object = HeapObject::cast(object);
+ if (marking_state_->WhiteToGrey(heap_object)) {
+ local_marking_worklist_->Push(heap_object);
+ }
+ if (V8_UNLIKELY(track_retaining_path_)) {
+ heap_->AddRetainingRoot(Root::kWrapperTracing, heap_object);
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
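The new MarkAndPush above reads the traced-handle slot with a relaxed atomic load, bails out on non-heap values, and pushes white objects onto the local marking worklist after a white-to-grey transition. A rough sketch of that flow with placeholder types (ToyObject, a color field) instead of V8's Object/HeapObject machinery:

    #include <atomic>
    #include <cstdint>
    #include <deque>

    enum class Color : uint8_t { kWhite, kGrey, kBlack };

    struct ToyObject {
      std::atomic<Color> color{Color::kWhite};
    };

    // Slot written by the embedder; read with a relaxed atomic load because
    // concurrent markers may race with the mutator updating the handle.
    using HandleSlot = std::atomic<ToyObject*>;

    void MarkAndPush(const HandleSlot* slot,
                     std::deque<ToyObject*>& local_worklist) {
      if (!slot) return;  // e.g. reached through an ephemeron with no value
      ToyObject* object = slot->load(std::memory_order_relaxed);
      if (!object) return;  // nothing materialized behind the handle
      Color expected = Color::kWhite;
      // WhiteToGrey: only the transition that wins the race pushes the object.
      if (object->color.compare_exchange_strong(expected, Color::kGrey)) {
        local_worklist.push_back(object);
      }
    }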
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
new file mode 100644
index 0000000000..06e6a30d46
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
@@ -0,0 +1,32 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/unified-heap-marking-state.h"
+
+#include "src/base/logging.h"
+#include "src/heap/mark-compact.h"
+
+namespace v8 {
+namespace internal {
+
+UnifiedHeapMarkingState::UnifiedHeapMarkingState(
+ Heap* heap, MarkingWorklists::Local* local_marking_worklist)
+ : heap_(heap),
+ marking_state_(heap_ ? heap_->mark_compact_collector()->marking_state()
+ : nullptr),
+ local_marking_worklist_(local_marking_worklist),
+ track_retaining_path_(FLAG_track_retaining_path) {
+ DCHECK_IMPLIES(FLAG_track_retaining_path,
+ !FLAG_concurrent_marking && !FLAG_parallel_marking);
+ DCHECK_IMPLIES(heap_, marking_state_);
+}
+
+void UnifiedHeapMarkingState::Update(
+ MarkingWorklists::Local* local_marking_worklist) {
+ local_marking_worklist_ = local_marking_worklist;
+ DCHECK_NOT_NULL(heap_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index 388fa94aab..51dd8fd752 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,43 +6,33 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
-#include "src/base/logging.h"
-#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/marking-worklist.h"
namespace v8 {
-
namespace internal {
-class BasicTracedReferenceExtractor {
- public:
- static Address* ObjectReference(const TracedReferenceBase& ref) {
- return reinterpret_cast<Address*>(ref.val_);
- }
-};
-
-class UnifiedHeapMarkingState {
+// `UnifiedHeapMarkingState` is used to handle `TracedReferenceBase` and
+// friends. It is used when `CppHeap` is attached but also detached. In detached
+// mode, the expectation is that no non-null `TracedReferenceBase` is found.
+class UnifiedHeapMarkingState final {
public:
- explicit UnifiedHeapMarkingState(Heap* heap) : heap_(heap) {}
+ UnifiedHeapMarkingState(Heap*, MarkingWorklists::Local*);
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
- inline void MarkAndPush(const TracedReferenceBase&);
+ void Update(MarkingWorklists::Local*);
+
+ V8_INLINE void MarkAndPush(const TracedReferenceBase&);
private:
- Heap* heap_;
+ Heap* const heap_;
+ MarkCompactCollector::MarkingState* const marking_state_;
+ MarkingWorklists::Local* local_marking_worklist_ = nullptr;
+ const bool track_retaining_path_;
};
-void UnifiedHeapMarkingState::MarkAndPush(const TracedReferenceBase& ref) {
- // The same visitor is used in testing scenarios without attaching the heap to
- // an Isolate under the assumption that no non-empty v8 references are found.
- // Having the following DCHECK crash means that the heap is in detached mode
- // but we find traceable pointers into an Isolate.
- DCHECK_NOT_NULL(heap_);
- heap_->RegisterExternallyReferencedObject(
- BasicTracedReferenceExtractor::ObjectReference(ref));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index f884b1d9fe..559014ad41 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -4,10 +4,12 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
-#include "src/heap/cppgc-js/unified-heap-marking-state.h"
+#include "src/heap/cppgc-js/unified-heap-marking-state-inl.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/visitor.h"
+#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
@@ -53,18 +55,8 @@ void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
marking_state_.RegisterMovableReference(slot);
}
-namespace {
-void DeferredTraceTracedReference(cppgc::Visitor* visitor, const void* ref) {
- static_cast<JSVisitor*>(visitor)->Trace(
- *static_cast<const TracedReferenceBase*>(ref));
-}
-} // namespace
-
void UnifiedHeapMarkingVisitorBase::Visit(const TracedReferenceBase& ref) {
- bool should_defer_tracing = DeferTraceToMutatorThreadIfConcurrent(
- &ref, DeferredTraceTracedReference, 0);
-
- if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
+ unified_heap_marking_state_.MarkAndPush(ref);
}
MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
@@ -89,10 +81,22 @@ void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
}
ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
- HeapBase& heap, cppgc::internal::ConcurrentMarkingState& marking_state,
- UnifiedHeapMarkingState& unified_heap_marking_state)
+ HeapBase& heap, Heap* v8_heap,
+ cppgc::internal::ConcurrentMarkingState& marking_state)
: UnifiedHeapMarkingVisitorBase(heap, marking_state,
- unified_heap_marking_state) {}
+ concurrent_unified_heap_marking_state_),
+ local_marking_worklist_(
+ v8_heap ? std::make_unique<MarkingWorklists::Local>(
+ v8_heap->mark_compact_collector()->marking_worklists())
+ : nullptr),
+ concurrent_unified_heap_marking_state_(v8_heap,
+ local_marking_worklist_.get()) {}
+
+ConcurrentUnifiedHeapMarkingVisitor::~ConcurrentUnifiedHeapMarkingVisitor() {
+ if (local_marking_worklist_) {
+ local_marking_worklist_->Publish();
+ }
+}
bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
const void* parameter, cppgc::TraceCallback callback,
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index abff33cd5a..990e385cd8 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -87,10 +87,9 @@ class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase {
public:
- ConcurrentUnifiedHeapMarkingVisitor(HeapBase&,
- cppgc::internal::ConcurrentMarkingState&,
- UnifiedHeapMarkingState&);
- ~ConcurrentUnifiedHeapMarkingVisitor() override = default;
+ ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, Heap*,
+ cppgc::internal::ConcurrentMarkingState&);
+ ~ConcurrentUnifiedHeapMarkingVisitor() override;
protected:
void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
@@ -103,6 +102,15 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
size_t) final;
+
+ private:
+ // Visitor owns the local worklist. All remaining items are published on
+ // destruction of the visitor. This is good enough as concurrent visitation
+ // ends before computing the rest of the transitive closure on the main
+ // thread. Dynamically allocated as it is only present when the heaps are
+ // attached.
+ std::unique_ptr<MarkingWorklists::Local> local_marking_worklist_;
+ UnifiedHeapMarkingState concurrent_unified_heap_marking_state_;
};
} // namespace internal
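The concurrent visitor above owns a thread-local worklist and publishes whatever remains when the visitor is destroyed, relying on concurrent visitation ending before the main thread computes the rest of the transitive closure. The RAII shape, with an assumed GlobalWorklist/LocalWorklist pair rather than MarkingWorklists::Local:

    #include <deque>
    #include <mutex>
    #include <utility>
    #include <vector>

    struct GlobalWorklist {
      std::mutex mu;
      std::deque<int> segments;
      void Publish(std::vector<int>&& local) {
        std::lock_guard<std::mutex> lock(mu);
        for (int v : local) segments.push_back(v);
      }
    };

    class LocalWorklist {
     public:
      explicit LocalWorklist(GlobalWorklist& global) : global_(global) {}
      // Remaining items are handed to the global worklist on destruction.
      ~LocalWorklist() { global_.Publish(std::move(buffer_)); }
      void Push(int v) { buffer_.push_back(v); }

     private:
      GlobalWorklist& global_;
      std::vector<int> buffer_;
    };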
diff --git a/deps/v8/src/heap/cppgc/caged-heap-local-data.cc b/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
index b1ce0df00f..044b3d5a2b 100644
--- a/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -31,10 +31,12 @@ void AgeTable::Reset(PageAllocator* allocator) {
// TODO(chromium:1029379): Consider MADV_DONTNEED instead of MADV_FREE on
// POSIX platforms.
std::fill(table_.begin(), table_.end(), Age::kOld);
- const uintptr_t begin = RoundUp(reinterpret_cast<uintptr_t>(table_.begin()),
- allocator->CommitPageSize());
- const uintptr_t end = RoundDown(reinterpret_cast<uintptr_t>(table_.end()),
+ const uintptr_t begin = RoundUp(reinterpret_cast<uintptr_t>(table_.data()),
allocator->CommitPageSize());
+ const uintptr_t end =
+ RoundDown(reinterpret_cast<uintptr_t>(table_.data() + table_.size()),
+ allocator->CommitPageSize());
+
allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
}
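The fix above switches from iterator arithmetic to table_.data() when computing the commit-page-aligned sub-range that is handed to DiscardSystemPages: the first whole page starts at RoundUp(begin) and the last ends at RoundDown(end). A small sketch of that computation (RoundUp/RoundDown written out; the page size is assumed to be a power of two):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <utility>

    constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t align) {
      return (x + align - 1) & ~(align - 1);
    }
    constexpr uintptr_t RoundDown(uintptr_t x, uintptr_t align) {
      return x & ~(align - 1);
    }

    // Returns the [begin, end) range of whole pages inside the table's storage;
    // only this range may be discarded without touching neighbouring data.
    template <typename T, size_t N>
    std::pair<uintptr_t, uintptr_t> DiscardableRange(const std::array<T, N>& table,
                                                     uintptr_t page_size) {
      const uintptr_t begin =
          RoundUp(reinterpret_cast<uintptr_t>(table.data()), page_size);
      const uintptr_t end = RoundDown(
          reinterpret_cast<uintptr_t>(table.data() + table.size()), page_size);
      return {begin, end < begin ? begin : end};  // empty if table spans < 1 page
    }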
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index c300793515..450020a21a 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -292,6 +292,7 @@ class CompactionState final {
page->PayloadSize() - used_bytes_in_current_page_);
}
#endif
+ page->object_start_bitmap().MarkAsFullyPopulated();
}
private:
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
index 04d2c65aaa..983e1f1f29 100644
--- a/deps/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -203,14 +203,20 @@ void ConcurrentMarkerBase::Start() {
std::make_unique<ConcurrentMarkingTask>(*this));
}
-void ConcurrentMarkerBase::Cancel() {
- if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
- concurrent_marking_handle_->Cancel();
+bool ConcurrentMarkerBase::Join() {
+ if (!concurrent_marking_handle_ || !concurrent_marking_handle_->IsValid())
+ return false;
+
+ concurrent_marking_handle_->Join();
+ return true;
}
-void ConcurrentMarkerBase::JoinForTesting() {
- if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
- concurrent_marking_handle_->Join();
+bool ConcurrentMarkerBase::Cancel() {
+ if (!concurrent_marking_handle_ || !concurrent_marking_handle_->IsValid())
+ return false;
+
+ concurrent_marking_handle_->Cancel();
+ return true;
}
bool ConcurrentMarkerBase::IsActive() const {
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.h b/deps/v8/src/heap/cppgc/concurrent-marker.h
index 830584910a..f244f1a49a 100644
--- a/deps/v8/src/heap/cppgc/concurrent-marker.h
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.h
@@ -24,9 +24,10 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
ConcurrentMarkerBase& operator=(const ConcurrentMarkerBase&) = delete;
void Start();
- void Cancel();
-
- void JoinForTesting();
+ // Returns whether the job has been joined.
+ bool Join();
+ // Returns whether the job has been cancelled.
+ bool Cancel();
void NotifyIncrementalMutatorStepCompleted();
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
deleted file mode 100644
index 1899557134..0000000000
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/cppgc/default-platform.h"
-
-namespace cppgc {
-
-// static
-void DefaultPlatform::InitializeProcess(DefaultPlatform* platform) {
- cppgc::InitializeProcess(platform->GetPageAllocator());
-}
-
-} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index 14b0d2ad19..83b05f34b2 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -19,6 +19,7 @@
#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/unmarker.h"
namespace cppgc {
namespace internal {
@@ -130,7 +131,8 @@ void HeapBase::ResetRememberedSet() {
protected:
bool VisitNormalPageSpace(NormalPageSpace& space) {
- some_lab_is_set_ |= space.linear_allocation_buffer().size();
+ some_lab_is_set_ |=
+ static_cast<bool>(space.linear_allocation_buffer().size());
return true;
}
@@ -152,6 +154,7 @@ void HeapBase::Terminate() {
constexpr size_t kMaxTerminationGCs = 20;
size_t gc_count = 0;
bool more_termination_gcs_needed = false;
+
do {
CHECK_LT(gc_count++, kMaxTerminationGCs);
@@ -164,15 +167,27 @@ void HeapBase::Terminate() {
weak_cross_thread_persistent_region_.ClearAllUsedNodes();
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ // Unmark the heap so that the sweeper destructs all objects.
+ // TODO(chromium:1029379): Merge two heap iterations (unmarking + sweeping)
+ // into forced finalization.
+ SequentialUnmarker unmarker(raw_heap());
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
+ in_atomic_pause_ = true;
stats_collector()->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
GarbageCollector::Config::IsForcedGC::kForced);
object_allocator().ResetLinearAllocationBuffers();
stats_collector()->NotifyMarkingCompleted(0);
ExecutePreFinalizers();
+ // TODO(chromium:1029379): Prefinalizers may black-allocate objects (under a
+ // compile-time option). Run sweeping with forced finalization here.
sweeper().Start(
{Sweeper::SweepingConfig::SweepingType::kAtomic,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
+ in_atomic_pause_ = false;
+
sweeper().NotifyDoneIfNeeded();
more_termination_gcs_needed =
strong_persistent_region_.NodesInUse() ||
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index 5ee54f2aef..5c33a13314 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -57,7 +57,8 @@ class V8_EXPORT_PRIVATE BasePage {
// |address| is guaranteed to point into the page but not payload. Returns
// nullptr when pointing into free list entries and the valid header
- // otherwise.
+ // otherwise. The function is not thread-safe and cannot be called when
+ // e.g. sweeping is in progress.
HeapObjectHeader* TryObjectHeaderFromInnerAddress(void* address) const;
const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
const void* address) const;
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index beaa089206..74194c4373 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -180,9 +180,9 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
MarkingVerifier verifier(*this, config_.collection_type);
- verifier.Run(
- config_.stack_state, stack_end_of_current_gc(),
- stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+ verifier.Run(config_.stack_state, stack_end_of_current_gc(),
+ stats_collector()->marked_bytes_on_current_cycle() +
+ bytes_allocated_in_prefinalizers);
#endif // CPPGC_VERIFY_HEAP
#ifndef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index fa8732fde7..afac1f4887 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -87,6 +87,23 @@ size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;
+class MarkerBase::IncrementalMarkingTask final : public cppgc::Task {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ IncrementalMarkingTask(MarkerBase*, MarkingConfig::StackState);
+
+ static Handle Post(cppgc::TaskRunner*, MarkerBase*);
+
+ private:
+ void Run() final;
+
+ MarkerBase* const marker_;
+ MarkingConfig::StackState stack_state_;
+ // TODO(chromium:1056170): Change to CancelableTask.
+ Handle handle_;
+};
+
MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
MarkerBase* marker, MarkingConfig::StackState stack_state)
: marker_(marker),
@@ -218,7 +235,6 @@ void MarkerBase::StartMarking() {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
- concurrent_marking_active_ = true;
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
@@ -226,6 +242,7 @@ void MarkerBase::StartMarking() {
incremental_marking_allocation_observer_.get());
}
}
+
void MarkerBase::HandleNotFullyConstructedObjects() {
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
@@ -262,11 +279,10 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
// Start parallel marking.
mutator_marking_state_.Publish();
- if (concurrent_marking_active_) {
+ if (concurrent_marker_->IsActive()) {
concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
} else {
concurrent_marker_->Start();
- concurrent_marking_active_ = true;
}
}
}
@@ -301,6 +317,9 @@ void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kAtomicMark);
CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
+ if (JoinConcurrentMarkingIfNeeded()) {
+ CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
+ }
mutator_marking_state_.Publish();
}
LeaveAtomicPause();
@@ -431,13 +450,11 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
}
}
-bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
+bool MarkerBase::JoinConcurrentMarkingIfNeeded() {
if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
- !concurrent_marking_active_)
+ !concurrent_marker_->Join())
return false;
- concurrent_marker_->Cancel();
- concurrent_marking_active_ = false;
// Concurrent markers may have pushed some "leftover" in-construction objects
// after flushing in EnterAtomicPause.
HandleNotFullyConstructedObjects();
@@ -464,9 +481,6 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
// adjustment.
is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
}
- if (is_done && CancelConcurrentMarkingIfNeeded()) {
- is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
- }
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
@@ -623,7 +637,17 @@ void MarkerBase::SetMainThreadMarkingDisabledForTesting(bool value) {
}
void MarkerBase::WaitForConcurrentMarkingForTesting() {
- concurrent_marker_->JoinForTesting();
+ concurrent_marker_->Join();
+}
+
+MarkerBase::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
+ MarkerBase& marker)
+ : marker_(marker), resume_on_exit_(marker_.concurrent_marker_->Cancel()) {}
+
+MarkerBase::PauseConcurrentMarkingScope::~PauseConcurrentMarkingScope() {
+ if (resume_on_exit_) {
+ marker_.concurrent_marker_->Start();
+ }
}
Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
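PauseConcurrentMarkingScope above cancels the concurrent marking job on entry and remembers whether anything was actually cancelled, so the destructor only restarts the job in that case. A generic RAII sketch over an assumed Job interface (not v8::JobHandle):

    // Assumed minimal job interface for illustration only.
    class Job {
     public:
      virtual ~Job() = default;
      virtual bool Cancel() = 0;  // returns true if a running job was cancelled
      virtual void Start() = 0;
    };

    class PauseScope {
     public:
      explicit PauseScope(Job& job) : job_(job), resume_on_exit_(job.Cancel()) {}
      ~PauseScope() {
        if (resume_on_exit_) job_.Start();  // only restart what we stopped
      }
      PauseScope(const PauseScope&) = delete;
      PauseScope& operator=(const PauseScope&) = delete;

     private:
      Job& job_;
      const bool resume_on_exit_;
    };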
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index d990dcaed0..57d4bdf11b 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -36,6 +36,8 @@ class HeapBase;
// Alternatively, FinishMarking combines steps 3.-5.
class V8_EXPORT_PRIVATE MarkerBase {
public:
+ class IncrementalMarkingTask;
+
struct MarkingConfig {
enum class CollectionType : uint8_t {
kMinor,
@@ -61,6 +63,17 @@ class V8_EXPORT_PRIVATE MarkerBase {
kSteele,
};
+ // Pauses concurrent marking if running while this scope is active.
+ class PauseConcurrentMarkingScope final {
+ public:
+ explicit PauseConcurrentMarkingScope(MarkerBase&);
+ ~PauseConcurrentMarkingScope();
+
+ private:
+ MarkerBase& marker_;
+ const bool resume_on_exit_;
+ };
+
virtual ~MarkerBase();
MarkerBase(const MarkerBase&) = delete;
@@ -98,6 +111,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void ProcessWeakness();
+ bool JoinConcurrentMarkingIfNeeded();
+
inline void WriteBarrierForInConstructionObject(HeapObjectHeader&);
template <WriteBarrierType type>
@@ -105,41 +120,25 @@ class V8_EXPORT_PRIVATE MarkerBase {
HeapBase& heap() { return heap_; }
- MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
- MutatorMarkingState& MutatorMarkingStateForTesting() {
- return mutator_marking_state_;
- }
cppgc::Visitor& Visitor() { return visitor(); }
- void ClearAllWorklistsForTesting();
-
- bool IncrementalMarkingStepForTesting(MarkingConfig::StackState);
-
- class IncrementalMarkingTask final : public cppgc::Task {
- public:
- using Handle = SingleThreadedHandle;
- IncrementalMarkingTask(MarkerBase*, MarkingConfig::StackState);
-
- static Handle Post(cppgc::TaskRunner*, MarkerBase*);
-
- private:
- void Run() final;
-
- MarkerBase* const marker_;
- MarkingConfig::StackState stack_state_;
- // TODO(chromium:1056170): Change to CancelableTask.
- Handle handle_;
- };
+ bool IsMarking() const { return is_marking_; }
void SetMainThreadMarkingDisabledForTesting(bool);
-
void WaitForConcurrentMarkingForTesting();
+ void ClearAllWorklistsForTesting();
+ bool IncrementalMarkingStepForTesting(MarkingConfig::StackState);
- bool IsMarking() const { return is_marking_; }
+ MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
+ MutatorMarkingState& MutatorMarkingStateForTesting() {
+ return mutator_marking_state_;
+ }
protected:
class IncrementalMarkingAllocationObserver;
+ using IncrementalMarkingTaskHandle = SingleThreadedHandle;
+
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
@@ -163,8 +162,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
void AdvanceMarkingOnAllocation();
- bool CancelConcurrentMarkingIfNeeded();
-
void HandleNotFullyConstructedObjects();
HeapBase& heap_;
@@ -172,7 +169,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
cppgc::Platform* platform_;
std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
- IncrementalMarkingTask::Handle incremental_marking_handle_;
+ IncrementalMarkingTaskHandle incremental_marking_handle_;
std::unique_ptr<IncrementalMarkingAllocationObserver>
incremental_marking_allocation_observer_;
@@ -183,7 +180,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
- bool concurrent_marking_active_ = false;
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index feb009708d..d57b92fcba 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -106,14 +106,12 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
#if defined(CPPGC_YOUNG_GENERATION)
if (collection_type_ == Heap::Config::CollectionType::kMinor) {
- const auto age = heap_.caged_heap()
- .local_data()
- .age_table[heap_.caged_heap().OffsetFromAddress(
- header.ObjectStart())];
+ const auto age = heap_.caged_heap().local_data().age_table.GetAge(
+ heap_.caged_heap().OffsetFromAddress(header.ObjectStart()));
if (age == AgeTable::Age::kOld) {
// Do not verify old objects.
return true;
- } else if (age == AgeTable::Age::kUnknown) {
+ } else if (age == AgeTable::Age::kMixed) {
// If the age is not known, the marked bytes may not be exact as possibly
// old objects are verified as well.
verifier_found_marked_bytes_are_exact_ = false;
diff --git a/deps/v8/src/heap/cppgc/metric-recorder.h b/deps/v8/src/heap/cppgc/metric-recorder.h
index 53c5f3a40b..4ac7a0fb29 100644
--- a/deps/v8/src/heap/cppgc/metric-recorder.h
+++ b/deps/v8/src/heap/cppgc/metric-recorder.h
@@ -20,7 +20,8 @@ class StatsCollector;
*/
class MetricRecorder {
public:
- struct FullCycle {
+ struct GCCycle {
+ enum class Type { kMinor, kMajor };
struct IncrementalPhases {
int64_t mark_duration_us = -1;
int64_t sweep_duration_us = -1;
@@ -35,6 +36,7 @@ class MetricRecorder {
int64_t freed_bytes = -1;
};
+ Type type = Type::kMajor;
Phases total;
Phases main_thread;
Phases main_thread_atomic;
@@ -56,7 +58,7 @@ class MetricRecorder {
virtual ~MetricRecorder() = default;
- virtual void AddMainThreadEvent(const FullCycle& event) {}
+ virtual void AddMainThreadEvent(const GCCycle& event) {}
virtual void AddMainThreadEvent(const MainThreadIncrementalMark& event) {}
virtual void AddMainThreadEvent(const MainThreadIncrementalSweep& event) {}
};
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 607f210789..3327260770 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -30,7 +30,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
DCHECK_LT(begin, end);
- static constexpr auto kEntrySize = AgeTable::kEntrySizeInBytes;
+ static constexpr auto kEntrySize = AgeTable::kCardSizeInBytes;
const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);
@@ -44,16 +44,16 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
auto& age_table = page->heap().caged_heap().local_data().age_table;
for (auto offset = young_offset_begin; offset < young_offset_end;
- offset += AgeTable::kEntrySizeInBytes) {
- age_table[offset] = AgeTable::Age::kYoung;
+ offset += AgeTable::kCardSizeInBytes) {
+ age_table.SetAge(offset, AgeTable::Age::kYoung);
}
// Set the first and the last regions of the newly allocated linear buffer
// to kMixed, since those cards may also contain old objects.
if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
- age_table[offset_begin] = AgeTable::Age::kUnknown;
+ age_table.SetAge(offset_begin, AgeTable::Age::kMixed);
if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
- age_table[offset_end] = AgeTable::Age::kUnknown;
+ age_table.SetAge(offset_end, AgeTable::Age::kMixed);
#endif
}
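MarkRangeAsYoung above works at card granularity: cards fully covered by the new allocation become kYoung, while a partially covered first or last card becomes kMixed because it may also hold old objects. A toy card table showing that boundary handling (a fixed 512-byte card size is assumed for the sketch; offsets are relative to the cage base and must lie within the table):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class Age : uint8_t { kOld, kYoung, kMixed };

    constexpr size_t kCardSize = 512;  // assumed card size for the sketch

    void MarkRangeAsYoung(std::vector<Age>& cards, uintptr_t offset_begin,
                          uintptr_t offset_end) {
      const size_t first = offset_begin / kCardSize;
      const size_t last = (offset_end - 1) / kCardSize;
      for (size_t card = first; card <= last; ++card) {
        const uintptr_t card_begin = card * kCardSize;
        const uintptr_t card_end = card_begin + kCardSize;
        const bool fully_covered =
            offset_begin <= card_begin && offset_end >= card_end;
        // A partially covered boundary card may still contain old objects.
        cards[card] = fully_covered ? Age::kYoung : Age::kMixed;
      }
    }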
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index d7c5a45cf7..4522f94785 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -146,6 +146,8 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
STATIC_ASSERT(2 * kAllocationGranularity ==
api_constants::kMaxSupportedAlignment);
STATIC_ASSERT(kAllocationGranularity == sizeof(HeapObjectHeader));
+ STATIC_ASSERT(kAllocationGranularity ==
+ api_constants::kAllocationGranularity);
DCHECK_EQ(2 * sizeof(HeapObjectHeader), static_cast<size_t>(alignment));
constexpr size_t kAlignment = 2 * kAllocationGranularity;
constexpr size_t kAlignmentMask = kAlignment - 1;
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
index da5df3932e..44af79b21c 100644
--- a/deps/v8/src/heap/cppgc/object-start-bitmap.h
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -66,6 +66,11 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
// Clear the object start bitmap.
inline void Clear();
+ // Marks the bitmap as fully populated. Unpopulated bitmaps are in an
+ // inconsistent state and must be populated before they can be used to find
+ // object headers.
+ inline void MarkAsFullyPopulated();
+
private:
template <AccessMode = AccessMode::kNonAtomic>
inline void store(size_t cell_index, uint8_t value);
@@ -83,6 +88,17 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
const Address offset_;
+ // `fully_populated_` is used to denote that the bitmap is populated with all
+ // currently allocated objects on the page and is in a consistent state. It is
+ // used to guard against using the bitmap for finding headers during
+ // concurrent sweeping.
+ //
+ // Although this flag can be used by both the main thread and concurrent
+ // sweeping threads, it is not atomic. The flag should never be accessed by
+ // multiple threads at the same time. If data races are observed on this flag,
+ // it likely means that the bitmap is queried while concurrent sweeping is
+ // active, which is not supported and should be avoided.
+ bool fully_populated_ = false;
// The bitmap contains a bit for every kGranularity-aligned address on
// a NormalPage, i.e., for a page of size kBlinkPageSize.
std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
@@ -90,11 +106,13 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
Clear();
+ MarkAsFullyPopulated();
}
template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ DCHECK(fully_populated_);
DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
size_t object_offset =
address_maybe_pointing_to_the_middle_of_object - offset_;
@@ -187,7 +205,13 @@ inline void ObjectStartBitmap::Iterate(Callback callback) const {
}
}
+void ObjectStartBitmap::MarkAsFullyPopulated() {
+ DCHECK(!fully_populated_);
+ fully_populated_ = true;
+}
+
void ObjectStartBitmap::Clear() {
+ fully_populated_ = false;
std::fill(object_start_bit_map_.begin(), object_start_bit_map_.end(), 0);
}
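The fully_populated_ flag above makes the bitmap's lifecycle explicit: Clear() leaves the bitmap inconsistent, MarkAsFullyPopulated() re-arms it (as the compactor now does per page), and lookups assert on it. A condensed sketch of that state machine, with assert() in place of DCHECK and a plain bit array instead of the object-start encoding:

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    class ToyStartBitmap {
     public:
      ToyStartBitmap() { Clear(); MarkAsFullyPopulated(); }

      void Clear() {
        fully_populated_ = false;  // queries are invalid until repopulated
        bits_.fill(0);
      }

      void SetBit(size_t i) { bits_[i / 8] |= uint8_t{1} << (i % 8); }

      void MarkAsFullyPopulated() {
        assert(!fully_populated_);
        fully_populated_ = true;
      }

      bool CheckBit(size_t i) const {
        assert(fully_populated_);  // e.g. compaction must repopulate first
        return bits_[i / 8] & (uint8_t{1} << (i % 8));
      }

     private:
      bool fully_populated_ = false;
      std::array<uint8_t, 32> bits_{};
    };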
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 1739e837bd..252d4af750 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -64,9 +64,10 @@ void SameThreadEnabledCheckingPolicyBase::CheckPointerImpl(
const HeapObjectHeader* header = nullptr;
if (points_to_payload) {
header = &HeapObjectHeader::FromObject(ptr);
- } else if (!heap_->sweeper().IsSweepingInProgress()) {
- // Mixin case.
- header = &base_page->ObjectHeaderFromInnerAddress(ptr);
+ } else {
+ // Mixin case. Access the ObjectStartBitmap atomically since sweeping can be
+ // in progress.
+ header = &base_page->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(ptr);
DCHECK_LE(header->ObjectStart(), ptr);
DCHECK_GT(header->ObjectEnd(), ptr);
}
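The pointer-policy fix above switches the mixin lookup to ObjectHeaderFromInnerAddress<AccessMode::kAtomic> because the sweeper may be rewriting the page concurrently. cppgc commonly threads such an AccessMode template parameter down to the raw loads; a loose analogue of the idiom, using std::atomic with relaxed vs. acquire ordering rather than cppgc's internal encoding:

    #include <atomic>
    #include <cstdint>

    enum class AccessMode { kNonAtomic, kAtomic };

    struct ToyHeader {
      std::atomic<uint16_t> encoded{0};
    };

    template <AccessMode mode>
    uint16_t LoadEncoded(const ToyHeader& header) {
      if constexpr (mode == AccessMode::kAtomic) {
        // Concurrent sweeping may rewrite the header; synchronize the read.
        return header.encoded.load(std::memory_order_acquire);
      }
      return header.encoded.load(std::memory_order_relaxed);  // mutator-only path
    }

    // Usage: uint16_t bits = LoadEncoded<AccessMode::kAtomic>(header);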
diff --git a/deps/v8/src/heap/cppgc/remembered-set.cc b/deps/v8/src/heap/cppgc/remembered-set.cc
index 8843219745..7282d1b119 100644
--- a/deps/v8/src/heap/cppgc/remembered-set.cc
+++ b/deps/v8/src/heap/cppgc/remembered-set.cc
@@ -17,7 +17,7 @@ namespace internal {
namespace {
// Visit remembered set that was recorded in the generational barrier.
-void VisitRememberedSlots(std::set<void*> slots, const HeapBase& heap,
+void VisitRememberedSlots(const std::set<void*>& slots, const HeapBase& heap,
MutatorMarkingState& mutator_marking_state) {
for (void* slot : slots) {
// Slot must always point to a valid, not freed object.
@@ -51,7 +51,8 @@ void VisitRememberedSlots(std::set<void*> slots, const HeapBase& heap,
// Visits source objects that were recorded in the generational barrier for
// slots.
void VisitRememberedSourceObjects(
- std::set<HeapObjectHeader*> remembered_source_objects, Visitor& visitor) {
+ const std::set<HeapObjectHeader*>& remembered_source_objects,
+ Visitor& visitor) {
for (HeapObjectHeader* source_hoh : remembered_source_objects) {
DCHECK(source_hoh);
// The age checking in the generational barrier is imprecise, since a card
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index 42b2188913..e19e13b614 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -120,13 +120,18 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
gc_state_ = GarbageCollectionState::kSweeping;
current_.marked_bytes = marked_bytes;
current_.object_size_before_sweep_bytes =
- previous_.marked_bytes + allocated_bytes_since_end_of_marking_ +
+ marked_bytes_so_far_ + allocated_bytes_since_end_of_marking_ +
allocated_bytes_since_safepoint_ -
explicitly_freed_bytes_since_safepoint_;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
+
+ if (current_.collection_type == CollectionType::kMajor)
+ marked_bytes_so_far_ = 0;
+ marked_bytes_so_far_ += marked_bytes;
+
#ifdef CPPGC_VERIFY_HEAP
- tracked_live_bytes_ = marked_bytes;
+ tracked_live_bytes_ = marked_bytes_so_far_;
#endif // CPPGC_VERIFY_HEAP
DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
@@ -134,8 +139,8 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
current_.memory_size_before_sweep_bytes = memory_allocated_bytes_;
memory_freed_bytes_since_end_of_marking_ = 0;
- ForAllAllocationObservers([marked_bytes](AllocationObserver* observer) {
- observer->ResetAllocatedObjectSize(marked_bytes);
+ ForAllAllocationObservers([this](AllocationObserver* observer) {
+ observer->ResetAllocatedObjectSize(marked_bytes_so_far_);
});
// HeapGrowing would use the below fields to estimate allocation rate during
@@ -154,20 +159,23 @@ double StatsCollector::GetRecentAllocationSpeedInBytesPerMs() const {
namespace {
-int64_t SumPhases(const MetricRecorder::FullCycle::Phases& phases) {
+int64_t SumPhases(const MetricRecorder::GCCycle::Phases& phases) {
return phases.mark_duration_us + phases.weak_duration_us +
phases.compact_duration_us + phases.sweep_duration_us;
}
-MetricRecorder::FullCycle GetFullCycleEventForMetricRecorder(
- int64_t atomic_mark_us, int64_t atomic_weak_us, int64_t atomic_compact_us,
- int64_t atomic_sweep_us, int64_t incremental_mark_us,
- int64_t incremental_sweep_us, int64_t concurrent_mark_us,
- int64_t concurrent_sweep_us, int64_t objects_before_bytes,
- int64_t objects_after_bytes, int64_t objects_freed_bytes,
- int64_t memory_before_bytes, int64_t memory_after_bytes,
- int64_t memory_freed_bytes) {
- MetricRecorder::FullCycle event;
+MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
+ StatsCollector::CollectionType type, int64_t atomic_mark_us,
+ int64_t atomic_weak_us, int64_t atomic_compact_us, int64_t atomic_sweep_us,
+ int64_t incremental_mark_us, int64_t incremental_sweep_us,
+ int64_t concurrent_mark_us, int64_t concurrent_sweep_us,
+ int64_t objects_before_bytes, int64_t objects_after_bytes,
+ int64_t objects_freed_bytes, int64_t memory_before_bytes,
+ int64_t memory_after_bytes, int64_t memory_freed_bytes) {
+ MetricRecorder::GCCycle event;
+ event.type = (type == StatsCollector::CollectionType::kMajor)
+ ? MetricRecorder::GCCycle::Type::kMajor
+ : MetricRecorder::GCCycle::Type::kMinor;
// MainThread.Incremental:
event.main_thread_incremental.mark_duration_us = incremental_mark_us;
event.main_thread_incremental.sweep_duration_us = incremental_sweep_us;
@@ -223,7 +231,8 @@ void StatsCollector::NotifySweepingCompleted() {
previous_ = std::move(current_);
current_ = Event();
if (metric_recorder_) {
- MetricRecorder::FullCycle event = GetFullCycleEventForMetricRecorder(
+ MetricRecorder::GCCycle event = GetCycleEventForMetricRecorder(
+ previous_.collection_type,
previous_.scope_data[kAtomicMark].InMicroseconds(),
previous_.scope_data[kAtomicWeak].InMicroseconds(),
previous_.scope_data[kAtomicCompact].InMicroseconds(),
@@ -233,9 +242,9 @@ void StatsCollector::NotifySweepingCompleted() {
previous_.concurrent_scope_data[kConcurrentMark],
previous_.concurrent_scope_data[kConcurrentSweep],
previous_.object_size_before_sweep_bytes /* objects_before */,
- previous_.marked_bytes /* objects_after */,
+ marked_bytes_so_far_ /* objects_after */,
previous_.object_size_before_sweep_bytes -
- previous_.marked_bytes /* objects_freed */,
+ marked_bytes_so_far_ /* objects_freed */,
previous_.memory_size_before_sweep_bytes /* memory_before */,
previous_.memory_size_before_sweep_bytes -
memory_freed_bytes_since_end_of_marking_ /* memory_after */,
@@ -249,26 +258,17 @@ size_t StatsCollector::allocated_memory_size() const {
}
size_t StatsCollector::allocated_object_size() const {
- // During sweeping we refer to the current Event as that already holds the
- // correct marking information. In all other phases, the previous event holds
- // the most up-to-date marking information.
- const Event& event =
- gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
- DCHECK_GE(static_cast<int64_t>(event.marked_bytes) +
- allocated_bytes_since_end_of_marking_,
- 0);
- return static_cast<size_t>(static_cast<int64_t>(event.marked_bytes) +
- allocated_bytes_since_end_of_marking_);
+ return marked_bytes_so_far_ + allocated_bytes_since_end_of_marking_;
}
size_t StatsCollector::marked_bytes() const {
DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
- // During sweeping we refer to the current Event as that already holds the
- // correct marking information. In all other phases, the previous event holds
- // the most up-to-date marking information.
- const Event& event =
- gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
- return event.marked_bytes;
+ return marked_bytes_so_far_;
+}
+
+size_t StatsCollector::marked_bytes_on_current_cycle() const {
+ DCHECK_NE(GarbageCollectionState::kNotRunning, gc_state_);
+ return current_.marked_bytes;
}
v8::base::TimeDelta StatsCollector::marking_time() const {
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index 934316a1dc..5c07a279c7 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -66,10 +66,11 @@ namespace internal {
// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
- using CollectionType = GarbageCollector::Config::CollectionType;
using IsForcedGC = GarbageCollector::Config::IsForcedGC;
public:
+ using CollectionType = GarbageCollector::Config::CollectionType;
+
#if defined(CPPGC_DECLARE_ENUM)
static_assert(false, "CPPGC_DECLARE_ENUM macro is already defined");
#endif
@@ -281,9 +282,15 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// bytes and the bytes allocated since last marking.
size_t allocated_object_size() const;
- // Returns the most recent marked bytes count. Should not be called during
+  // Returns the overall marked bytes count, i.e., if the young generation is
+ // enabled, it returns the accumulated number. Should not be called during
// marking.
size_t marked_bytes() const;
+
+ // Returns the marked bytes for the current cycle. Should only be called
+  // within a GC cycle.
+ size_t marked_bytes_on_current_cycle() const;
+
// Returns the overall duration of the most recent marking phase. Should not
// be called during marking.
v8::base::TimeDelta marking_time() const;
@@ -340,6 +347,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
size_t tracked_live_bytes_ = 0;
#endif // CPPGC_VERIFY_HEAP
+  // The number of bytes marked so far. For the young generation (with sticky
+  // bits), this keeps track of marked bytes across multiple GC cycles.
+ size_t marked_bytes_so_far_ = 0;
+
int64_t memory_allocated_bytes_ = 0;
int64_t memory_freed_bytes_since_end_of_marking_ = 0;
std::atomic<size_t> discarded_bytes_{0};
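
For readers tracking the marked_bytes_so_far_ change in StatsCollector above, here is a minimal standalone sketch of the accumulation pattern it introduces: the running total is reset at the start of a major cycle and keeps growing across minor cycles. The class, enum, and field names below are illustrative only and are not part of the V8 or cppgc API.

// marked_bytes_sketch.cc - illustrative only; not V8/cppgc code.
#include <cassert>
#include <cstddef>

enum class CollectionType { kMinor, kMajor };

class MarkedBytesTracker {
 public:
  // Mirrors the idea behind marked_bytes_so_far_: a major (full) GC re-marks
  // the whole heap, so the running total starts over; a minor GC only adds
  // the bytes it marked in the young generation.
  void NotifyMarkingCompleted(CollectionType type, std::size_t marked_bytes) {
    if (type == CollectionType::kMajor) marked_bytes_so_far_ = 0;
    marked_bytes_so_far_ += marked_bytes;
  }

  std::size_t marked_bytes_so_far() const { return marked_bytes_so_far_; }

 private:
  std::size_t marked_bytes_so_far_ = 0;
};

int main() {
  MarkedBytesTracker tracker;
  tracker.NotifyMarkingCompleted(CollectionType::kMajor, 1000);  // full GC
  tracker.NotifyMarkingCompleted(CollectionType::kMinor, 200);   // young GC
  assert(tracker.marked_bytes_so_far() == 1200);  // accumulated across cycles
  tracker.NotifyMarkingCompleted(CollectionType::kMajor, 800);   // full GC
  assert(tracker.marked_bytes_so_far() == 800);   // reset, then re-marked
  return 0;
}

Compiled as plain C++ with assertions enabled, the asserts document the expected totals after a major, a minor, and another major cycle.
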
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 0aa12a614a..1f12ae19fd 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -37,7 +37,16 @@ class ObjectStartBitmapVerifier
friend class HeapVisitor<ObjectStartBitmapVerifier>;
public:
- void Verify(RawHeap& heap) { Traverse(heap); }
+ void Verify(RawHeap& heap) {
+#if DEBUG
+ Traverse(heap);
+#endif // DEBUG
+ }
+ void Verify(NormalPage& page) {
+#if DEBUG
+ Traverse(page);
+#endif // DEBUG
+ }
private:
bool VisitNormalPage(NormalPage& page) {
@@ -51,9 +60,10 @@ class ObjectStartBitmapVerifier
if (header.IsLargeObject()) return true;
auto* raw_header = reinterpret_cast<ConstAddress>(&header);
- CHECK(bitmap_->CheckBit(raw_header));
+ CHECK(bitmap_->CheckBit<AccessMode::kAtomic>(raw_header));
if (prev_) {
- CHECK_EQ(prev_, bitmap_->FindHeader(raw_header - 1));
+ // No other bits in the range [prev_, raw_header) should be set.
+ CHECK_EQ(prev_, bitmap_->FindHeader<AccessMode::kAtomic>(raw_header - 1));
}
prev_ = &header;
return true;
@@ -286,14 +296,26 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
FinalizationBuilder builder(*page, page_allocator);
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
- bitmap.Clear();
size_t largest_new_free_list_entry = 0;
size_t live_bytes = 0;
Address start_of_gap = page->PayloadStart();
+
+ const auto clear_bit_if_coalesced_entry = [&bitmap,
+ &start_of_gap](Address address) {
+ if (address != start_of_gap) {
+ // Clear only if not the first freed entry.
+ bitmap.ClearBit<AccessMode::kAtomic>(address);
+ } else {
+ // Otherwise check that the bit is set.
+ DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(address));
+ }
+ };
+
for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
begin != end;) {
+ DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(begin));
HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
const size_t size = header->AllocatedSize();
// Check if this is a free list entry.
@@ -302,12 +324,14 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
// This prevents memory from being discarded in configurations where
// `CheckMemoryIsInaccessibleIsNoop()` is false.
CheckMemoryIsInaccessible(header, size);
+ clear_bit_if_coalesced_entry(begin);
begin += size;
continue;
}
// Check if object is not marked (not reachable).
if (!header->IsMarked<kAtomicAccess>()) {
builder.AddFinalizer(header, size);
+ clear_bit_if_coalesced_entry(begin);
begin += size;
continue;
}
@@ -317,12 +341,11 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
size_t new_free_list_entry_size =
static_cast<size_t>(header_address - start_of_gap);
builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
+ DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
largest_new_free_list_entry =
std::max(largest_new_free_list_entry, new_free_list_entry_size);
- bitmap.SetBit(start_of_gap);
}
StickyUnmark(header);
- bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
live_bytes += size;
@@ -332,7 +355,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
start_of_gap != page->PayloadEnd()) {
builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
- bitmap.SetBit(start_of_gap);
+ DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
}
page->SetAllocatedBytesAtLastGC(live_bytes);
@@ -399,7 +422,7 @@ class SweepFinalizer final {
#if defined(CPPGC_CAGED_HEAP)
const uint64_t cage_base =
reinterpret_cast<uint64_t>(page->heap().caged_heap().base());
- HeapObjectHeader* next_unfinalized = 0;
+ HeapObjectHeader* next_unfinalized = nullptr;
for (auto* unfinalized_header = page_state->unfinalized_objects_head;
unfinalized_header; unfinalized_header = next_unfinalized) {
@@ -437,6 +460,10 @@ class SweepFinalizer final {
largest_new_free_list_entry_ = std::max(
page_state->largest_new_free_list_entry, largest_new_free_list_entry_);
+ // After the page was fully finalized and freelists have been merged, verify
+ // that the bitmap is consistent.
+ ObjectStartBitmapVerifier().Verify(static_cast<NormalPage&>(*page));
+
// Add the page to the space.
page->space().AddPage(page);
}
@@ -532,6 +559,9 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
if (result.is_empty) {
NormalPage::Destroy(&page);
} else {
+      // The page was eagerly finalized and all freelists have been merged.
+ // Verify that the bitmap is consistent with headers.
+ ObjectStartBitmapVerifier().Verify(page);
page.space().AddPage(&page);
largest_new_free_list_entry_ = std::max(
result.largest_new_free_list_entry, largest_new_free_list_entry_);
@@ -714,10 +744,9 @@ class Sweeper::SweeperImpl final {
is_in_progress_ = true;
platform_ = platform;
config_ = config;
-#if DEBUG
+
// Verify bitmap for all spaces regardless of |compactable_space_handling|.
ObjectStartBitmapVerifier().Verify(heap_);
-#endif
// If inaccessible memory is touched to check whether it is set up
// correctly it cannot be discarded.
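
The SweepNormalPage change above stops clearing the whole object-start bitmap up front; instead it keeps every start bit and clears only the bits of entries that get coalesced into an existing free gap, leaving one bit per resulting free-list entry. Below is a self-contained sketch of that idea, with std::set<std::size_t> standing in for the bitmap; the types and helper names are invented for illustration and are not the V8 implementation.

// sweep_bitmap_sketch.cc - illustrative only; not the V8 sweeper.
#include <cassert>
#include <cstddef>
#include <set>
#include <vector>

struct Object {
  std::size_t address;
  std::size_t size;
  bool live;
};

// Keeps one start bit per live object and one per coalesced run of dead
// objects (the run becomes a single free-list entry); bits of dead objects
// merged into an existing gap are cleared, as in SweepNormalPage above.
std::set<std::size_t> Sweep(const std::vector<Object>& objects) {
  std::set<std::size_t> bitmap;
  for (const Object& o : objects) bitmap.insert(o.address);  // all bits set

  bool in_gap = false;
  for (const Object& o : objects) {
    if (!o.live) {
      if (in_gap) bitmap.erase(o.address);  // coalesced entry: clear its bit
      in_gap = true;                        // first dead entry keeps its bit
    } else {
      in_gap = false;                       // a live object closes the gap
    }
  }
  return bitmap;
}

int main() {
  // live, dead, dead, live: bits stay at 0, 16 (gap start) and 48; 32 is
  // coalesced into the gap starting at 16, so its bit is cleared.
  const std::vector<Object> page = {
      {0, 16, true}, {16, 16, false}, {32, 16, false}, {48, 16, true}};
  const std::set<std::size_t> bits = Sweep(page);
  assert(bits.count(0) == 1 && bits.count(16) == 1);
  assert(bits.count(32) == 0 && bits.count(48) == 1);
  return 0;
}
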
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index c533c353c3..242a450cb2 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -135,7 +135,7 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
// results in applying the generational barrier.
if (local_data.heap_base.in_atomic_pause()) return;
- if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
+ if (value_offset > 0 && age_table.GetAge(value_offset) == AgeTable::Age::kOld)
return;
// Record slot.
@@ -149,7 +149,7 @@ void WriteBarrier::GenerationalBarrierForSourceObjectSlow(
auto& object_header =
BasePage::FromInnerAddress(&local_data.heap_base, inner_pointer)
- ->ObjectHeaderFromInnerAddress(inner_pointer);
+ ->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(inner_pointer);
// Record the source object.
local_data.heap_base.remembered_set().AddSourceObject(
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 5c31a72186..781a0bef38 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -144,6 +144,24 @@ Handle<FixedArray> FactoryBase<Impl>::NewFixedArrayWithFiller(
}
template <typename Impl>
+Handle<FixedArray> FactoryBase<Impl>::NewFixedArrayWithZeroes(
+ int length, AllocationType allocation) {
+ DCHECK_LE(0, length);
+ if (length == 0) return impl()->empty_fixed_array();
+ if (length > FixedArray::kMaxLength) {
+ FATAL("Invalid FixedArray size %d", length);
+ }
+ HeapObject result = AllocateRawFixedArray(length, allocation);
+ DisallowGarbageCollection no_gc;
+ result.set_map_after_allocation(read_only_roots().fixed_array_map(),
+ SKIP_WRITE_BARRIER);
+ FixedArray array = FixedArray::cast(result);
+ array.set_length(length);
+ MemsetTagged(array.data_start(), Smi::zero(), length);
+ return handle(array, isolate());
+}
+
+template <typename Impl>
Handle<FixedArrayBase> FactoryBase<Impl>::NewFixedDoubleArray(
int length, AllocationType allocation) {
if (length == 0) return impl()->empty_fixed_array();
@@ -229,7 +247,7 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
instance.set_parameter_count(parameter_count);
instance.set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance.set_osr_loop_nesting_level(0);
+ instance.reset_osr_urgency_and_install_target();
instance.set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance.set_constant_pool(*constant_pool);
instance.set_handler_table(read_only_roots().empty_byte_array(),
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index 2a8eae50c9..53e83a87d6 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -112,6 +112,10 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<FixedArray> NewFixedArrayWithHoles(
int length, AllocationType allocation = AllocationType::kYoung);
+ // Allocate a new fixed array with Smi(0) entries.
+ Handle<FixedArray> NewFixedArrayWithZeroes(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
// Allocate a new uninitialized fixed double array.
// The function returns a pre-allocated empty fixed array for length = 0,
// so the return type must be the general fixed array class.
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index d41521cdba..1ddacae0a5 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -509,7 +509,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector.set_length(length);
vector.set_invocation_count(0);
vector.set_profiler_ticks(0);
- vector.InitializeOptimizationState();
+ vector.reset_flags();
vector.set_closure_feedback_cell_array(*closure_feedback_cell_array);
// TODO(leszeks): Initialize based on the feedback metadata.
@@ -1184,7 +1184,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
context.set_math_random_index(Smi::zero());
context.set_serialized_objects(*empty_fixed_array());
context.set_microtask_queue(isolate(), nullptr);
- context.set_osr_code_cache(*empty_weak_fixed_array());
+ context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate()));
context.set_retained_maps(*empty_weak_array_list());
return handle(context, isolate());
}
@@ -1680,7 +1680,7 @@ Handle<WasmArray> Factory::NewWasmArrayFromMemory(uint32_t length,
result.set_length(length);
MemCopy(reinterpret_cast<void*>(result.ElementAddress(0)),
reinterpret_cast<void*>(source),
- length * element_type.element_size_bytes());
+ length * element_type.value_kind_size());
return handle(result, isolate());
}
@@ -2240,8 +2240,7 @@ DEFINE_ERROR(WasmExceptionError, wasm_exception_error)
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
- Handle<NativeContext> native_context(function->context().native_context(),
- isolate());
+ Handle<NativeContext> native_context(function->native_context(), isolate());
Handle<Map> new_map;
if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared().kind()))) {
new_map = handle(native_context->async_generator_object_prototype_map(),
@@ -2416,7 +2415,7 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(Handle<BytecodeArray> source) {
copy.set_handler_table(raw_source.handler_table());
copy.set_source_position_table(raw_source.source_position_table(kAcquireLoad),
kReleaseStore);
- copy.set_osr_loop_nesting_level(raw_source.osr_loop_nesting_level());
+ copy.set_osr_urgency(raw_source.osr_urgency());
copy.set_bytecode_age(raw_source.bytecode_age());
raw_source.CopyBytecodesTo(copy);
return handle(copy, isolate());
@@ -2560,7 +2559,8 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
InitializeJSObjectFromMap(js_obj, *empty_fixed_array(), *map);
- DCHECK(js_obj.HasFastElements() || js_obj.HasTypedArrayElements() ||
+ DCHECK(js_obj.HasFastElements() ||
+ js_obj.HasTypedArrayOrRabGsabTypedArrayElements() ||
js_obj.HasFastStringWrapperElements() ||
js_obj.HasFastArgumentsElements() || js_obj.HasDictionaryElements());
return handle(js_obj, isolate());
@@ -3460,6 +3460,16 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
return handle(info, isolate());
}
+Handle<PromiseOnStack> Factory::NewPromiseOnStack(Handle<Object> prev,
+ Handle<JSObject> promise) {
+ PromiseOnStack promise_on_stack = NewStructInternal<PromiseOnStack>(
+ PROMISE_ON_STACK_TYPE, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ promise_on_stack.set_prev(*prev, SKIP_WRITE_BARRIER);
+ promise_on_stack.set_promise(*MaybeObjectHandle::Weak(promise));
+ return handle(promise_on_stack, isolate());
+}
+
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared().language_mode()) ||
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 0387482010..08d51cad4c 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -407,6 +407,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
int bytecode_offset_or_source_position, Handle<String> function_name,
bool is_constructor);
+ Handle<PromiseOnStack> NewPromiseOnStack(Handle<Object> prev,
+ Handle<JSObject> promise);
+
// Allocate various microtasks.
Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
Handle<Context> context);
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index e6ffbe1796..1c339ea67c 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -5,7 +5,6 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/flags/flags.h"
-#include "src/heap/gc-tracer.h"
#include "src/utils/utils.h"
namespace v8 {
diff --git a/deps/v8/src/heap/gc-tracer-inl.h b/deps/v8/src/heap/gc-tracer-inl.h
new file mode 100644
index 0000000000..409641154b
--- /dev/null
+++ b/deps/v8/src/heap/gc-tracer-inl.h
@@ -0,0 +1,174 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_GC_TRACER_INL_H_
+#define V8_HEAP_GC_TRACER_INL_H_
+
+#include "src/base/platform/platform.h"
+#include "src/execution/isolate.h"
+#include "src/heap/gc-tracer.h"
+
+namespace v8 {
+namespace internal {
+
+GCTracer::IncrementalMarkingInfos::IncrementalMarkingInfos()
+ : duration(0), longest_step(0), steps(0) {}
+
+void GCTracer::IncrementalMarkingInfos::Update(double delta) {
+ steps++;
+ duration += delta;
+ if (delta > longest_step) {
+ longest_step = delta;
+ }
+}
+
+void GCTracer::IncrementalMarkingInfos::ResetCurrentCycle() {
+ duration = 0;
+ longest_step = 0;
+ steps = 0;
+}
+
+GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
+ : tracer_(tracer),
+ scope_(scope),
+ thread_kind_(thread_kind),
+ start_time_(tracer_->MonotonicallyIncreasingTimeInMs()) {
+#ifdef V8_RUNTIME_CALL_STATS
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
+ if (thread_kind_ == ThreadKind::kMain) {
+#if DEBUG
+ AssertMainThread();
+#endif // DEBUG
+ runtime_stats_ = tracer_->heap_->isolate_->counters()->runtime_call_stats();
+ runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
+ } else {
+ runtime_call_stats_scope_.emplace(
+ tracer->worker_thread_runtime_call_stats());
+ runtime_stats_ = runtime_call_stats_scope_->Get();
+ runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
+ }
+#endif // defined(V8_RUNTIME_CALL_STATS)
+}
+
+GCTracer::Scope::~Scope() {
+ double duration_ms = tracer_->MonotonicallyIncreasingTimeInMs() - start_time_;
+ tracer_->AddScopeSample(scope_, duration_ms);
+
+ if (thread_kind_ == ThreadKind::kMain) {
+#if DEBUG
+ AssertMainThread();
+#endif // DEBUG
+
+ if (scope_ == ScopeId::MC_INCREMENTAL ||
+ scope_ == ScopeId::MC_INCREMENTAL_START ||
+ scope_ == ScopeId::MC_INCREMENTAL_FINALIZE) {
+ auto* long_task_stats =
+ tracer_->heap_->isolate_->GetCurrentLongTaskStats();
+ long_task_stats->gc_full_incremental_wall_clock_duration_us +=
+ static_cast<int64_t>(duration_ms *
+ base::Time::kMicrosecondsPerMillisecond);
+ }
+ }
+
+#ifdef V8_RUNTIME_CALL_STATS
+ if (V8_LIKELY(runtime_stats_ == nullptr)) return;
+ runtime_stats_->Leave(&timer_);
+#endif // defined(V8_RUNTIME_CALL_STATS)
+}
+
+constexpr int GCTracer::Scope::IncrementalOffset(ScopeId id) {
+ DCHECK_LE(FIRST_INCREMENTAL_SCOPE, id);
+ DCHECK_GE(LAST_INCREMENTAL_SCOPE, id);
+ return id - FIRST_INCREMENTAL_SCOPE;
+}
+
+constexpr bool GCTracer::Event::IsYoungGenerationEvent(Type type) {
+ DCHECK_NE(START, type);
+ return type == SCAVENGER || type == MINOR_MARK_COMPACTOR;
+}
+
+CollectionEpoch GCTracer::CurrentEpoch(Scope::ScopeId id) const {
+ return Scope::NeedsYoungEpoch(id) ? epoch_young_ : epoch_full_;
+}
+
+#ifdef DEBUG
+bool GCTracer::IsInObservablePause() const {
+ return 0.0 < start_of_observable_pause_;
+}
+
+bool GCTracer::IsConsistentWithCollector(GarbageCollector collector) const {
+ return (collector == GarbageCollector::SCAVENGER &&
+ current_.type == Event::SCAVENGER) ||
+ (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
+ current_.type == Event::MINOR_MARK_COMPACTOR) ||
+ (collector == GarbageCollector::MARK_COMPACTOR &&
+ (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR));
+}
+
+bool GCTracer::IsSweepingInProgress() const {
+ return (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
+ current_.state == Event::State::SWEEPING;
+}
+#endif
+
+constexpr double GCTracer::current_scope(Scope::ScopeId id) const {
+ if (Scope::FIRST_INCREMENTAL_SCOPE <= id &&
+ id <= Scope::LAST_INCREMENTAL_SCOPE) {
+ return incremental_scope(id).duration;
+ } else if (Scope::FIRST_BACKGROUND_SCOPE <= id &&
+ id <= Scope::LAST_BACKGROUND_SCOPE) {
+ return background_counter_[id].total_duration_ms;
+ } else {
+ DCHECK_GT(Scope::NUMBER_OF_SCOPES, id);
+ return current_.scopes[id];
+ }
+}
+
+constexpr const GCTracer::IncrementalMarkingInfos& GCTracer::incremental_scope(
+ Scope::ScopeId id) const {
+ return incremental_scopes_[Scope::IncrementalOffset(id)];
+}
+
+void GCTracer::AddScopeSample(Scope::ScopeId id, double duration) {
+ if (Scope::FIRST_INCREMENTAL_SCOPE <= id &&
+ id <= Scope::LAST_INCREMENTAL_SCOPE) {
+ incremental_scopes_[Scope::IncrementalOffset(id)].Update(duration);
+ } else if (Scope::FIRST_BACKGROUND_SCOPE <= id &&
+ id <= Scope::LAST_BACKGROUND_SCOPE) {
+ base::MutexGuard guard(&background_counter_mutex_);
+ background_counter_[id].total_duration_ms += duration;
+ } else {
+ DCHECK_GT(Scope::NUMBER_OF_SCOPES, id);
+ current_.scopes[id] += duration;
+ }
+}
+
+#ifdef V8_RUNTIME_CALL_STATS
+WorkerThreadRuntimeCallStats* GCTracer::worker_thread_runtime_call_stats() {
+ return heap_->isolate_->counters()->worker_thread_runtime_call_stats();
+}
+
+RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
+ STATIC_ASSERT(Scope::FIRST_SCOPE == Scope::MC_INCREMENTAL);
+ return static_cast<RuntimeCallCounterId>(
+ static_cast<int>(RuntimeCallCounterId::kGC_MC_INCREMENTAL) +
+ static_cast<int>(id));
+}
+#endif // defined(V8_RUNTIME_CALL_STATS)
+
+double GCTracer::MonotonicallyIncreasingTimeInMs() {
+ if (V8_UNLIKELY(FLAG_predictable)) {
+ return heap_->MonotonicallyIncreasingTimeInMs();
+ } else {
+ return base::TimeTicks::Now().ToInternalValue() /
+ static_cast<double>(base::Time::kMicrosecondsPerMillisecond);
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_GC_TRACER_INL_H_
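
The new gc-tracer-inl.h above turns GCTracer::Scope into an inline RAII timer whose destructor records the sample. The following standalone sketch shows the same construct-on-entry, record-on-destruction pattern in isolation; ScopeTimer and its output format are assumptions for illustration, not V8 code.

// scope_timer_sketch.cc - illustrative only; not the V8 GCTracer API.
#include <chrono>
#include <cstdio>

class ScopeTimer {
 public:
  explicit ScopeTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}

  // Recording in the destructor guarantees a sample on every exit path,
  // which is the property GCTracer::Scope relies on for scope timing.
  ~ScopeTimer() {
    const auto end = std::chrono::steady_clock::now();
    const double ms =
        std::chrono::duration<double, std::milli>(end - start_).count();
    std::printf("%s took %.3f ms\n", name_, ms);
  }

  ScopeTimer(const ScopeTimer&) = delete;
  ScopeTimer& operator=(const ScopeTimer&) = delete;

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  {
    ScopeTimer timer("work");                     // sample starts here
    volatile long sum = 0;
    for (long i = 0; i < 1000000; ++i) sum += i;  // stand-in for GC work
  }                                               // sample is recorded here
  return 0;
}
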
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 9dbaa9717d..44d0b95110 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -10,11 +10,12 @@
#include "src/base/atomic-utils.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
-#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/cppgc/metric-recorder.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/logging/counters.h"
@@ -36,28 +37,6 @@ static size_t CountTotalHolesSize(Heap* heap) {
return holes_size;
}
-#ifdef V8_RUNTIME_CALL_STATS
-WorkerThreadRuntimeCallStats* GCTracer::worker_thread_runtime_call_stats() {
- return heap_->isolate()->counters()->worker_thread_runtime_call_stats();
-}
-
-RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
- STATIC_ASSERT(Scope::FIRST_SCOPE == Scope::MC_INCREMENTAL);
- return static_cast<RuntimeCallCounterId>(
- static_cast<int>(RuntimeCallCounterId::kGC_MC_INCREMENTAL) +
- static_cast<int>(id));
-}
-#endif // defined(V8_RUNTIME_CALL_STATS)
-
-double GCTracer::MonotonicallyIncreasingTimeInMs() {
- if (V8_UNLIKELY(FLAG_predictable)) {
- return heap_->MonotonicallyIncreasingTimeInMs();
- } else {
- return base::TimeTicks::Now().ToInternalValue() /
- static_cast<double>(base::Time::kMicrosecondsPerMillisecond);
- }
-}
-
namespace {
std::atomic<CollectionEpoch> global_epoch{0};
@@ -66,55 +45,6 @@ CollectionEpoch next_epoch() {
}
} // namespace
-GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
- : tracer_(tracer), scope_(scope), thread_kind_(thread_kind) {
- start_time_ = tracer_->MonotonicallyIncreasingTimeInMs();
-#ifdef V8_RUNTIME_CALL_STATS
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
- if (thread_kind_ == ThreadKind::kMain) {
-#if DEBUG
- AssertMainThread();
-#endif // DEBUG
- runtime_stats_ =
- tracer_->heap_->isolate()->counters()->runtime_call_stats();
- runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
- } else {
- runtime_call_stats_scope_.emplace(
- tracer->worker_thread_runtime_call_stats());
- runtime_stats_ = runtime_call_stats_scope_->Get();
- runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
- }
-#endif // defined(V8_RUNTIME_CALL_STATS)
-}
-
-GCTracer::Scope::~Scope() {
- double duration_ms = tracer_->MonotonicallyIncreasingTimeInMs() - start_time_;
-
- if (thread_kind_ == ThreadKind::kMain) {
-#if DEBUG
- AssertMainThread();
-#endif // DEBUG
-
- tracer_->AddScopeSample(scope_, duration_ms);
- if (scope_ == ScopeId::MC_INCREMENTAL ||
- scope_ == ScopeId::MC_INCREMENTAL_START ||
- scope_ == ScopeId::MC_INCREMENTAL_FINALIZE) {
- auto* long_task_stats =
- tracer_->heap_->isolate()->GetCurrentLongTaskStats();
- long_task_stats->gc_full_incremental_wall_clock_duration_us +=
- static_cast<int64_t>(duration_ms *
- base::Time::kMicrosecondsPerMillisecond);
- }
- } else {
- tracer_->AddScopeSampleBackground(scope_, duration_ms);
- }
-
-#ifdef V8_RUNTIME_CALL_STATS
- if (V8_LIKELY(runtime_stats_ == nullptr)) return;
- runtime_stats_->Leave(&timer_);
-#endif // defined(V8_RUNTIME_CALL_STATS)
-}
-
#if DEBUG
void GCTracer::Scope::AssertMainThread() {
Isolate* isolate = tracer_->heap_->isolate();
@@ -274,7 +204,9 @@ void GCTracer::ResetForTesting() {
previous_ = current_;
start_of_observable_pause_ = 0.0;
notified_sweeping_completed_ = false;
- notified_cppgc_completed_ = false;
+ notified_full_cppgc_completed_ = false;
+ notified_young_cppgc_completed_ = false;
+ notified_young_cppgc_running_ = false;
young_gc_while_full_gc_ = false;
ResetIncrementalMarkingCounters();
allocation_time_ms_ = 0.0;
@@ -419,7 +351,7 @@ void GCTracer::ResetIncrementalMarkingCounters() {
incremental_marking_bytes_ = 0;
incremental_marking_duration_ = 0;
for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- incremental_marking_scopes_[i].ResetCurrentCycle();
+ incremental_scopes_[i].ResetCurrentCycle();
}
}
@@ -459,35 +391,23 @@ void GCTracer::UpdateStatistics(GarbageCollector collector) {
MakeBytesAndDuration(current_.young_object_size, duration));
recorded_minor_gcs_survived_.Push(
MakeBytesAndDuration(current_.survived_young_object_size, duration));
- FetchBackgroundMinorGCCounters();
long_task_stats->gc_young_wall_clock_duration_us += duration_us;
} else {
if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
- current_.incremental_marking_bytes = incremental_marking_bytes_;
- current_.incremental_marking_duration = incremental_marking_duration_;
- for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
- current_.scopes[i] = incremental_marking_scopes_[i].duration;
- }
- RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
- current_.incremental_marking_duration);
+ RecordIncrementalMarkingSpeed(incremental_marking_bytes_,
+ incremental_marking_duration_);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.end_object_size, duration));
} else {
- DCHECK_EQ(0u, current_.incremental_marking_bytes);
- DCHECK_EQ(0, current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.end_object_size, duration));
}
RecordMutatorUtilization(current_.end_time,
- duration + current_.incremental_marking_duration);
+ duration + incremental_marking_duration_);
RecordGCSumCounters();
- ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
- FetchBackgroundMarkCompactCounters();
long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
}
- FetchBackgroundGeneralCounters();
heap_->UpdateTotalGCTime(duration);
@@ -514,6 +434,31 @@ void GCTracer::UpdateStatistics(GarbageCollector collector) {
}
}
+void GCTracer::FinalizeCurrentEvent() {
+ const bool is_young = Event::IsYoungGenerationEvent(current_.type);
+
+ if (is_young) {
+ FetchBackgroundMinorGCCounters();
+ } else {
+ if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
+ current_.incremental_marking_bytes = incremental_marking_bytes_;
+ current_.incremental_marking_duration = incremental_marking_duration_;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ current_.incremental_scopes[i] = incremental_scopes_[i];
+ current_.scopes[i] = incremental_scopes_[i].duration;
+ }
+ ResetIncrementalMarkingCounters();
+ } else {
+ DCHECK_EQ(0u, incremental_marking_bytes_);
+ DCHECK_EQ(0.0, incremental_marking_duration_);
+ DCHECK_EQ(0u, current_.incremental_marking_bytes);
+ DCHECK_EQ(0.0, current_.incremental_marking_duration);
+ }
+ FetchBackgroundMarkCompactCounters();
+ }
+ FetchBackgroundGeneralCounters();
+}
+
void GCTracer::StopAtomicPause() {
DCHECK_EQ(Event::State::ATOMIC, current_.state);
current_.state = Event::State::SWEEPING;
@@ -524,9 +469,7 @@ void GCTracer::StopCycle(GarbageCollector collector) {
current_.state = Event::State::NOT_RUNNING;
DCHECK(IsConsistentWithCollector(collector));
-
- Counters* counters = heap_->isolate()->counters();
- GarbageCollectionReason gc_reason = current_.gc_reason;
+ FinalizeCurrentEvent();
if (Heap::IsYoungGenerationCollector(collector)) {
ReportYoungCycleToRecorder();
@@ -540,7 +483,8 @@ void GCTracer::StopCycle(GarbageCollector collector) {
} else {
ReportFullCycleToRecorder();
- counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+ heap_->isolate()->counters()->mark_compact_reason()->AddSample(
+ static_cast<int>(current_.gc_reason));
if (FLAG_trace_gc_freelists) {
PrintIsolate(heap_->isolate(),
@@ -550,13 +494,29 @@ void GCTracer::StopCycle(GarbageCollector collector) {
}
}
-void GCTracer::StopCycleIfNeeded() {
+void GCTracer::StopFullCycleIfNeeded() {
if (current_.state != Event::State::SWEEPING) return;
if (!notified_sweeping_completed_) return;
- if (heap_->cpp_heap() && !notified_cppgc_completed_) return;
+ if (heap_->cpp_heap() && !notified_full_cppgc_completed_) return;
StopCycle(GarbageCollector::MARK_COMPACTOR);
notified_sweeping_completed_ = false;
- notified_cppgc_completed_ = false;
+ notified_full_cppgc_completed_ = false;
+}
+
+void GCTracer::StopYoungCycleIfNeeded() {
+ // We rely here on the fact that young GCs in V8 are atomic and by the time
+ // this is called, the Scavenger or Minor MC has already finished.
+ DCHECK(Event::IsYoungGenerationEvent(current_.type));
+ if (current_.state != Event::State::SWEEPING) return;
+ // Check if young cppgc was scheduled but hasn't completed yet.
+ if (heap_->cpp_heap() && notified_young_cppgc_running_ &&
+ !notified_young_cppgc_completed_)
+ return;
+ StopCycle(current_.type == Event::SCAVENGER
+ ? GarbageCollector::SCAVENGER
+ : GarbageCollector::MINOR_MARK_COMPACTOR);
+ notified_young_cppgc_running_ = false;
+ notified_young_cppgc_completed_ = false;
}
void GCTracer::NotifySweepingCompleted() {
@@ -586,19 +546,39 @@ void GCTracer::NotifySweepingCompleted() {
}
DCHECK(!notified_sweeping_completed_);
notified_sweeping_completed_ = true;
- StopCycleIfNeeded();
+ StopFullCycleIfNeeded();
}
-void GCTracer::NotifyCppGCCompleted() {
+void GCTracer::NotifyFullCppGCCompleted() {
// Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
// finished sweeping. This method is invoked by cppgc.
DCHECK(heap_->cpp_heap());
- DCHECK(CppHeap::From(heap_->cpp_heap())
- ->GetMetricRecorder()
- ->MetricsReportPending());
- DCHECK(!notified_cppgc_completed_);
- notified_cppgc_completed_ = true;
- StopCycleIfNeeded();
+ const auto* metric_recorder =
+ CppHeap::From(heap_->cpp_heap())->GetMetricRecorder();
+ USE(metric_recorder);
+ DCHECK(metric_recorder->FullGCMetricsReportPending());
+ DCHECK(!notified_full_cppgc_completed_);
+ notified_full_cppgc_completed_ = true;
+ StopFullCycleIfNeeded();
+}
+
+void GCTracer::NotifyYoungCppGCCompleted() {
+ // Stop a young GC cycle only when both v8 and cppgc (if available) GCs have
+ // finished sweeping. This method is invoked by cppgc.
+ DCHECK(heap_->cpp_heap());
+ DCHECK(notified_young_cppgc_running_);
+ const auto* metric_recorder =
+ CppHeap::From(heap_->cpp_heap())->GetMetricRecorder();
+ USE(metric_recorder);
+ DCHECK(metric_recorder->YoungGCMetricsReportPending());
+ DCHECK(!notified_young_cppgc_completed_);
+ notified_young_cppgc_completed_ = true;
+ StopYoungCycleIfNeeded();
+}
+
+void GCTracer::NotifyYoungCppGCRunning() {
+ DCHECK(!notified_young_cppgc_running_);
+ notified_young_cppgc_running_ = true;
}
void GCTracer::SampleAllocation(double current_ms,
@@ -666,7 +646,11 @@ void GCTracer::AddIncrementalMarkingStep(double duration, size_t bytes) {
incremental_marking_bytes_ += bytes;
incremental_marking_duration_ += duration;
}
- ReportIncrementalMarkingStepToRecorder();
+ ReportIncrementalMarkingStepToRecorder(duration);
+}
+
+void GCTracer::AddIncrementalSweepingStep(double duration) {
+ ReportIncrementalSweepingStepToRecorder(duration);
}
void GCTracer::Output(const char* format, ...) const {
@@ -698,12 +682,19 @@ void GCTracer::Print() const {
incremental_buffer, kIncrementalStatsSize,
" (+ %.1f ms in %d steps since start of marking, "
"biggest step %.1f ms, walltime since start of marking %.f ms)",
- current_.scopes[Scope::MC_INCREMENTAL],
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].longest_step,
+ current_scope(Scope::MC_INCREMENTAL),
+ incremental_scope(Scope::MC_INCREMENTAL).steps,
+ incremental_scope(Scope::MC_INCREMENTAL).longest_step,
current_.end_time - incremental_marking_start_time_);
}
+ const double total_external_time =
+ current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) +
+ current_scope(Scope::HEAP_EXTERNAL_EPILOGUE) +
+ current_scope(Scope::HEAP_EXTERNAL_PROLOGUE) +
+ current_scope(Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE) +
+ current_scope(Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
+
// Avoid PrintF as Output also appends the string to the tracing ring buffer
// that gets printed on OOM failures.
Output(
@@ -720,7 +711,7 @@ void GCTracer::Print() const {
static_cast<double>(current_.start_memory_size) / MB,
static_cast<double>(current_.end_object_size) / MB,
static_cast<double>(current_.end_memory_size) / MB, duration,
- TotalExternalTime(), incremental_buffer,
+ total_external_time, incremental_buffer,
AverageMarkCompactMutatorUtilization(),
CurrentMarkCompactMutatorUtilization(),
Heap::GarbageCollectionReasonToString(current_.gc_reason),
@@ -740,6 +731,9 @@ void GCTracer::PrintNVP() const {
current_.end_time - incremental_marking_start_time_;
}
+ // Avoid data races when printing the background scopes.
+ base::MutexGuard guard(&background_counter_mutex_);
+
switch (current_.type) {
case Event::SCAVENGER:
heap_->isolate()->PrintWithTimestamp(
@@ -789,31 +783,28 @@ void GCTracer::PrintNVP() const {
"unmapper_chunks=%d\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
- current_.scopes[Scope::HEAP_PROLOGUE],
- current_.scopes[Scope::HEAP_EPILOGUE],
- current_.scopes[Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE],
- current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
- current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
- current_.scopes[Scope::SCAVENGER_FAST_PROMOTE],
- current_.scopes[Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS],
- current_.scopes[Scope::SCAVENGER_SCAVENGE],
- current_.scopes[Scope::SCAVENGER_FREE_REMEMBERED_SET],
- current_.scopes[Scope::SCAVENGER_SCAVENGE_ROOTS],
- current_.scopes[Scope::SCAVENGER_SCAVENGE_WEAK],
- current_
- .scopes[Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY],
- current_
- .scopes[Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS],
- current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL],
- current_.scopes[Scope::SCAVENGER_SCAVENGE_UPDATE_REFS],
- current_.scopes[Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS],
- current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL],
- current_.scopes[Scope::BACKGROUND_UNMAPPER],
- current_.scopes[Scope::UNMAPPER],
- current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
- .steps,
- current_.scopes[Scope::MC_INCREMENTAL],
+ current_scope(Scope::HEAP_PROLOGUE),
+ current_scope(Scope::HEAP_EPILOGUE),
+ current_scope(Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE),
+ current_scope(Scope::HEAP_EXTERNAL_PROLOGUE),
+ current_scope(Scope::HEAP_EXTERNAL_EPILOGUE),
+ current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES),
+ current_scope(Scope::SCAVENGER_FAST_PROMOTE),
+ current_scope(Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS),
+ current_scope(Scope::SCAVENGER_SCAVENGE),
+ current_scope(Scope::SCAVENGER_FREE_REMEMBERED_SET),
+ current_scope(Scope::SCAVENGER_SCAVENGE_ROOTS),
+ current_scope(Scope::SCAVENGER_SCAVENGE_WEAK),
+ current_scope(Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY),
+ current_scope(Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS),
+ current_scope(Scope::SCAVENGER_SCAVENGE_PARALLEL),
+ current_scope(Scope::SCAVENGER_SCAVENGE_UPDATE_REFS),
+ current_scope(Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS),
+ current_scope(Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL),
+ current_scope(Scope::BACKGROUND_UNMAPPER),
+ current_scope(Scope::UNMAPPER),
+ incremental_scope(GCTracer::Scope::MC_INCREMENTAL).steps,
+ current_scope(Scope::MC_INCREMENTAL),
ScavengeSpeedInBytesPerMillisecond(), current_.start_object_size,
current_.end_object_size, current_.start_holes_size,
current_.end_holes_size, allocated_since_last_gc,
@@ -856,30 +847,29 @@ void GCTracer::PrintNVP() const {
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
- current_.scopes[Scope::MINOR_MC],
- current_.scopes[Scope::MINOR_MC_SWEEPING],
- current_.scopes[Scope::TIME_TO_SAFEPOINT],
- current_.scopes[Scope::MINOR_MC_MARK],
- current_.scopes[Scope::MINOR_MC_MARK_SEED],
- current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
- current_.scopes[Scope::MINOR_MC_MARK_WEAK],
- current_.scopes[Scope::MINOR_MC_MARK_GLOBAL_HANDLES],
- current_.scopes[Scope::MINOR_MC_CLEAR],
- current_.scopes[Scope::MINOR_MC_CLEAR_STRING_TABLE],
- current_.scopes[Scope::MINOR_MC_CLEAR_WEAK_LISTS],
- current_.scopes[Scope::MINOR_MC_EVACUATE],
- current_.scopes[Scope::MINOR_MC_EVACUATE_COPY],
- current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS],
- current_
- .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
- current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
- current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING],
- current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY],
- current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::BACKGROUND_UNMAPPER],
- current_.scopes[Scope::UNMAPPER],
- current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
- current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
+ current_scope(Scope::MINOR_MC),
+ current_scope(Scope::MINOR_MC_SWEEPING),
+ current_scope(Scope::TIME_TO_SAFEPOINT),
+ current_scope(Scope::MINOR_MC_MARK),
+ current_scope(Scope::MINOR_MC_MARK_SEED),
+ current_scope(Scope::MINOR_MC_MARK_ROOTS),
+ current_scope(Scope::MINOR_MC_MARK_WEAK),
+ current_scope(Scope::MINOR_MC_MARK_GLOBAL_HANDLES),
+ current_scope(Scope::MINOR_MC_CLEAR),
+ current_scope(Scope::MINOR_MC_CLEAR_STRING_TABLE),
+ current_scope(Scope::MINOR_MC_CLEAR_WEAK_LISTS),
+ current_scope(Scope::MINOR_MC_EVACUATE),
+ current_scope(Scope::MINOR_MC_EVACUATE_COPY),
+ current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS),
+ current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS),
+ current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS),
+ current_scope(Scope::MINOR_MC_BACKGROUND_MARKING),
+ current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY),
+ current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS),
+ current_scope(Scope::BACKGROUND_UNMAPPER),
+ current_scope(Scope::UNMAPPER),
+ current_scope(Scope::MINOR_MC_MARKING_DEQUE),
+ current_scope(Scope::MINOR_MC_RESET_LIVENESS));
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
@@ -978,85 +968,77 @@ void GCTracer::PrintNVP() const {
"unmapper_chunks=%d "
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
- current_.scopes[Scope::HEAP_PROLOGUE],
- current_.scopes[Scope::HEAP_EMBEDDER_TRACING_EPILOGUE],
- current_.scopes[Scope::HEAP_EPILOGUE],
- current_.scopes[Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE],
- current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
- current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
- current_.scopes[Scope::MC_CLEAR],
- current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
- current_.scopes[Scope::MC_CLEAR_MAPS],
- current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
- current_.scopes[Scope::MC_CLEAR_STRING_TABLE],
- current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
- current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
- current_.scopes[Scope::MC_CLEAR_WEAK_REFERENCES],
- current_.scopes[Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS],
- current_.scopes[Scope::MC_EPILOGUE],
- current_.scopes[Scope::MC_EVACUATE],
- current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
- current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
- current_.scopes[Scope::MC_EVACUATE_COPY],
- current_.scopes[Scope::MC_EVACUATE_PROLOGUE],
- current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
- current_.scopes[Scope::MC_EVACUATE_REBALANCE],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
- current_.scopes[Scope::MC_FINISH],
- current_.scopes[Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS],
- current_.scopes[Scope::MC_MARK],
- current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
- current_.scopes[Scope::MC_MARK_ROOTS],
- current_.scopes[Scope::MC_MARK_MAIN],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
- current_.scopes[Scope::MC_MARK_EMBEDDER_PROLOGUE],
- current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING],
- current_.scopes[Scope::MC_PROLOGUE], current_.scopes[Scope::MC_SWEEP],
- current_.scopes[Scope::MC_SWEEP_CODE],
- current_.scopes[Scope::MC_SWEEP_MAP],
- current_.scopes[Scope::MC_SWEEP_OLD],
- current_.scopes[Scope::MC_INCREMENTAL],
- current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
- current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE],
- current_.scopes[Scope::MC_INCREMENTAL_START],
- current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
- current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
- current_
- .incremental_marking_scopes
- [Scope::MC_INCREMENTAL_EMBEDDER_TRACING]
+ current_.reduce_memory, current_scope(Scope::TIME_TO_SAFEPOINT),
+ current_scope(Scope::HEAP_PROLOGUE),
+ current_scope(Scope::HEAP_EMBEDDER_TRACING_EPILOGUE),
+ current_scope(Scope::HEAP_EPILOGUE),
+ current_scope(Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE),
+ current_scope(Scope::HEAP_EXTERNAL_PROLOGUE),
+ current_scope(Scope::HEAP_EXTERNAL_EPILOGUE),
+ current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES),
+ current_scope(Scope::MC_CLEAR),
+ current_scope(Scope::MC_CLEAR_DEPENDENT_CODE),
+ current_scope(Scope::MC_CLEAR_MAPS),
+ current_scope(Scope::MC_CLEAR_SLOTS_BUFFER),
+ current_scope(Scope::MC_CLEAR_STRING_TABLE),
+ current_scope(Scope::MC_CLEAR_WEAK_COLLECTIONS),
+ current_scope(Scope::MC_CLEAR_WEAK_LISTS),
+ current_scope(Scope::MC_CLEAR_WEAK_REFERENCES),
+ current_scope(Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS),
+ current_scope(Scope::MC_EPILOGUE), current_scope(Scope::MC_EVACUATE),
+ current_scope(Scope::MC_EVACUATE_CANDIDATES),
+ current_scope(Scope::MC_EVACUATE_CLEAN_UP),
+ current_scope(Scope::MC_EVACUATE_COPY),
+ current_scope(Scope::MC_EVACUATE_PROLOGUE),
+ current_scope(Scope::MC_EVACUATE_EPILOGUE),
+ current_scope(Scope::MC_EVACUATE_REBALANCE),
+ current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS),
+ current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS),
+ current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN),
+ current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK),
+ current_scope(Scope::MC_FINISH),
+ current_scope(Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS),
+ current_scope(Scope::MC_MARK),
+ current_scope(Scope::MC_MARK_FINISH_INCREMENTAL),
+ current_scope(Scope::MC_MARK_ROOTS),
+ current_scope(Scope::MC_MARK_MAIN),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS),
+ current_scope(Scope::MC_MARK_WEAK_CLOSURE_HARMONY),
+ current_scope(Scope::MC_MARK_EMBEDDER_PROLOGUE),
+ current_scope(Scope::MC_MARK_EMBEDDER_TRACING),
+ current_scope(Scope::MC_PROLOGUE), current_scope(Scope::MC_SWEEP),
+ current_scope(Scope::MC_SWEEP_CODE),
+ current_scope(Scope::MC_SWEEP_MAP),
+ current_scope(Scope::MC_SWEEP_OLD),
+ current_scope(Scope::MC_INCREMENTAL),
+ current_scope(Scope::MC_INCREMENTAL_FINALIZE),
+ current_scope(Scope::MC_INCREMENTAL_FINALIZE_BODY),
+ current_scope(Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE),
+ current_scope(Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE),
+ current_scope(Scope::MC_INCREMENTAL_LAYOUT_CHANGE),
+ current_scope(Scope::MC_INCREMENTAL_START),
+ current_scope(Scope::MC_INCREMENTAL_SWEEPING),
+ current_scope(Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE),
+ current_scope(Scope::MC_INCREMENTAL_EMBEDDER_TRACING),
+ incremental_scope(Scope::MC_INCREMENTAL_EMBEDDER_TRACING)
.longest_step,
- current_
- .incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY]
- .longest_step,
- current_
- .incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY]
- .steps,
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL]
- .longest_step,
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
+ incremental_scope(Scope::MC_INCREMENTAL_FINALIZE_BODY).longest_step,
+ incremental_scope(Scope::MC_INCREMENTAL_FINALIZE_BODY).steps,
+ incremental_scope(Scope::MC_INCREMENTAL).longest_step,
+ incremental_scope(Scope::MC_INCREMENTAL).steps,
IncrementalMarkingSpeedInBytesPerMillisecond(),
incremental_walltime_duration,
- current_.scopes[Scope::MC_BACKGROUND_MARKING],
- current_.scopes[Scope::MC_BACKGROUND_SWEEPING],
- current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY],
- current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::BACKGROUND_UNMAPPER],
- current_.scopes[Scope::UNMAPPER], current_.start_object_size,
+ current_scope(Scope::MC_BACKGROUND_MARKING),
+ current_scope(Scope::MC_BACKGROUND_SWEEPING),
+ current_scope(Scope::MC_BACKGROUND_EVACUATE_COPY),
+ current_scope(Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS),
+ current_scope(Scope::BACKGROUND_UNMAPPER),
+ current_scope(Scope::UNMAPPER), current_.start_object_size,
current_.end_object_size, current_.start_holes_size,
current_.end_holes_size, allocated_since_last_gc,
heap_->promoted_objects_size(),
@@ -1332,12 +1314,6 @@ void GCTracer::FetchBackgroundCounters(int first_scope, int last_scope) {
}
}
-void GCTracer::AddScopeSampleBackground(Scope::ScopeId scope, double duration) {
- base::MutexGuard guard(&background_counter_mutex_);
- BackgroundCounter& counter = background_counter_[scope];
- counter.total_duration_ms += duration;
-}
-
void GCTracer::RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode) {
Counters* counters = heap_->isolate()->counters();
if (mode == RecordGCPhasesInfo::Mode::Finalize) {
@@ -1402,16 +1378,12 @@ void GCTracer::RecordGCSumCounters() {
const double atomic_pause_duration = current_.scopes[Scope::MARK_COMPACTOR];
const double incremental_marking =
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
- .duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
- .duration +
+ incremental_scopes_[Scope::MC_INCREMENTAL_LAYOUT_CHANGE].duration +
+ incremental_scopes_[Scope::MC_INCREMENTAL_START].duration +
incremental_marking_duration_ +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
- .duration;
+ incremental_scopes_[Scope::MC_INCREMENTAL_FINALIZE].duration;
const double incremental_sweeping =
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
- .duration;
+ incremental_scopes_[Scope::MC_INCREMENTAL_SWEEPING].duration;
const double overall_duration =
atomic_pause_duration + incremental_marking + incremental_sweeping;
const double background_duration =
@@ -1442,7 +1414,7 @@ namespace {
void CopyTimeMetrics(
::v8::metrics::GarbageCollectionPhases& metrics,
- const cppgc::internal::MetricRecorder::FullCycle::IncrementalPhases&
+ const cppgc::internal::MetricRecorder::GCCycle::IncrementalPhases&
cppgc_metrics) {
DCHECK_NE(-1, cppgc_metrics.mark_duration_us);
metrics.mark_wall_clock_duration_in_us = cppgc_metrics.mark_duration_us;
@@ -1455,7 +1427,7 @@ void CopyTimeMetrics(
void CopyTimeMetrics(
::v8::metrics::GarbageCollectionPhases& metrics,
- const cppgc::internal::MetricRecorder::FullCycle::Phases& cppgc_metrics) {
+ const cppgc::internal::MetricRecorder::GCCycle::Phases& cppgc_metrics) {
DCHECK_NE(-1, cppgc_metrics.compact_duration_us);
metrics.compact_wall_clock_duration_in_us = cppgc_metrics.compact_duration_us;
DCHECK_NE(-1, cppgc_metrics.mark_duration_us);
@@ -1473,7 +1445,7 @@ void CopyTimeMetrics(
void CopySizeMetrics(
::v8::metrics::GarbageCollectionSizes& metrics,
- const cppgc::internal::MetricRecorder::FullCycle::Sizes& cppgc_metrics) {
+ const cppgc::internal::MetricRecorder::GCCycle::Sizes& cppgc_metrics) {
DCHECK_NE(-1, cppgc_metrics.after_bytes);
metrics.bytes_after = cppgc_metrics.after_bytes;
DCHECK_NE(-1, cppgc_metrics.before_bytes);
@@ -1491,9 +1463,9 @@ void CopySizeMetrics(
return isolate->GetOrRegisterRecorderContextId(isolate->native_context());
}
-void FlushBatchedIncrementalEvents(
- v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalMark&
- batched_events,
+template <typename EventType>
+void FlushBatchedEvents(
+ v8::metrics::GarbageCollectionBatchedEvents<EventType>& batched_events,
Isolate* isolate) {
DCHECK_NOT_NULL(isolate->metrics_recorder());
DCHECK(!batched_events.events.empty());
@@ -1509,20 +1481,23 @@ void GCTracer::ReportFullCycleToRecorder() {
DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
DCHECK_IMPLIES(cpp_heap,
- cpp_heap->GetMetricRecorder()->MetricsReportPending());
+ cpp_heap->GetMetricRecorder()->FullGCMetricsReportPending());
const std::shared_ptr<metrics::Recorder>& recorder =
heap_->isolate()->metrics_recorder();
DCHECK_NOT_NULL(recorder);
if (!recorder->HasEmbedderRecorder()) {
incremental_mark_batched_events_ = {};
+ incremental_sweep_batched_events_ = {};
if (cpp_heap) {
cpp_heap->GetMetricRecorder()->ClearCachedEvents();
}
return;
}
if (!incremental_mark_batched_events_.events.empty()) {
- FlushBatchedIncrementalEvents(incremental_mark_batched_events_,
- heap_->isolate());
+ FlushBatchedEvents(incremental_mark_batched_events_, heap_->isolate());
+ }
+ if (!incremental_sweep_batched_events_.events.empty()) {
+ FlushBatchedEvents(incremental_sweep_batched_events_, heap_->isolate());
}
v8::metrics::GarbageCollectionFullCycle event;
@@ -1531,13 +1506,15 @@ void GCTracer::ReportFullCycleToRecorder() {
// Managed C++ heap statistics:
if (cpp_heap) {
cpp_heap->GetMetricRecorder()->FlushBatchedIncrementalEvents();
- const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
+ const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
optional_cppgc_event =
cpp_heap->GetMetricRecorder()->ExtractLastFullGcEvent();
DCHECK(optional_cppgc_event.has_value());
- DCHECK(!cpp_heap->GetMetricRecorder()->MetricsReportPending());
- const cppgc::internal::MetricRecorder::FullCycle& cppgc_event =
+ DCHECK(!cpp_heap->GetMetricRecorder()->FullGCMetricsReportPending());
+ const cppgc::internal::MetricRecorder::GCCycle& cppgc_event =
optional_cppgc_event.value();
+ DCHECK_EQ(cppgc_event.type,
+ cppgc::internal::MetricRecorder::GCCycle::Type::kMajor);
CopyTimeMetrics(event.total_cpp, cppgc_event.total);
CopyTimeMetrics(event.main_thread_cpp, cppgc_event.main_thread);
CopyTimeMetrics(event.main_thread_atomic_cpp,
@@ -1560,16 +1537,13 @@ void GCTracer::ReportFullCycleToRecorder() {
// Unified heap statistics:
const double atomic_pause_duration = current_.scopes[Scope::MARK_COMPACTOR];
const double incremental_marking =
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
- .duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ current_.incremental_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
.duration +
+ current_.incremental_scopes[Scope::MC_INCREMENTAL_START].duration +
current_.incremental_marking_duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
- .duration;
+ current_.incremental_scopes[Scope::MC_INCREMENTAL_FINALIZE].duration;
const double incremental_sweeping =
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
- .duration;
+ current_.incremental_scopes[Scope::MC_INCREMENTAL_SWEEPING].duration;
const double overall_duration =
atomic_pause_duration + incremental_marking + incremental_sweeping;
const double marking_background_duration =
@@ -1628,9 +1602,12 @@ void GCTracer::ReportFullCycleToRecorder() {
event.total.sweep_wall_clock_duration_in_us =
static_cast<int64_t>((sweeping_duration + sweeping_background_duration) *
base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_incremental.mark_wall_clock_duration_in_us =
+ incremental_marking;
+ event.main_thread_incremental.sweep_wall_clock_duration_in_us =
+ incremental_sweeping;
// TODO(chromium:1154636): Populate the following:
- // - event.main_thread_incremental
// - event.objects
// - event.memory
// - event.collection_rate_in_percent
@@ -1640,7 +1617,7 @@ void GCTracer::ReportFullCycleToRecorder() {
recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
}
-void GCTracer::ReportIncrementalMarkingStepToRecorder() {
+void GCTracer::ReportIncrementalMarkingStepToRecorder(double v8_duration) {
DCHECK_EQ(Event::Type::INCREMENTAL_MARK_COMPACTOR, current_.type);
static constexpr int kMaxBatchedEvents =
CppHeap::MetricRecorderAdapter::kMaxBatchedEvents;
@@ -1661,10 +1638,27 @@ void GCTracer::ReportIncrementalMarkingStepToRecorder() {
.cpp_wall_clock_duration_in_us = cppgc_event.value().duration_us;
}
}
- // TODO(chromium:1154636): Populate event.wall_clock_duration_in_us.
+ incremental_mark_batched_events_.events.back().wall_clock_duration_in_us =
+ static_cast<int64_t>(v8_duration *
+ base::Time::kMicrosecondsPerMillisecond);
if (incremental_mark_batched_events_.events.size() == kMaxBatchedEvents) {
- FlushBatchedIncrementalEvents(incremental_mark_batched_events_,
- heap_->isolate());
+ FlushBatchedEvents(incremental_mark_batched_events_, heap_->isolate());
+ }
+}
+
+void GCTracer::ReportIncrementalSweepingStepToRecorder(double v8_duration) {
+ static constexpr int kMaxBatchedEvents =
+ CppHeap::MetricRecorderAdapter::kMaxBatchedEvents;
+ const std::shared_ptr<metrics::Recorder>& recorder =
+ heap_->isolate()->metrics_recorder();
+ DCHECK_NOT_NULL(recorder);
+ if (!recorder->HasEmbedderRecorder()) return;
+ incremental_sweep_batched_events_.events.emplace_back();
+ incremental_sweep_batched_events_.events.back().wall_clock_duration_in_us =
+ static_cast<int64_t>(v8_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ if (incremental_sweep_batched_events_.events.size() == kMaxBatchedEvents) {
+ FlushBatchedEvents(incremental_sweep_batched_events_, heap_->isolate());
}
}
@@ -1675,9 +1669,41 @@ void GCTracer::ReportYoungCycleToRecorder() {
heap_->isolate()->metrics_recorder();
DCHECK_NOT_NULL(recorder);
if (!recorder->HasEmbedderRecorder()) return;
+
v8::metrics::GarbageCollectionYoungCycle event;
// Reason:
event.reason = static_cast<int>(current_.gc_reason);
+#if defined(CPPGC_YOUNG_GENERATION)
+ // Managed C++ heap statistics:
+ auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
+ if (cpp_heap) {
+ auto* metric_recorder = cpp_heap->GetMetricRecorder();
+ const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
+ optional_cppgc_event = metric_recorder->ExtractLastYoungGcEvent();
+    // We bail out of Oilpan's young GC if a full GC is already in progress.
+    // Check here whether the young generation event was reported.
+ if (optional_cppgc_event) {
+ DCHECK(!metric_recorder->YoungGCMetricsReportPending());
+ const cppgc::internal::MetricRecorder::GCCycle& cppgc_event =
+ optional_cppgc_event.value();
+ DCHECK_EQ(cppgc_event.type,
+ cppgc::internal::MetricRecorder::GCCycle::Type::kMinor);
+ CopyTimeMetrics(event.total_cpp, cppgc_event.total);
+ CopySizeMetrics(event.objects_cpp, cppgc_event.objects);
+ CopySizeMetrics(event.memory_cpp, cppgc_event.memory);
+ DCHECK_NE(-1, cppgc_event.collection_rate_in_percent);
+ event.collection_rate_cpp_in_percent =
+ cppgc_event.collection_rate_in_percent;
+ DCHECK_NE(-1, cppgc_event.efficiency_in_bytes_per_us);
+ event.efficiency_cpp_in_bytes_per_us =
+ cppgc_event.efficiency_in_bytes_per_us;
+ DCHECK_NE(-1, cppgc_event.main_thread_efficiency_in_bytes_per_us);
+ event.main_thread_efficiency_cpp_in_bytes_per_us =
+ cppgc_event.main_thread_efficiency_in_bytes_per_us;
+ }
+ }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
// Total:
const double total_wall_clock_duration_in_us =
(current_.scopes[Scope::SCAVENGER] +
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index c008b1da06..c6f0e7dbae 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -9,8 +9,6 @@
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
-#include "src/base/platform/platform.h"
-#include "src/base/platform/time.h"
#include "src/base/ring-buffer.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
@@ -61,21 +59,9 @@ class V8_EXPORT_PRIVATE GCTracer {
GCTracer& operator=(const GCTracer&) = delete;
struct IncrementalMarkingInfos {
- IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
-
- void Update(double delta) {
- steps++;
- duration += delta;
- if (delta > longest_step) {
- longest_step = delta;
- }
- }
-
- void ResetCurrentCycle() {
- duration = 0;
- longest_step = 0;
- steps = 0;
- }
+ V8_INLINE IncrementalMarkingInfos();
+ V8_INLINE void Update(double delta);
+ V8_INLINE void ResetCurrentCycle();
double duration; // in ms
double longest_step; // in ms
@@ -103,25 +89,27 @@ class V8_EXPORT_PRIVATE GCTracer {
LAST_TOP_MC_SCOPE = MC_SWEEP,
FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
- FIRST_BACKGROUND_SCOPE = FIRST_GENERAL_BACKGROUND_SCOPE
+ FIRST_BACKGROUND_SCOPE = FIRST_GENERAL_BACKGROUND_SCOPE,
+ LAST_BACKGROUND_SCOPE = LAST_MINOR_GC_BACKGROUND_SCOPE
};
- Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind);
- ~Scope();
+ V8_INLINE Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind);
+ V8_INLINE ~Scope();
Scope(const Scope&) = delete;
Scope& operator=(const Scope&) = delete;
static const char* Name(ScopeId id);
static bool NeedsYoungEpoch(ScopeId id);
+ V8_INLINE static constexpr int IncrementalOffset(ScopeId id);
private:
#if DEBUG
void AssertMainThread();
#endif // DEBUG
- GCTracer* tracer_;
- ScopeId scope_;
- ThreadKind thread_kind_;
- double start_time_;
+ GCTracer* const tracer_;
+ const ScopeId scope_;
+ const ThreadKind thread_kind_;
+ const double start_time_;
#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallTimer timer_;
RuntimeCallStats* runtime_stats_ = nullptr;
@@ -139,13 +127,8 @@ class V8_EXPORT_PRIVATE GCTracer {
START = 4
};
-#ifdef DEBUG
// Returns true if the event corresponds to a young generation GC.
- static constexpr bool IsYoungGenerationEvent(Type type) {
- DCHECK_NE(START, type);
- return type == SCAVENGER || type == MINOR_MARK_COMPACTOR;
- }
-#endif
+ V8_INLINE static constexpr bool IsYoungGenerationEvent(Type type);
// The state diagram for a GC cycle:
// (NOT_RUNNING) -----(StartCycle)----->
@@ -216,7 +199,7 @@ class V8_EXPORT_PRIVATE GCTracer {
// Holds details for incremental marking scopes.
IncrementalMarkingInfos
- incremental_marking_scopes[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
+ incremental_scopes[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
};
class RecordGCPhasesInfo {
@@ -244,14 +227,12 @@ class V8_EXPORT_PRIVATE GCTracer {
double optional_speed);
#ifdef V8_RUNTIME_CALL_STATS
- static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
+ V8_INLINE static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
#endif // defined(V8_RUNTIME_CALL_STATS)
explicit GCTracer(Heap* heap);
- CollectionEpoch CurrentEpoch(Scope::ScopeId id) const {
- return Scope::NeedsYoungEpoch(id) ? epoch_young_ : epoch_full_;
- }
+ V8_INLINE CollectionEpoch CurrentEpoch(Scope::ScopeId id) const;
// Start and stop an observable pause.
void StartObservablePause();
@@ -262,14 +243,15 @@ class V8_EXPORT_PRIVATE GCTracer {
const char* collector_reason);
void UpdateStatistics(GarbageCollector collector);
+ void FinalizeCurrentEvent();
enum class MarkingType { kAtomic, kIncremental };
// Start and stop a GC cycle (collecting data and reporting results).
void StartCycle(GarbageCollector collector, GarbageCollectionReason gc_reason,
const char* collector_reason, MarkingType marking);
- void StopCycle(GarbageCollector collector);
- void StopCycleIfNeeded();
+ void StopYoungCycleIfNeeded();
+ void StopFullCycleIfNeeded();
// Start and stop a cycle's atomic pause.
void StartAtomicPause();
@@ -279,32 +261,23 @@ class V8_EXPORT_PRIVATE GCTracer {
void StopInSafepoint();
void NotifySweepingCompleted();
- void NotifyCppGCCompleted();
+ void NotifyFullCppGCCompleted();
+
+ void NotifyYoungCppGCRunning();
+ void NotifyYoungCppGCCompleted();
void NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling);
#ifdef DEBUG
- bool IsInObservablePause() const { return 0.0 < start_of_observable_pause_; }
+ V8_INLINE bool IsInObservablePause() const;
// Checks if the current event is consistent with a collector.
- bool IsConsistentWithCollector(GarbageCollector collector) const {
- return (collector == GarbageCollector::SCAVENGER &&
- current_.type == Event::SCAVENGER) ||
- (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
- current_.type == Event::MINOR_MARK_COMPACTOR) ||
- (collector == GarbageCollector::MARK_COMPACTOR &&
- (current_.type == Event::MARK_COMPACTOR ||
- current_.type == Event::INCREMENTAL_MARK_COMPACTOR));
- }
+ V8_INLINE bool IsConsistentWithCollector(GarbageCollector collector) const;
// Checks if the current event corresponds to a full GC cycle whose sweeping
// has not finalized yet.
- bool IsSweepingInProgress() const {
- return (current_.type == Event::MARK_COMPACTOR ||
- current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
- current_.state == Event::State::SWEEPING;
- }
+ V8_INLINE bool IsSweepingInProgress() const;
#endif
// Sample and accumulate bytes allocated since the last GC.
@@ -322,6 +295,9 @@ class V8_EXPORT_PRIVATE GCTracer {
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, size_t bytes);
+  // Log an incremental sweeping step.
+ void AddIncrementalSweepingStep(double duration);
+
// Compute the average incremental marking speed in bytes/millisecond.
// Returns a conservative value if no events have been recorded.
double IncrementalMarkingSpeedInBytesPerMillisecond() const;
@@ -407,18 +383,7 @@ class V8_EXPORT_PRIVATE GCTracer {
double AverageMarkCompactMutatorUtilization() const;
double CurrentMarkCompactMutatorUtilization() const;
- V8_INLINE void AddScopeSample(Scope::ScopeId scope, double duration) {
- DCHECK(scope < Scope::NUMBER_OF_SCOPES);
- if (scope >= Scope::FIRST_INCREMENTAL_SCOPE &&
- scope <= Scope::LAST_INCREMENTAL_SCOPE) {
- incremental_marking_scopes_[scope - Scope::FIRST_INCREMENTAL_SCOPE]
- .Update(duration);
- } else {
- current_.scopes[scope] += duration;
- }
- }
-
- void AddScopeSampleBackground(Scope::ScopeId scope, double duration);
+ V8_INLINE void AddScopeSample(Scope::ScopeId id, double duration);
void RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode);
@@ -430,7 +395,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordTimeToIncrementalMarkingTask(double time_to_task);
#ifdef V8_RUNTIME_CALL_STATS
- WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
+ V8_INLINE WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
#endif // defined(V8_RUNTIME_CALL_STATS)
private:
@@ -456,6 +421,19 @@ class V8_EXPORT_PRIVATE GCTracer {
double total_duration_ms;
};
+ void StopCycle(GarbageCollector collector);
+
+  // Statistics for incremental and background scopes are kept out of the
+  // current event and are only copied into it by FinalizeCurrentEvent, at
+  // StopCycle. This method can be used to access such scopes correctly before
+  // that happens. Note: when accessing a background scope via this method, the
+  // caller is responsible for avoiding data races, e.g., by acquiring
+  // background_counter_mutex_.
+ V8_INLINE constexpr double current_scope(Scope::ScopeId id) const;
+
+ V8_INLINE constexpr const IncrementalMarkingInfos& incremental_scope(
+ Scope::ScopeId id) const;
+
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
// Otherwise, the result is between 1 byte/ms and 1 GB/ms.
@@ -474,7 +452,7 @@ class V8_EXPORT_PRIVATE GCTracer {
// end of the atomic pause.
void RecordGCSumCounters();
- double MonotonicallyIncreasingTimeInMs();
+ V8_INLINE double MonotonicallyIncreasingTimeInMs();
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -488,21 +466,14 @@ class V8_EXPORT_PRIVATE GCTracer {
// it can be included in later crash dumps.
void PRINTF_FORMAT(2, 3) Output(const char* format, ...) const;
- double TotalExternalTime() const {
- return current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES] +
- current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE] +
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE];
- }
-
void FetchBackgroundCounters(int first_scope, int last_scope);
void FetchBackgroundMinorGCCounters();
void FetchBackgroundMarkCompactCounters();
void FetchBackgroundGeneralCounters();
void ReportFullCycleToRecorder();
- void ReportIncrementalMarkingStepToRecorder();
+ void ReportIncrementalMarkingStepToRecorder(double v8_duration);
+ void ReportIncrementalSweepingStepToRecorder(double v8_duration);
void ReportYoungCycleToRecorder();
// Pointer to the heap that owns this tracer.
@@ -542,7 +513,7 @@ class V8_EXPORT_PRIVATE GCTracer {
// Incremental scopes carry more information than just the duration. The infos
// here are merged back upon starting/stopping the GC tracer.
IncrementalMarkingInfos
- incremental_marking_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
+ incremental_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
// Timestamp and allocation counter at the last sampled allocation event.
double allocation_time_ms_;
@@ -580,7 +551,13 @@ class V8_EXPORT_PRIVATE GCTracer {
// A full GC cycle stops only when both v8 and cppgc (if available) GCs have
// finished sweeping.
bool notified_sweeping_completed_ = false;
- bool notified_cppgc_completed_ = false;
+ bool notified_full_cppgc_completed_ = false;
+ // Similar to full GCs, a young GC cycle stops only when both v8 and cppgc GCs
+ // have finished sweeping.
+ bool notified_young_cppgc_completed_ = false;
+  // Keeps track of whether a young cppgc GC was scheduled (in contrast to full
+  // cycles, cppgc is not always scheduled for young cycles).
+ bool notified_young_cppgc_running_ = false;
// When a full GC cycle is interrupted by a young generation GC cycle, the
// |previous_| event is used as temporary storage for the |current_| event
@@ -589,8 +566,10 @@ class V8_EXPORT_PRIVATE GCTracer {
v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalMark
incremental_mark_batched_events_;
+ v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalSweep
+ incremental_sweep_batched_events_;
- base::Mutex background_counter_mutex_;
+ mutable base::Mutex background_counter_mutex_;
BackgroundCounter background_counter_[Scope::NUMBER_OF_SCOPES];
};
diff --git a/deps/v8/src/heap/heap-allocator-inl.h b/deps/v8/src/heap/heap-allocator-inl.h
index 043f4c629b..6f5569ef96 100644
--- a/deps/v8/src/heap/heap-allocator-inl.h
+++ b/deps/v8/src/heap/heap-allocator-inl.h
@@ -30,9 +30,7 @@ OldLargeObjectSpace* HeapAllocator::lo_space() const {
return static_cast<OldLargeObjectSpace*>(spaces_[LO_SPACE]);
}
-PagedSpace* HeapAllocator::map_space() const {
- return static_cast<PagedSpace*>(spaces_[MAP_SPACE]);
-}
+PagedSpace* HeapAllocator::space_for_maps() const { return space_for_maps_; }
NewSpace* HeapAllocator::new_space() const {
return static_cast<NewSpace*>(spaces_[NEW_SPACE]);
@@ -111,7 +109,7 @@ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult HeapAllocator::AllocateRaw(
break;
case AllocationType::kMap:
DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
- allocation = map_space()->AllocateRawUnaligned(size_in_bytes);
+ allocation = space_for_maps()->AllocateRawUnaligned(size_in_bytes);
break;
case AllocationType::kReadOnly:
DCHECK(read_only_space()->writable());
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
index 580f56c9e0..2915977011 100644
--- a/deps/v8/src/heap/heap-allocator.cc
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -22,8 +22,15 @@ void HeapAllocator::Setup() {
for (int i = FIRST_SPACE; i <= LAST_SPACE; ++i) {
spaces_[i] = heap_->space(i);
}
+
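+  // Fall back to the old space for map allocation when there is no dedicated
+  // map space.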
+ space_for_maps_ = spaces_[MAP_SPACE]
+ ? static_cast<PagedSpace*>(spaces_[MAP_SPACE])
+ : static_cast<PagedSpace*>(spaces_[OLD_SPACE]);
+
shared_old_allocator_ = heap_->shared_old_allocator_.get();
- shared_map_allocator_ = heap_->shared_map_allocator_.get();
+ shared_map_allocator_ = heap_->shared_map_allocator_
+ ? heap_->shared_map_allocator_.get()
+ : shared_old_allocator_;
}
void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
diff --git a/deps/v8/src/heap/heap-allocator.h b/deps/v8/src/heap/heap-allocator.h
index 9de82295f2..adb118a249 100644
--- a/deps/v8/src/heap/heap-allocator.h
+++ b/deps/v8/src/heap/heap-allocator.h
@@ -75,7 +75,7 @@ class V8_EXPORT_PRIVATE HeapAllocator final {
private:
V8_INLINE PagedSpace* code_space() const;
V8_INLINE CodeLargeObjectSpace* code_lo_space() const;
- V8_INLINE PagedSpace* map_space() const;
+ V8_INLINE PagedSpace* space_for_maps() const;
V8_INLINE NewSpace* new_space() const;
V8_INLINE NewLargeObjectSpace* new_lo_space() const;
V8_INLINE OldLargeObjectSpace* lo_space() const;
@@ -100,6 +100,7 @@ class V8_EXPORT_PRIVATE HeapAllocator final {
Heap* const heap_;
Space* spaces_[LAST_SPACE + 1];
+ PagedSpace* space_for_maps_;
ReadOnlySpace* read_only_space_;
ConcurrentAllocator* shared_old_allocator_;
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index ce799eeef3..02eefd9e4f 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -34,7 +34,6 @@
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/array-buffer-sweeper.h"
-#include "src/heap/barrier.h"
#include "src/heap/base/stack.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
@@ -48,6 +47,7 @@
#include "src/heap/embedder-tracing.h"
#include "src/heap/finalization-registry-cleanup-task.h"
#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap-layout-tracer.h"
@@ -473,6 +473,12 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return GarbageCollector::MARK_COMPACTOR;
}
+ if (FLAG_separate_gc_phases && incremental_marking()->IsMarking()) {
+ // TODO(v8:12503): Remove previous condition when flag gets removed.
+ *reason = "Incremental marking forced finalization";
+ return GarbageCollector::MARK_COMPACTOR;
+ }
+
if (!CanPromoteYoungAndExpandOldGeneration(0)) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
@@ -1909,9 +1915,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
// order; the latter may replace the current event with that of an
// interrupted full cycle.
if (IsYoungGenerationCollector(collector)) {
- tracer()->StopCycle(collector);
+ tracer()->StopYoungCycleIfNeeded();
} else {
- tracer()->StopCycleIfNeeded();
+ tracer()->StopFullCycleIfNeeded();
}
}
@@ -2184,6 +2190,17 @@ void Heap::CheckCollectionRequested() {
current_gc_callback_flags_);
}
+#if V8_ENABLE_WEBASSEMBLY
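+// Grows the wasm_canonical_rtts weak list so that it can hold at least
+// |length| entries; does nothing if it is already large enough.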
+void Heap::EnsureWasmCanonicalRttsSize(int length) {
+ Handle<WeakArrayList> current_rtts = handle(wasm_canonical_rtts(), isolate_);
+ if (length <= current_rtts->length()) return;
+ Handle<WeakArrayList> result = WeakArrayList::EnsureSpace(
+ isolate(), current_rtts, length, AllocationType::kOld);
+ result->set_length(length);
+ set_wasm_canonical_rtts(*result);
+}
+#endif
+
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
@@ -2225,17 +2242,17 @@ size_t Heap::PerformGarbageCollection(
const char* collector_reason, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- // We don't really perform a GC here but need this scope for the nested
- // SafepointScope inside Verify().
- AllowGarbageCollection allow_gc;
- Verify();
- }
-#endif // VERIFY_HEAP
-
if (IsYoungGenerationCollector(collector)) {
CompleteSweepingYoung(collector);
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ // If heap verification is enabled, we want to ensure that sweeping is
+ // completed here, as it will be triggered from Heap::Verify anyway.
+      // In this way, sweeping finalization is attributed to the corresponding
+      // full GC cycle.
+ CompleteSweepingFull();
+ }
+#endif // VERIFY_HEAP
tracer()->StartCycle(collector, gc_reason, collector_reason,
GCTracer::MarkingType::kAtomic);
} else {
@@ -2268,6 +2285,15 @@ size_t Heap::PerformGarbageCollection(
collection_barrier_->StopTimeToCollectionTimer();
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
+ Verify();
+ }
+#endif // VERIFY_HEAP
+
tracer()->StartInSafepoint();
GarbageCollectionPrologueInSafepoint();
@@ -2332,9 +2358,17 @@ size_t Heap::PerformGarbageCollection(
local_embedder_heap_tracer()->TraceEpilogue();
}
- if (collector == GarbageCollector::SCAVENGER && cpp_heap()) {
- CppHeap::From(cpp_heap())->RunMinorGC();
+#if defined(CPPGC_YOUNG_GENERATION)
+  // Schedule Oilpan's Minor GC. Since the minor GC doesn't support
+  // conservative stack scanning, do it only when the Scavenger runs from a
+  // task, which is non-nestable.
+ if (cpp_heap() && IsYoungGenerationCollector(collector)) {
+ const bool with_stack = (gc_reason != GarbageCollectionReason::kTask);
+ CppHeap::From(cpp_heap())
+ ->RunMinorGC(with_stack ? CppHeap::StackState::kMayContainHeapPointers
+ : CppHeap::StackState::kNoHeapPointers);
}
+#endif // defined(CPPGC_YOUNG_GENERATION)
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -2403,7 +2437,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
tracer()->StopAtomicPause();
tracer()->StopObservablePause();
tracer()->UpdateStatistics(collector);
- tracer()->StopCycleIfNeeded();
+ tracer()->StopFullCycleIfNeeded();
}
void Heap::CompleteSweepingYoung(GarbageCollector collector) {
@@ -2429,6 +2463,11 @@ void Heap::CompleteSweepingYoung(GarbageCollector collector) {
// the sweeping here, to avoid having to pause and resume during the young
// generation GC.
mark_compact_collector()->FinishSweepingIfOutOfWork();
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ // Always complete sweeping if young generation is enabled.
+ if (cpp_heap()) CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
void Heap::EnsureSweepingCompleted(HeapObject object) {
@@ -2604,7 +2643,14 @@ void Heap::MinorMarkCompact() {
: nullptr);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
- ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
+  // Young generation garbage collection is orthogonal to full GC marking: a
+  // young generation GC that interleaves concurrent marking may reclaim
+  // objects that are currently being processed for marking. Pause the
+  // concurrent markers so that such objects can be handled afterwards via
+  // `UpdateMarkingWorklistAfterYoungGenGC()`.
+ ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
+ CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
+ CppHeap::From(cpp_heap_));
minor_mark_compact_collector_->CollectGarbage();
@@ -2647,7 +2693,14 @@ void Heap::CheckNewSpaceExpansionCriteria() {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
base::MutexGuard guard(relocation_mutex());
- ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
+  // Young generation garbage collection is orthogonal to full GC marking: a
+  // young generation GC that interleaves concurrent marking may reclaim
+  // objects that are currently being processed for marking. Pause the
+  // concurrent markers so that such objects can be handled afterwards via
+  // `UpdateMarkingWorklistAfterYoungGenGC()`.
+ ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
+ CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
+ CppHeap::From(cpp_heap_));
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
@@ -2692,6 +2745,7 @@ void Heap::EvacuateYoungGeneration() {
void Heap::Scavenge() {
DCHECK_NOT_NULL(new_space());
+ DCHECK_IMPLIES(FLAG_separate_gc_phases, !incremental_marking()->IsMarking());
if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
isolate()->PrintWithTimestamp(
@@ -2709,7 +2763,14 @@ void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex());
- ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
+  // Young generation garbage collection is orthogonal to full GC marking: a
+  // young generation GC that interleaves concurrent marking may reclaim
+  // objects that are currently being processed for marking. Pause the
+  // concurrent markers so that such objects can be handled afterwards via
+  // `UpdateMarkingWorklistAfterYoungGenGC()`.
+ ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
+ CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
+ CppHeap::From(cpp_heap_));
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenges allocation should always succeed.
@@ -3887,7 +3948,6 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
ThreadKind::kMain);
IgnoreLocalGCRequests ignore_gc_requests(this);
- SafepointScope safepoint(this);
InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
InvokeIncrementalMarkingEpilogueCallbacks();
@@ -4097,10 +4157,12 @@ double Heap::MonotonicallyIncreasingTimeInMs() const {
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
+#if DEBUG
void Heap::VerifyNewSpaceTop() {
if (!new_space()) return;
new_space()->VerifyTop();
}
+#endif // DEBUG
bool Heap::IdleNotification(int idle_time_in_ms) {
return IdleNotification(
@@ -4717,18 +4779,6 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
return KEEP_SLOT;
},
SlotSet::FREE_EMPTY_BUCKETS);
- if (direction == OLD_TO_NEW) {
- CHECK(chunk->SweepingDone());
- RememberedSetSweeping::Iterate(
- chunk,
- [start, end, untyped](MaybeObjectSlot slot) {
- if (start <= slot.address() && slot.address() < end) {
- untyped->insert(slot.address());
- }
- return KEEP_SLOT;
- },
- SlotSet::FREE_EMPTY_BUCKETS);
- }
RememberedSet<direction>::IterateTyped(
chunk, [=](SlotType type, Address slot) {
if (start <= slot && slot < end) {
@@ -5882,6 +5932,9 @@ void Heap::PrintMaxNewSpaceSizeReached() {
}
int Heap::NextStressMarkingLimit() {
+ // Reuse Heap-global mutex as this getter is called from different threads on
+ // allocation slow paths.
+ base::MutexGuard guard(relocation_mutex());
return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 29aa5aad76..b0a8757ca9 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -697,7 +697,9 @@ class Heap {
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const;
+#if DEBUG
void VerifyNewSpaceTop();
+#endif // DEBUG
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -783,6 +785,12 @@ class Heap {
std::min(max_old_generation_size(), std::max(heap_limit, min_limit)));
}
+#if V8_ENABLE_WEBASSEMBLY
+ // TODO(manoskouk): Inline this if STRONG_MUTABLE_MOVABLE_ROOT_LIST setters
+ // become public.
+ void EnsureWasmCanonicalRttsSize(int length);
+#endif
+
// ===========================================================================
// Initialization. ===========================================================
// ===========================================================================
@@ -2431,6 +2439,7 @@ class Heap {
friend class ScavengeTaskObserver;
friend class IgnoreLocalGCRequests;
friend class IncrementalMarking;
+ friend class IncrementalMarkingRootMarkingVisitor;
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
friend class LocalHeap;
@@ -2453,6 +2462,7 @@ class Heap {
friend class StressConcurrentAllocationObserver;
friend class Space;
friend class Sweeper;
+ friend class UnifiedHeapMarkingState;
friend class heap::TestMemoryAllocatorScope;
friend class third_party_heap::Heap;
friend class third_party_heap::Impl;
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 2dc1555929..92489422d4 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -41,14 +41,6 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
return false;
}
-void IncrementalMarking::MarkRootObject(Root root, HeapObject obj) {
- if (heap_->incremental_marking()->WhiteToGreyAndPush(obj)) {
- if (V8_UNLIKELY(FLAG_track_retaining_path)) {
- heap_->AddRetainingRoot(root, obj);
- }
- }
-}
-
void IncrementalMarking::RestartIfNotMarking() {
if (state_ == COMPLETE) {
state_ = MARKING;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 5d7dd4a1dd..5f0b80a79e 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -10,10 +10,13 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
@@ -103,45 +106,8 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
DCHECK(marking_state()->IsBlack(to));
}
-class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
- public:
- explicit IncrementalMarkingRootMarkingVisitor(
- IncrementalMarking* incremental_marking)
- : heap_(incremental_marking->heap()) {}
-
- void VisitRootPointer(Root root, const char* description,
- FullObjectSlot p) override {
- DCHECK(!MapWord::IsPacked((*p).ptr()));
- MarkObjectByPointer(root, p);
- }
-
- void VisitRootPointers(Root root, const char* description,
- FullObjectSlot start, FullObjectSlot end) override {
- for (FullObjectSlot p = start; p < end; ++p) {
- DCHECK(!MapWord::IsPacked((*p).ptr()));
- MarkObjectByPointer(root, p);
- }
- }
-
- private:
- void MarkObjectByPointer(Root root, FullObjectSlot p) {
- Object object = *p;
- if (!object.IsHeapObject()) return;
- DCHECK(!MapWord::IsPacked(object.ptr()));
- HeapObject heap_object = HeapObject::cast(object);
- BasicMemoryChunk* target_page =
- BasicMemoryChunk::FromHeapObject(heap_object);
- if (target_page->InSharedHeap()) return;
- heap_->incremental_marking()->MarkRootObject(root, heap_object);
- }
-
- Heap* heap_;
-};
-
-
bool IncrementalMarking::WasActivated() { return was_activated_; }
-
bool IncrementalMarking::CanBeActivated() {
// Only start incremental marking in a safe state:
// 1) when incremental marking is turned on
@@ -216,6 +182,58 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
incremental_marking_job()->Start(heap_);
}
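+// Root visitor that greys objects referenced from roots during incremental
+// marking. Objects in the shared heap are skipped; retaining roots are
+// recorded when FLAG_track_retaining_path is set.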
+class IncrementalMarkingRootMarkingVisitor final : public RootVisitor {
+ public:
+ explicit IncrementalMarkingRootMarkingVisitor(Heap* heap)
+ : heap_(heap), incremental_marking_(heap->incremental_marking()) {}
+
+ void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) override {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
+ MarkObjectByPointer(root, p);
+ }
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
+ MarkObjectByPointer(root, p);
+ }
+ }
+
+ private:
+ void MarkObjectByPointer(Root root, FullObjectSlot p) {
+ Object object = *p;
+ if (!object.IsHeapObject()) return;
+ DCHECK(!MapWord::IsPacked(object.ptr()));
+ HeapObject heap_object = HeapObject::cast(object);
+
+ if (heap_object.InSharedHeap()) return;
+
+ if (incremental_marking_->WhiteToGreyAndPush(heap_object)) {
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(root, heap_object);
+ }
+ }
+ }
+
+ Heap* const heap_;
+ IncrementalMarking* const incremental_marking_;
+};
+
+namespace {
+
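+// Visits the roots (skipping the stack, main-thread handles, and weak roots)
+// and pushes unmarked objects they reference onto the marking worklist.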
+void MarkRoots(Heap* heap) {
+ IncrementalMarkingRootMarkingVisitor visitor(heap);
+ heap->IterateRoots(
+ &visitor,
+ base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
+ SkipRoot::kWeak});
+}
+
+} // namespace
+
+void IncrementalMarking::MarkRootsForTesting() { MarkRoots(heap_); }
void IncrementalMarking::StartMarking() {
if (heap_->isolate()->serializer_enabled()) {
@@ -258,7 +276,7 @@ void IncrementalMarking::StartMarking() {
StartBlackAllocation();
- MarkRoots();
+ MarkRoots(heap_);
if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
heap_->concurrent_marking()->ScheduleJob();
@@ -334,17 +352,6 @@ void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
}
}
-void IncrementalMarking::MarkRoots() {
- DCHECK(!finalize_marking_completed_);
- DCHECK(IsMarking());
-
- IncrementalMarkingRootMarkingVisitor visitor(this);
- heap_->IterateRoots(
- &visitor,
- base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
- SkipRoot::kWeak});
-}
-
bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
if (age == 0) {
// The map has aged. Do not retain this map.
@@ -360,7 +367,6 @@ bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
return true;
}
-
void IncrementalMarking::RetainMaps() {
// Do not retain dead maps if flag disables it or there is
// - memory pressure (reduce_memory_footprint_),
@@ -416,18 +422,10 @@ void IncrementalMarking::FinalizeIncrementally() {
double start = heap_->MonotonicallyIncreasingTimeInMs();
- // After finishing incremental marking, we try to discover all unmarked
- // objects to reduce the marking load in the final pause.
- // 1) We scan and mark the roots again to find all changes to the root set.
- // 2) Age and retain maps embedded in optimized code.
- MarkRoots();
-
// Map retaining is needed for performance, not correctness,
// so we can do it only once at the beginning of the finalization.
RetainMaps();
- MarkingBarrier::PublishAll(heap());
-
finalize_marking_completed_ = true;
if (FLAG_trace_incremental_marking) {
@@ -577,31 +575,9 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
: StepResult::kMoreWorkRemaining;
}
-void IncrementalMarking::Hurry() {
- if (!local_marking_worklists()->IsEmpty()) {
- double start = 0.0;
- if (FLAG_trace_incremental_marking) {
- start = heap_->MonotonicallyIncreasingTimeInMs();
- if (FLAG_trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
- }
- }
- collector_->ProcessMarkingWorklist(0);
- SetState(COMPLETE);
- if (FLAG_trace_incremental_marking) {
- double end = heap_->MonotonicallyIncreasingTimeInMs();
- double delta = end - start;
- if (FLAG_trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(delta));
- }
- }
- }
-}
+bool IncrementalMarking::Stop() {
+ if (IsStopped()) return false;
-void IncrementalMarking::Stop() {
- if (IsStopped()) return;
if (FLAG_trace_incremental_marking) {
int old_generation_size_mb =
static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
@@ -629,21 +605,16 @@ void IncrementalMarking::Stop() {
FinishBlackAllocation();
// Merge live bytes counters of background threads
- for (auto pair : background_live_bytes_) {
+ for (const auto& pair : background_live_bytes_) {
MemoryChunk* memory_chunk = pair.first;
intptr_t live_bytes = pair.second;
-
if (live_bytes) {
marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
}
}
-
background_live_bytes_.clear();
-}
-void IncrementalMarking::Finalize() {
- Hurry();
- Stop();
+ return true;
}
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 4a0c196358..4c77fabaab 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -135,18 +135,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool WasActivated();
void Start(GarbageCollectionReason gc_reason);
+ // Returns true if incremental marking was running and false otherwise.
+ bool Stop();
void FinalizeIncrementally();
void UpdateMarkingWorklistAfterYoungGenGC();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
- void Hurry();
-
- void Finalize();
-
- void Stop();
-
void FinalizeMarking(CompletionAction action);
void MarkingComplete(CompletionAction action);
@@ -176,9 +172,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// from white to grey.
V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);
- // Marks object referenced from roots.
- V8_INLINE void MarkRootObject(Root root, HeapObject obj);
-
// This function is used to color the object black before it undergoes an
// unsafe layout change. This is a part of synchronization protocol with
// the concurrent marker.
@@ -221,6 +214,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
background_live_bytes_[chunk] += by;
}
+ void MarkRootsForTesting();
+
private:
class Observer : public AllocationObserver {
public:
@@ -240,7 +235,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
void PauseBlackAllocation();
void FinishBlackAllocation();
- void MarkRoots();
bool ShouldRetainMap(Map map, int age);
// Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
// increase chances of reusing of map transition tree in future.
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 19844ff4c8..5a795a1e3f 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -30,6 +30,21 @@ namespace internal {
// order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
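+// Large pages contain exactly one object; executable large pages must not
+// exceed kMaxCodePageSize.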
+LargePage::LargePage(Heap* heap, BaseSpace* space, size_t chunk_size,
+ Address area_start, Address area_end,
+ VirtualMemory reservation, Executability executable)
+ : MemoryChunk(heap, space, chunk_size, area_start, area_end,
+ std::move(reservation), executable, PageSize::kLarge) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+
+ if (executable && chunk_size > LargePage::kMaxCodePageSize) {
+ FATAL("Code page is too large.");
+ }
+
+ SetFlag(MemoryChunk::LARGE_PAGE);
+ list_node().Initialize();
+}
+
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable) {
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
@@ -45,7 +60,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return page;
}
-size_t LargeObjectSpace::Available() {
+size_t LargeObjectSpace::Available() const {
// We return zero here since we cannot take advantage of already allocated
// large object memory.
return 0;
@@ -65,7 +80,6 @@ Address LargePage::GetAddressToShrink(Address object_address,
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
- DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
@@ -107,7 +121,8 @@ void LargeObjectSpace::TearDown() {
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
- heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
+ page);
}
}
@@ -195,7 +210,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
- object_size, this, executable);
+ this, object_size, executable);
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
@@ -211,7 +226,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
return page;
}
-size_t LargeObjectSpace::CommittedPhysicalMemory() {
+size_t LargeObjectSpace::CommittedPhysicalMemory() const {
// On a platform that provides lazy committing of memory, we over-account
// the actually committed memory. There is no easy way right now to support
// precise accounting of committed memory in large object space.
@@ -324,14 +339,15 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
} else {
RemovePage(current, size);
- heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+ current);
}
current = next_current;
}
objects_size_ = surviving_object_size;
}
-bool LargeObjectSpace::Contains(HeapObject object) {
+bool LargeObjectSpace::Contains(HeapObject object) const {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this);
@@ -341,8 +357,8 @@ bool LargeObjectSpace::Contains(HeapObject object) {
return owned;
}
-bool LargeObjectSpace::ContainsSlow(Address addr) {
- for (LargePage* page : *this) {
+bool LargeObjectSpace::ContainsSlow(Address addr) const {
+ for (const LargePage* page : *this) {
if (page->Contains(addr)) return true;
}
return false;
@@ -396,6 +412,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
object.IsPropertyArray(cage_base) || //
object.IsScopeInfo() || //
object.IsSeqString(cage_base) || //
+ object.IsSloppyArgumentsElements(cage_base) || //
object.IsSwissNameDictionary() || //
object.IsThinString(cage_base) || //
object.IsUncompiledDataWithoutPreparseData(cage_base) || //
@@ -520,7 +537,9 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
return AllocationResult::FromObject(result);
}
-size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
+size_t NewLargeObjectSpace::Available() const {
+ return capacity_ - SizeOfObjects();
+}
void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr;
@@ -544,7 +563,8 @@ void NewLargeObjectSpace::FreeDeadObjects(
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
- heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+ page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
diff --git a/deps/v8/src/heap/large-spaces.h b/deps/v8/src/heap/large-spaces.h
index 65734d5b34..0f062189f3 100644
--- a/deps/v8/src/heap/large-spaces.h
+++ b/deps/v8/src/heap/large-spaces.h
@@ -31,6 +31,10 @@ class LargePage : public MemoryChunk {
// already imposes on x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
+ LargePage(Heap* heap, BaseSpace* space, size_t chunk_size, Address area_start,
+ Address area_end, VirtualMemory reservation,
+ Executability executable);
+
static LargePage* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
@@ -39,6 +43,9 @@ class LargePage : public MemoryChunk {
HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }
LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
+ const LargePage* next_page() const {
+ return static_cast<const LargePage*>(list_node_.next());
+ }
// Uncommit memory that is not in use anymore by the object. If the object
// cannot be shrunk 0 is returned.
@@ -62,6 +69,7 @@ STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
public:
using iterator = LargePageIterator;
+ using const_iterator = ConstLargePageIterator;
~LargeObjectSpace() override { TearDown(); }
@@ -69,27 +77,27 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
void TearDown();
// Available bytes for objects in this space.
- size_t Available() override;
+ size_t Available() const override;
- size_t Size() override { return size_; }
- size_t SizeOfObjects() override { return objects_size_; }
+ size_t Size() const override { return size_; }
+ size_t SizeOfObjects() const override { return objects_size_; }
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
+ size_t CommittedPhysicalMemory() const override;
- int PageCount() { return page_count_; }
+ int PageCount() const { return page_count_; }
// Frees unmarked objects.
virtual void FreeUnmarkedObjects();
// Checks whether a heap object is in this space; O(1).
- bool Contains(HeapObject obj);
+ bool Contains(HeapObject obj) const;
// Checks whether an address is in the object area in this space. Iterates all
// objects in the space. May be slow.
- bool ContainsSlow(Address addr);
+ bool ContainsSlow(Address addr) const;
// Checks whether the space is empty.
- bool IsEmpty() { return first_page() == nullptr; }
+ bool IsEmpty() const { return first_page() == nullptr; }
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
@@ -97,10 +105,16 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
LargePage* first_page() override {
return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
}
+ const LargePage* first_page() const override {
+ return reinterpret_cast<const LargePage*>(memory_chunk_list_.front());
+ }
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
+ const_iterator begin() const { return const_iterator(first_page()); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
virtual bool is_off_thread() const { return false; }
@@ -115,7 +129,7 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
// The last allocated object that is not guaranteed to be initialized when the
// concurrent marker visits it.
- Address pending_object() {
+ Address pending_object() const {
return pending_object_.load(std::memory_order_acquire);
}
@@ -183,7 +197,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
AllocateRaw(int object_size);
// Available bytes for objects in this space.
- size_t Available() override;
+ size_t Available() const override;
void Flip();
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 700016cade..b5a38298a3 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -15,6 +15,7 @@
#include "src/handles/local-handles.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/concurrent-allocator.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
@@ -152,9 +153,7 @@ bool LocalHeap::ContainsLocalHandle(Address* location) {
}
bool LocalHeap::IsHandleDereferenceAllowed() {
-#ifdef DEBUG
VerifyCurrent();
-#endif
return IsRunning();
}
#endif
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 206cf936df..49a05b6a13 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -24,6 +24,7 @@
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/evacuation-allocator-inl.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
@@ -43,6 +44,7 @@
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
+#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/weak-object-worklists.h"
@@ -395,6 +397,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
void Run() override {
+ DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
VerifyRoots();
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
@@ -607,7 +610,7 @@ void MarkCompactCollector::StartMarking() {
if (FLAG_verify_heap) {
VerifyMarkbitsAreClean();
}
-#endif
+#endif // VERIFY_HEAP
}
void MarkCompactCollector::CollectGarbage() {
@@ -966,8 +969,6 @@ void MarkCompactCollector::AbortCompaction() {
}
void MarkCompactCollector::Prepare() {
- was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
-
#ifdef DEBUG
DCHECK(state_ == IDLE);
state_ = PREPARE_GC;
@@ -975,17 +976,22 @@ void MarkCompactCollector::Prepare() {
DCHECK(!sweeping_in_progress());
- if (!was_marked_incrementally_) {
+ if (!heap()->incremental_marking()->IsMarking()) {
+ const auto embedder_flags = heap_->flags_for_embedder_tracer();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
- auto embedder_flags = heap_->flags_for_embedder_tracer();
// PrepareForTrace should be called before visitor initialization in
// StartMarking.
heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
- heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
+ // TracePrologue immediately starts marking which requires V8 worklists to
+ // be set up.
+ heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
+ }
}
heap_->FreeLinearAllocationAreas();
@@ -1016,6 +1022,9 @@ void MarkCompactCollector::FinishConcurrentMarking() {
non_atomic_marking_state());
heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
}
+ if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
+ cpp_heap->FinishConcurrentMarkingIfNeeded();
+ }
}
void MarkCompactCollector::VerifyMarking() {
@@ -1517,7 +1526,6 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
DCHECK(chunk->SweepingDone());
- DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
@@ -2359,14 +2367,12 @@ void MarkCompactCollector::MarkLiveObjects() {
// with the C stack limit check.
PostponeInterruptsScope postpone(isolate());
+ bool was_marked_incrementally = false;
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
- IncrementalMarking* incremental_marking = heap_->incremental_marking();
- if (was_marked_incrementally_) {
- incremental_marking->Finalize();
+ if (heap_->incremental_marking()->Stop()) {
MarkingBarrier::PublishAll(heap());
- } else {
- CHECK(incremental_marking->IsStopped());
+ was_marked_incrementally = true;
}
}
@@ -2476,7 +2482,11 @@ void MarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedHeapObject);
}
}
- if (was_marked_incrementally_) {
+
+ if (was_marked_incrementally) {
+ // Disable the marking barrier after concurrent/parallel marking has
+ // finished as it will reset page flags that share the same bitmap as
+ // the evacuation candidate bit.
MarkingBarrier::DeactivateAll(heap());
GlobalHandles::DisableMarkingBarrier(heap()->isolate());
}
@@ -2634,7 +2644,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
- DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
@@ -2887,7 +2896,6 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
- DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
@@ -3225,7 +3233,6 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
} else {
DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map_word.ToMap()));
}
- // OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
}
@@ -3513,13 +3520,17 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
#ifdef DEBUG
- // Old-to-old slot sets must be empty after evacuation.
- for (Page* p : *heap()->old_space()) {
- DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
- DCHECK_NULL((p->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
- DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
- DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
- DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
+ MemoryChunkIterator chunk_iterator(heap());
+
+ while (chunk_iterator.HasNext()) {
+ MemoryChunk* chunk = chunk_iterator.Next();
+
+ // Old-to-old slot sets must be empty after evacuation.
+ DCHECK_NULL((chunk->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+ DCHECK_NULL((chunk->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
+ DCHECK_NULL((chunk->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+ DCHECK_NULL(chunk->invalidated_slots<OLD_TO_OLD>());
+ DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
}
#endif
}
@@ -3978,13 +3989,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
this, std::move(evacuation_items), nullptr);
}
- // After evacuation there might still be swept pages that weren't
- // added to one of the compaction space but still reside in the
- // sweeper's swept_list_. Merge remembered sets for those pages as
- // well such that after mark-compact all pages either store slots
- // in the sweeping or old-to-new remembered set.
- sweeper()->MergeOldToNewRememberedSetsForSweptPages();
-
const size_t aborted_pages = PostProcessEvacuationCandidates();
if (FLAG_trace_evacuation) {
@@ -4375,11 +4379,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
- DCHECK_IMPLIES(
- collector == GarbageCollector::MARK_COMPACTOR,
- chunk_->SweepingDone() &&
- chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
-
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
int slots = RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
@@ -4396,30 +4395,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
}
- if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
- DCHECK_IMPLIES(
- collector == GarbageCollector::MARK_COMPACTOR,
- !chunk_->SweepingDone() &&
- (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
- nullptr);
- DCHECK(!chunk_->IsLargePage());
-
- InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
- int slots = RememberedSetSweeping::Iterate(
- chunk_,
- [this, &filter](MaybeObjectSlot slot) {
- if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- return CheckAndUpdateOldToNewSlot(slot);
- },
- SlotSet::FREE_EMPTY_BUCKETS);
-
- DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
-
- if (slots == 0) {
- chunk_->ReleaseSweepingSlotSet();
- }
- }
-
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
// processed.
@@ -4433,10 +4408,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter, cage_base](MaybeObjectSlot slot) {
- if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
+ if (filter.IsValid(slot.address())) {
+ UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
+ }
+ // Always keep slot since all slots are dropped at once after
+ // iteration.
+ return KEEP_SLOT;
},
- SlotSet::FREE_EMPTY_BUCKETS);
+ SlotSet::KEEP_EMPTY_BUCKETS);
chunk_->ReleaseSlotSet<OLD_TO_OLD>();
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
@@ -4497,11 +4476,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
PtrComprCageBase cage_base = heap_->isolate();
- return UpdateTypedSlotHelper::UpdateTypedSlot(
+ UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
});
+ // Always keep slot since all slots are dropped at once after iteration.
+ return KEEP_SLOT;
});
+ chunk_->ReleaseTypedSlotSet<OLD_TO_OLD>();
}
}
@@ -4534,18 +4516,15 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
- const bool contains_old_to_new_sweeping_slots =
- chunk->sweeping_slot_set() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
- if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
- !contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
+ if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
+ !contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
- contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
@@ -4751,8 +4730,6 @@ void ReRecordPage(
// might not have recorded them in first place.
// Remove outdated slots.
- RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
- SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
@@ -4804,6 +4781,7 @@ size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
}
}
DCHECK_EQ(aborted_pages_verified, aborted_pages);
+ USE(aborted_pages_verified);
return aborted_pages;
}
@@ -4972,6 +4950,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
: EvacuationVerifier(heap) {}
void Run() override {
+ DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
VerifyRoots();
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
@@ -5628,14 +5607,6 @@ class PageMarkingItem : public ParallelWorkItem {
return CheckAndMarkObject(task, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
- filter = InvalidatedSlotsFilter::OldToNew(chunk_);
- RememberedSetSweeping::Iterate(
- chunk_,
- [this, task, &filter](MaybeObjectSlot slot) {
- if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- return CheckAndMarkObject(task, slot);
- },
- SlotSet::FREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index ea9173f5be..6fab6e77b4 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -785,7 +785,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
const bool is_shared_heap_;
- bool was_marked_incrementally_ = false;
bool evacuation_ = false;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index d7cc79315f..fdf208d5cc 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -30,7 +30,7 @@ class MarkingBarrier {
static void ActivateAll(Heap* heap, bool is_compacting);
static void DeactivateAll(Heap* heap);
- static void PublishAll(Heap* heap);
+ V8_EXPORT_PRIVATE static void PublishAll(Heap* heap);
void Write(HeapObject host, HeapObjectSlot, HeapObject value);
void Write(Code host, RelocInfo*, HeapObject value);
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index c59ae55d2d..756befa539 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -55,6 +55,7 @@ template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
concrete_visitor()->SynchronizePageAccess(heap_object);
+ if (!is_shared_heap_ && heap_object.InSharedHeap()) return;
if (concrete_visitor()->marking_state()->IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
// reduce the processing time of weak cells during the main GC
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index 9f467305bf..637c68db81 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -10,8 +10,11 @@
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/log.h"
@@ -51,8 +54,8 @@ void MemoryAllocator::TearDown() {
// DCHECK_EQ(0, size_executable_);
capacity_ = 0;
- if (last_chunk_.IsReserved()) {
- last_chunk_.Free();
+ if (reserved_chunk_at_virtual_memory_limit_) {
+ reserved_chunk_at_virtual_memory_limit_->Free();
}
code_page_allocator_ = nullptr;
@@ -140,7 +143,7 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
- while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kNonRegular)) != nullptr) {
allocator_->PerformFreeMemory(chunk);
if (delegate && delegate->ShouldYield()) return;
}
@@ -156,17 +159,17 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
NumberOfChunks());
}
// Regular chunks.
- while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular)) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
allocator_->PerformFreeMemory(chunk);
- if (pooled) AddMemoryChunkSafe(kPooled, chunk);
+ if (pooled) AddMemoryChunkSafe(ChunkQueueType::kPooled, chunk);
if (delegate && delegate->ShouldYield()) return;
}
if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
// The previous loop uncommitted any pages marked as pooled and added them
// to the pooled list. In the case of kFreePooled we still need to free them as
// well.
- while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled)) != nullptr) {
allocator_->FreePooledChunk(chunk);
if (delegate && delegate->ShouldYield()) return;
}
@@ -177,20 +180,21 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
void MemoryAllocator::Unmapper::TearDown() {
CHECK(!job_handle_ || !job_handle_->IsValid());
PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ for (int i = 0; i < ChunkQueueType::kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
}
}
size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
base::MutexGuard guard(&mutex_);
- return chunks_[kRegular].size() + chunks_[kNonRegular].size();
+ return chunks_[ChunkQueueType::kRegular].size() +
+ chunks_[ChunkQueueType::kNonRegular].size();
}
int MemoryAllocator::Unmapper::NumberOfChunks() {
base::MutexGuard guard(&mutex_);
size_t result = 0;
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ for (int i = 0; i < ChunkQueueType::kNumberOfChunkQueues; i++) {
result += chunks_[i].size();
}
return static_cast<int>(result);
@@ -202,10 +206,10 @@ size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
size_t sum = 0;
// kPooled chunks are already uncommitted. We only have to account for
// kRegular and kNonRegular chunks.
- for (auto& chunk : chunks_[kRegular]) {
+ for (auto& chunk : chunks_[ChunkQueueType::kRegular]) {
sum += chunk->size();
}
- for (auto& chunk : chunks_[kNonRegular]) {
+ for (auto& chunk : chunks_[ChunkQueueType::kNonRegular]) {
sum += chunk->size();
}
return sum;
@@ -236,186 +240,160 @@ void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
}
Address MemoryAllocator::AllocateAlignedMemory(
- size_t reserve_size, size_t commit_size, size_t alignment,
+ size_t chunk_size, size_t area_size, size_t alignment,
Executability executable, void* hint, VirtualMemory* controller) {
v8::PageAllocator* page_allocator = this->page_allocator(executable);
- DCHECK(commit_size <= reserve_size);
- VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
- if (!reservation.IsReserved()) return kNullAddress;
+ DCHECK_LT(area_size, chunk_size);
+
+ VirtualMemory reservation(page_allocator, chunk_size, hint, alignment);
+ if (!reservation.IsReserved()) return HandleAllocationFailure();
+
+ // We cannot use the last chunk in the address space because we would
+ // overflow when comparing top and limit if this chunk is used for a
+ // linear allocation area.
+ if ((reservation.address() + static_cast<Address>(chunk_size)) == 0u) {
+ CHECK(!reserved_chunk_at_virtual_memory_limit_);
+ reserved_chunk_at_virtual_memory_limit_ = std::move(reservation);
+ CHECK(reserved_chunk_at_virtual_memory_limit_);
+
+ // Retry reserving virtual memory.
+ reservation = VirtualMemory(page_allocator, chunk_size, hint, alignment);
+ if (!reservation.IsReserved()) return HandleAllocationFailure();
+ }
+
Address base = reservation.address();
- size_ += reservation.size();
if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation, base, commit_size,
- reserve_size)) {
- base = kNullAddress;
+ const size_t aligned_area_size = ::RoundUp(area_size, GetCommitPageSize());
+ if (!SetPermissionsOnExecutableMemoryChunk(&reservation, base,
+ aligned_area_size, chunk_size)) {
+ return HandleAllocationFailure();
}
} else {
+ // No guard page between page header and object area. This allows us to make
+ // all OS pages for both regions readable+writable at once.
+ const size_t commit_size =
+ ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
+ GetCommitPageSize());
+
if (reservation.SetPermissions(base, commit_size,
PageAllocator::kReadWrite)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
- base = kNullAddress;
+ return HandleAllocationFailure();
}
}
- if (base == kNullAddress) {
- // Failed to commit the body. Free the mapping and any partially committed
- // regions inside it.
- reservation.Free();
- size_ -= reserve_size;
- return kNullAddress;
- }
-
*controller = std::move(reservation);
return base;
}
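The special case for a reservation that ends exactly at the top of the address space is plain unsigned wrap-around: if base + chunk_size is 0, a linear allocation area limit derived from it also wraps to 0 and every top <= limit comparison fails. A small stand-alone illustration, using a hypothetical chunk size and plain integers instead of V8's Address type:

#include <cstdint>
#include <iostream>

int main() {
  const uint64_t chunk_size = 1u << 18;   // hypothetical 256 KiB chunk
  const uint64_t base = 0u - chunk_size;  // a chunk placed so that it ends at 0

  const uint64_t top = base;                 // bump-pointer start
  const uint64_t limit = base + chunk_size;  // wraps around to 0

  std::cout << std::hex << "top=0x" << top << " limit=0x" << limit << "\n";
  // With limit == 0, `top <= limit` is false for every non-zero top, so a
  // linear allocation area on this chunk could never hand out memory.
  std::cout << std::boolalpha << (top <= limit) << "\n";  // prints false
  return 0;
}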
-V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
- size_t reserve_area_size, size_t commit_area_size, Executability executable,
- BaseSpace* owner) {
- DCHECK_LE(commit_area_size, reserve_area_size);
-
- size_t chunk_size;
+Address MemoryAllocator::HandleAllocationFailure() {
Heap* heap = isolate_->heap();
- Address base = kNullAddress;
- VirtualMemory reservation;
- Address area_start = kNullAddress;
- Address area_end = kNullAddress;
-#ifdef V8_COMPRESS_POINTERS
- // When pointer compression is enabled, spaces are expected to be at a
- // predictable address (see mkgrokdump) so we don't supply a hint and rely on
- // the deterministic behaviour of the BoundedPageAllocator.
- void* address_hint = nullptr;
-#else
- void* address_hint =
- AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
-#endif
+ if (!heap->deserialization_complete()) {
+ heap->FatalProcessOutOfMemory(
+ "MemoryChunk allocation failed during deserialization.");
+ }
+ return kNullAddress;
+}
+
+size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
+ Executability executable) {
+ if (executable == EXECUTABLE) {
+ //
+ // Executable
+ // +----------------------------+<- base aligned at MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- base + CodePageGuardStartOffset
+ // | Guard |
+ // +----------------------------+<- area_start_
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Guard |
+ // +----------------------------+<- base + chunk_size
+ //
+
+ return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
+ area_size + MemoryChunkLayout::CodePageGuardSize(),
+ GetCommitPageSize());
+ }
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
//
// Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // +----------------------------+<- base aligned at MemoryChunk::kAlignment
// | Header |
// +----------------------------+<- area_start_ (base + area_start_)
// | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // +----------------------------+<- area_end_ (area_start + area_size)
// | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
// +----------------------------+<- base + chunk_size
//
+ DCHECK_EQ(executable, NOT_EXECUTABLE);
+ return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
+ GetCommitPageSize());
+}
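A rough, self-contained sketch of the arithmetic in ComputeChunkSize, using assumed layout constants in place of MemoryChunkLayout and GetCommitPageSize():

#include <cstddef>
#include <iostream>

constexpr size_t RoundUp(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

int main() {
  const size_t kCommitPageSize = 4096;                // assumed commit page size
  const size_t kObjectStartOffsetInCodePage = 8192;   // hypothetical header + pre-guard
  const size_t kObjectStartOffsetInDataPage = 4096;   // hypothetical header only
  const size_t kCodePageGuardSize = 4096;             // hypothetical trailing guard
  const size_t area_size = 100 * 1024;

  // Executable chunk: header, leading guard, object area and trailing guard,
  // rounded up to a whole number of commit pages.
  const size_t executable_chunk = RoundUp(
      kObjectStartOffsetInCodePage + area_size + kCodePageGuardSize,
      kCommitPageSize);

  // Non-executable chunk: just header plus object area, no guard pages.
  const size_t data_chunk =
      RoundUp(kObjectStartOffsetInDataPage + area_size, kCommitPageSize);

  std::cout << executable_chunk << " " << data_chunk << "\n";  // 114688 106496
  return 0;
}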
+
+base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
+MemoryAllocator::AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
+ Executability executable,
+ PageSize page_size) {
+#ifdef V8_COMPRESS_POINTERS
+ // When pointer compression is enabled, spaces are expected to be at a
+ // predictable address (see mkgrokdump) so we don't supply a hint and rely on
+ // the deterministic behaviour of the BoundedPageAllocator.
+ void* address_hint = nullptr;
+#else
+ void* address_hint = AlignedAddress(isolate_->heap()->GetRandomMmapAddr(),
+ MemoryChunk::kAlignment);
+#endif
+
+ VirtualMemory reservation;
+ size_t chunk_size = ComputeChunkSize(area_size, executable);
+ DCHECK_EQ(chunk_size % GetCommitPageSize(), 0);
+
+ Address base =
+ AllocateAlignedMemory(chunk_size, area_size, MemoryChunk::kAlignment,
+ executable, address_hint, &reservation);
+ if (base == kNullAddress) return {};
+
+ size_ += reservation.size();
+
+ // Update executable memory size.
if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
- reserve_area_size +
- MemoryChunkLayout::CodePageGuardSize(),
- GetCommitPageSize());
-
- // Size of header (not executable) plus area (executable).
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
- if (base == kNullAddress) return nullptr;
- // Update executable memory size.
size_executable_ += reservation.size();
+ }
- if (Heap::ShouldZapGarbage()) {
+ if (Heap::ShouldZapGarbage()) {
+ if (executable == EXECUTABLE) {
+ // Page header and object area are split by a guard page. Zap page header
+ // first.
ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
+ // Now zap object area.
ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
- commit_area_size, kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
- area_end = area_start + commit_area_size;
- } else {
- chunk_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
- GetCommitPageSize());
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
-
- if (base == kNullAddress) return nullptr;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(
- base,
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- kZapValue);
+ area_size, kZapValue);
+ } else {
+ DCHECK_EQ(executable, NOT_EXECUTABLE);
+ // Zap both page header and object area at once. No guard page in-between.
+ ZapBlock(base,
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
+ kZapValue);
}
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
- area_end = area_start + commit_area_size;
}
- // Use chunk_size for statistics because we assume that treat reserved but
- // not-yet committed memory regions of chunks as allocated.
LOG(isolate_,
NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
- // We cannot use the last chunk in the address space because we would
- // overflow when comparing top and limit if this chunk is used for a
- // linear allocation area.
- if ((base + chunk_size) == 0u) {
- CHECK(!last_chunk_.IsReserved());
- last_chunk_ = std::move(reservation);
- UncommitMemory(&last_chunk_);
- size_ -= chunk_size;
- if (executable == EXECUTABLE) {
- size_executable_ -= chunk_size;
- }
- CHECK(last_chunk_.IsReserved());
- return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
- owner);
- }
-
- BasicMemoryChunk* chunk =
- BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- owner, std::move(reservation));
+ Address area_start = base + MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
+ space->identity());
+ Address area_end = area_start + area_size;
- return chunk;
-}
-
-MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- PageSize page_size,
- BaseSpace* owner) {
- BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
- reserve_area_size, commit_area_size, executable, owner);
-
- if (basic_chunk == nullptr) return nullptr;
-
- MemoryChunk* chunk = MemoryChunk::Initialize(basic_chunk, isolate_->heap(),
- executable, page_size);
-
-#ifdef DEBUG
- if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
-#endif // DEBUG
- return chunk;
+ return MemoryChunkAllocationResult{
+ reinterpret_cast<void*>(base), chunk_size, area_start, area_end,
+ std::move(reservation),
+ };
}
void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
@@ -527,16 +505,16 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
switch (mode) {
- case kImmediately:
+ case FreeMode::kImmediately:
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
- case kConcurrentlyAndPool:
+ case FreeMode::kConcurrentlyAndPool:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
V8_FALLTHROUGH;
- case kConcurrently:
+ case FreeMode::kConcurrently:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
@@ -552,29 +530,47 @@ void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
}
Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
- size_t size, Space* owner,
- Executability executable) {
- MemoryChunk* chunk = nullptr;
- if (alloc_mode == kUsePool) {
+ Space* space, Executability executable) {
+ size_t size =
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space->identity());
+ base::Optional<MemoryChunkAllocationResult> chunk_info;
+ if (alloc_mode == AllocationMode::kUsePool) {
DCHECK_EQ(size, static_cast<size_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- owner->identity())));
+ space->identity())));
DCHECK_EQ(executable, NOT_EXECUTABLE);
- chunk = AllocatePagePooled(owner);
+ chunk_info = AllocateUninitializedPageFromPool(space);
}
- if (chunk == nullptr) {
- chunk = AllocateChunk(size, size, executable, PageSize::kRegular, owner);
+
+ if (!chunk_info) {
+ chunk_info =
+ AllocateUninitializedChunk(space, size, executable, PageSize::kRegular);
}
- if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk);
+
+ if (!chunk_info) return nullptr;
+
+ Page* page = new (chunk_info->start) Page(
+ isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
+ chunk_info->area_end, std::move(chunk_info->reservation), executable);
+
+#ifdef DEBUG
+ if (page->executable()) RegisterExecutableMemoryChunk(page);
+#endif // DEBUG
+
+ space->InitializePage(page);
+ return page;
}
-ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
- ReadOnlySpace* owner) {
- BasicMemoryChunk* chunk =
- AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
- if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk);
+ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space) {
+ DCHECK_EQ(space->identity(), RO_SPACE);
+ size_t size = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE);
+ base::Optional<MemoryChunkAllocationResult> chunk_info =
+ AllocateUninitializedChunk(space, size, NOT_EXECUTABLE,
+ PageSize::kRegular);
+ if (!chunk_info) return nullptr;
+ return new (chunk_info->start) ReadOnlyPage(
+ isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
+ chunk_info->area_end, std::move(chunk_info->reservation));
}
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
@@ -583,38 +579,48 @@ MemoryAllocator::RemapSharedPage(
return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
}
-LargePage* MemoryAllocator::AllocateLargePage(size_t size,
- LargeObjectSpace* owner,
+LargePage* MemoryAllocator::AllocateLargePage(LargeObjectSpace* space,
+ size_t object_size,
Executability executable) {
- MemoryChunk* chunk =
- AllocateChunk(size, size, executable, PageSize::kLarge, owner);
- if (chunk == nullptr) return nullptr;
- return LargePage::Initialize(isolate_->heap(), chunk, executable);
+ base::Optional<MemoryChunkAllocationResult> chunk_info =
+ AllocateUninitializedChunk(space, object_size, executable,
+ PageSize::kLarge);
+
+ if (!chunk_info) return nullptr;
+
+ LargePage* page = new (chunk_info->start) LargePage(
+ isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
+ chunk_info->area_end, std::move(chunk_info->reservation), executable);
+
+#ifdef DEBUG
+ if (page->executable()) RegisterExecutableMemoryChunk(page);
+#endif // DEBUG
+
+ return page;
}
-MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
- MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
- if (chunk == nullptr) return nullptr;
+base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
+MemoryAllocator::AllocateUninitializedPageFromPool(Space* space) {
+ void* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+ if (chunk == nullptr) return {};
const int size = MemoryChunk::kPageSize;
const Address start = reinterpret_cast<Address>(chunk);
const Address area_start =
start +
- MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
const Address area_end = start + size;
// Pooled pages are always regular data pages.
- DCHECK_NE(CODE_SPACE, owner->identity());
+ DCHECK_NE(CODE_SPACE, space->identity());
VirtualMemory reservation(data_page_allocator(), start, size);
- if (!CommitMemory(&reservation)) return nullptr;
+ if (!CommitMemory(&reservation)) return {};
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size, kZapValue);
}
- BasicMemoryChunk* basic_chunk =
- BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
- area_end, owner, std::move(reservation));
- MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE,
- PageSize::kRegular);
+
size_ += size;
- return chunk;
+ return MemoryChunkAllocationResult{
+ chunk, size, area_start, area_end, std::move(reservation),
+ };
}
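The pool fast path above boils down to: hand back a previously pooled page-sized chunk if one exists, otherwise fall back to a fresh reservation. A toy sketch of that flow (ChunkPool, AllocatePage and kPageSize are made-up names, not V8's):

#include <cstdlib>
#include <optional>
#include <vector>

constexpr std::size_t kPageSize = 1u << 18;  // assumed chunk/page size

struct ChunkPool {
  std::vector<void*> chunks;

  // Hand back a previously pooled chunk, if any; the caller re-commits the
  // memory and rebuilds the page header.
  std::optional<void*> TryGet() {
    if (chunks.empty()) return std::nullopt;
    void* chunk = chunks.back();
    chunks.pop_back();
    return chunk;
  }

  void Put(void* chunk) { chunks.push_back(chunk); }
};

void* AllocatePage(ChunkPool& pool) {
  if (auto pooled = pool.TryGet()) return *pooled;   // fast path: reuse
  return std::aligned_alloc(kPageSize, kPageSize);   // slow path: fresh chunk
}

int main() {
  ChunkPool pool;
  void* page = AllocatePage(pool);    // no pooled chunk yet: fresh allocation
  pool.Put(page);                     // freed "concurrently and pooled"
  void* reused = AllocatePage(pool);  // second allocation reuses the pooled one
  std::free(reused);
  return 0;
}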
void MemoryAllocator::ZapBlock(Address start, size_t size,
@@ -645,42 +651,50 @@ base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
discardable_end - discardable_start);
}
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
- size_t commit_size,
- size_t reserved_size) {
+bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
+ Address start,
+ size_t area_size,
+ size_t chunk_size) {
const size_t page_size = GetCommitPageSize();
+
// All addresses and sizes must be aligned to the commit page size.
DCHECK(IsAligned(start, page_size));
- DCHECK_EQ(0, commit_size % page_size);
- DCHECK_EQ(0, reserved_size % page_size);
+ DCHECK_EQ(0, area_size % page_size);
+ DCHECK_EQ(0, chunk_size % page_size);
+
const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
const size_t code_area_offset =
MemoryChunkLayout::ObjectStartOffsetInCodePage();
- // reserved_size includes two guard regions, commit_size does not.
- DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
+
+ DCHECK_EQ(pre_guard_offset + guard_size + area_size + guard_size, chunk_size);
+
const Address pre_guard_page = start + pre_guard_offset;
const Address code_area = start + code_area_offset;
- const Address post_guard_page = start + reserved_size - guard_size;
+ const Address post_guard_page = start + chunk_size - guard_size;
+
// Commit the non-executable header, from start to pre-code guard page.
if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
PageAllocator::kNoAccess)) {
// Commit the executable code body.
- if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
+ if (vm->SetPermissions(code_area, area_size,
MemoryChunk::GetCodeModificationPermission())) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
PageAllocator::kNoAccess)) {
- UpdateAllocatedSpaceLimits(start, code_area + commit_size);
+ UpdateAllocatedSpaceLimits(start, code_area + area_size);
return true;
}
- vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
+
+ vm->SetPermissions(code_area, area_size, PageAllocator::kNoAccess);
}
}
+
vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
}
+
return false;
}
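The permission layout this function establishes, a readable/writable header, an inaccessible pre-code guard page, the code area with its own permissions, and an inaccessible post-code guard page, can be reproduced with plain POSIX calls. The sketch below is POSIX-only, the sizes are assumptions, and the code area is simply made RW instead of V8's actual code-modification permission:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

int main() {
  const std::size_t page = static_cast<std::size_t>(sysconf(_SC_PAGESIZE));
  const std::size_t header_size = page;    // hypothetical chunk header size
  const std::size_t area_size = 4 * page;  // hypothetical code area size
  const std::size_t chunk_size =
      header_size + page /* pre-guard */ + area_size + page /* post-guard */;

  // Reserve the whole chunk inaccessible, then open up the individual pieces.
  void* mem = mmap(nullptr, chunk_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  char* base = static_cast<char*>(mem);

  // Header becomes readable/writable.
  if (mprotect(base, header_size, PROT_READ | PROT_WRITE) != 0) return 1;
  // The page after the header stays PROT_NONE: pre-code guard page.
  // The code area gets its own permissions (RW in this sketch).
  if (mprotect(base + header_size + page, area_size,
               PROT_READ | PROT_WRITE) != 0) return 1;
  // The last page stays PROT_NONE: post-code guard page.

  munmap(base, chunk_size);
  return 0;
}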
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index f7a5da5c26..655484b7e0 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -17,6 +17,8 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
+#include "src/common/globals.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-range.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -44,15 +46,15 @@ class MemoryAllocator {
Unmapper(Heap* heap, MemoryAllocator* allocator)
: heap_(heap), allocator_(allocator) {
- chunks_[kRegular].reserve(kReservedQueueingSlots);
- chunks_[kPooled].reserve(kReservedQueueingSlots);
+ chunks_[ChunkQueueType::kRegular].reserve(kReservedQueueingSlots);
+ chunks_[ChunkQueueType::kPooled].reserve(kReservedQueueingSlots);
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
- AddMemoryChunkSafe(kRegular, chunk);
+ AddMemoryChunkSafe(ChunkQueueType::kRegular, chunk);
} else {
- AddMemoryChunkSafe(kNonRegular, chunk);
+ AddMemoryChunkSafe(ChunkQueueType::kNonRegular, chunk);
}
}
@@ -62,9 +64,9 @@ class MemoryAllocator {
// been uncommitted.
// (2) Try to steal any memory chunk of kPageSize that would've been
// uncommitted.
- MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
+ MemoryChunk* chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled);
if (chunk == nullptr) {
- chunk = GetMemoryChunkSafe(kRegular);
+ chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular);
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
chunk->ReleaseAllAllocatedMemory();
@@ -126,13 +128,13 @@ class MemoryAllocator {
Heap* const heap_;
MemoryAllocator* const allocator_;
base::Mutex mutex_;
- std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+ std::vector<MemoryChunk*> chunks_[ChunkQueueType::kNumberOfChunkQueues];
std::unique_ptr<v8::JobHandle> job_handle_;
friend class MemoryAllocator;
};
- enum AllocationMode {
+ enum class AllocationMode {
// Regular allocation path. Does not use pool.
kRegular,
@@ -140,7 +142,7 @@ class MemoryAllocator {
kUsePool,
};
- enum FreeMode {
+ enum class FreeMode {
// Frees page immediately on the main thread.
kImmediately,
@@ -182,13 +184,14 @@ class MemoryAllocator {
// whether pooled allocation, which only works for MemoryChunk::kPageSize,
// should be tried first.
V8_EXPORT_PRIVATE Page* AllocatePage(
- MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
+ MemoryAllocator::AllocationMode alloc_mode, Space* space,
Executability executable);
- LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
- Executability executable);
+ V8_EXPORT_PRIVATE LargePage* AllocateLargePage(LargeObjectSpace* space,
+ size_t object_size,
+ Executability executable);
- ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
+ ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
@@ -216,15 +219,6 @@ class MemoryAllocator {
address >= highest_ever_allocated_;
}
- // Returns a MemoryChunk in which the memory region from commit_area_size to
- // reserve_area_size of the chunk area is reserved but not committed, it
- // could be committed later by calling MemoryChunk::CommitArea.
- V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- PageSize page_size,
- BaseSpace* space);
-
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
// Additional memory beyond the page is not accounted though, so
@@ -264,15 +258,32 @@ class MemoryAllocator {
void UnregisterReadOnlyPage(ReadOnlyPage* page);
+ Address HandleAllocationFailure();
+
private:
- // Returns a BasicMemoryChunk in which the memory region from commit_area_size
- // to reserve_area_size of the chunk area is reserved but not committed, it
- // could be committed later by calling MemoryChunk::CommitArea.
- V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
- size_t reserve_area_size, size_t commit_area_size,
- Executability executable, BaseSpace* space);
-
- Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+ // Used to store all data about MemoryChunk allocation, e.g. in
+ // AllocateUninitializedChunk.
+ struct MemoryChunkAllocationResult {
+ void* start;
+ size_t size;
+ size_t area_start;
+ size_t area_end;
+ VirtualMemory reservation;
+ };
+
+ // Computes the size of a MemoryChunk from the size of the object_area and
+ // whether the chunk is executable or not.
+ static size_t ComputeChunkSize(size_t area_size, Executability executable);
+
+ // Internal allocation method for all pages/memory chunks. Returns data about
+ // the uninitialized memory region.
+ V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
+ AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
+ Executability executable, PageSize page_size);
+
+ // Internal raw allocation method that allocates an aligned MemoryChunk and
+ // sets the right memory permissions.
+ Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
size_t alignment, Executability executable,
void* hint, VirtualMemory* controller);
@@ -280,10 +291,11 @@ class MemoryAllocator {
// it succeeded and false otherwise.
bool CommitMemory(VirtualMemory* reservation);
- V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
+ // Sets memory permissions on executable memory chunks. This entails page
+ // header (RW), guard pages (no access) and the object area (code modification
+ // permissions).
+ V8_WARN_UNUSED_RESULT bool SetPermissionsOnExecutableMemoryChunk(
+ VirtualMemory* vm, Address start, size_t area_size, size_t reserved_size);
// Disallows any access on memory region owned by given reservation object.
// Returns true if it succeeded and false otherwise.
@@ -304,7 +316,8 @@ class MemoryAllocator {
// See AllocatePage for public interface. Note that currently we only
// support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
- MemoryChunk* AllocatePagePooled(Space* owner);
+ base::Optional<MemoryChunkAllocationResult> AllocateUninitializedPageFromPool(
+ Space* space);
// Frees a pooled page. Only used on tear-down and last-resort GCs.
void FreePooledChunk(MemoryChunk* chunk);
@@ -314,7 +327,7 @@ class MemoryAllocator {
// collector to rebuild page headers in the from space, which is
// used as a marking stack and its page headers are destroyed.
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
+ PagedSpace* space);
void UpdateAllocatedSpaceLimits(Address low, Address high) {
// The use of atomic primitives does not guarantee correctness (wrt.
@@ -385,7 +398,7 @@ class MemoryAllocator {
std::atomic<Address> lowest_ever_allocated_;
std::atomic<Address> highest_ever_allocated_;
- VirtualMemory last_chunk_;
+ base::Optional<VirtualMemory> reserved_chunk_at_virtual_memory_limit_;
Unmapper unmapper_;
#ifdef DEBUG
diff --git a/deps/v8/src/heap/memory-chunk-layout.cc b/deps/v8/src/heap/memory-chunk-layout.cc
index 3b437928e6..ff2dbd915f 100644
--- a/deps/v8/src/heap/memory-chunk-layout.cc
+++ b/deps/v8/src/heap/memory-chunk-layout.cc
@@ -4,6 +4,7 @@
#include "src/heap/memory-chunk-layout.h"
+#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
@@ -46,7 +47,7 @@ intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
AllocationSpace space) {
- if (space == CODE_SPACE) {
+ if (space == CODE_SPACE || space == CODE_LO_SPACE) {
return ObjectStartOffsetInCodePage();
}
return ObjectStartOffsetInDataPage();
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 9a76730e82..69c1151f2e 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -57,7 +57,6 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(SlotSet* [kNumSets], SlotSet),
FIELD(ProgressBar, ProgressBar),
FIELD(std::atomic<intptr_t>, LiveByteCount),
- FIELD(SlotSet*, SweepingSlotSet),
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
FIELD(void* [kNumSets], InvalidatedSlots),
FIELD(base::Mutex*, Mutex),
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 08baeee8b2..9554daf1d6 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -7,6 +7,7 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/common/globals.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
@@ -118,95 +119,85 @@ PageAllocator::Permission DefaultWritableCodePermissions() {
} // namespace
-MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
- Executability executable,
- PageSize page_size) {
- MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
-
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_SHARED],
- nullptr);
+MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
+ Address area_start, Address area_end,
+ VirtualMemory reservation, Executability executable,
+ PageSize page_size)
+ : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
+ std::move(reservation)) {
+ base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_SHARED], nullptr);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
- nullptr);
+ base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
}
- base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
- nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
+ base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_SHARED],
- nullptr);
- chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
- chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
+ invalidated_slots_[OLD_TO_NEW] = nullptr;
+ invalidated_slots_[OLD_TO_OLD] = nullptr;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// Not actually used but initialize anyway for predictability.
- chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
+ invalidated_slots_[OLD_TO_CODE] = nullptr;
}
- chunk->progress_bar_.Initialize();
- chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
- chunk->page_protection_change_mutex_ = new base::Mutex();
- chunk->write_unprotect_counter_ = 0;
- chunk->mutex_ = new base::Mutex();
- chunk->young_generation_bitmap_ = nullptr;
+ progress_bar_.Initialize();
+ set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
+ page_protection_change_mutex_ = new base::Mutex();
+ write_unprotect_counter_ = 0;
+ mutex_ = new base::Mutex();
+ young_generation_bitmap_ = nullptr;
- chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
- 0;
- chunk->external_backing_store_bytes_
- [ExternalBackingStoreType::kExternalString] = 0;
+ external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
+ external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;
- chunk->categories_ = nullptr;
+ categories_ = nullptr;
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
0);
if (executable == EXECUTABLE) {
- chunk->SetFlag(IS_EXECUTABLE);
+ SetFlag(IS_EXECUTABLE);
if (heap->write_protect_code_memory()) {
- chunk->write_unprotect_counter_ =
+ write_unprotect_counter_ =
heap->code_space_memory_modification_scope_depth();
} else {
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(chunk->area_start(), page_size));
- size_t area_size =
- RoundUp(chunk->area_end() - chunk->area_start(), page_size);
- CHECK(chunk->reservation_.SetPermissions(
- chunk->area_start(), area_size, DefaultWritableCodePermissions()));
+ DCHECK(IsAligned(area_start_, page_size));
+ size_t area_size = RoundUp(area_end_ - area_start_, page_size);
+ CHECK(reservation_.SetPermissions(area_start_, area_size,
+ DefaultWritableCodePermissions()));
}
}
- if (chunk->owner()->identity() == CODE_SPACE) {
- chunk->code_object_registry_ = new CodeObjectRegistry();
+ if (owner()->identity() == CODE_SPACE) {
+ code_object_registry_ = new CodeObjectRegistry();
} else {
- chunk->code_object_registry_ = nullptr;
+ code_object_registry_ = nullptr;
}
- chunk->possibly_empty_buckets_.Initialize();
+ possibly_empty_buckets_.Initialize();
if (page_size == PageSize::kRegular) {
- chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
- MemoryAllocator::GetCommitPageSizeBits(),
- chunk->size());
+ active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(), size());
} else {
// We do not track active system pages for large pages.
- chunk->active_system_pages_.Clear();
+ active_system_pages_.Clear();
}
// All pages of a shared heap need to be marked with this flag.
- if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
+ if (heap->IsShared()) SetFlag(MemoryChunk::IN_SHARED_HEAP);
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
#endif
#ifdef DEBUG
- ValidateOffsets(chunk);
+ ValidateOffsets(this);
#endif
-
- return chunk;
}
-size_t MemoryChunk::CommittedPhysicalMemory() {
+size_t MemoryChunk::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
}
@@ -252,7 +243,6 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
possibly_empty_buckets_.Release();
ReleaseSlotSet<OLD_TO_NEW>();
- ReleaseSweepingSlotSet();
ReleaseSlotSet<OLD_TO_OLD>();
if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -285,10 +275,6 @@ SlotSet* MemoryChunk::AllocateSlotSet() {
return AllocateSlotSet(&slot_set_[type]);
}
-SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
- return AllocateSlotSet(&sweeping_slot_set_);
-}
-
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
SlotSet* new_slot_set = SlotSet::Allocate(buckets());
SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
@@ -313,10 +299,6 @@ void MemoryChunk::ReleaseSlotSet() {
ReleaseSlotSet(&slot_set_[type]);
}
-void MemoryChunk::ReleaseSweepingSlotSet() {
- ReleaseSlotSet(&sweeping_slot_set_);
-}
-
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) {
SlotSet::Delete(*slot_set, buckets());
@@ -450,9 +432,6 @@ void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
reinterpret_cast<Address>(&chunk->live_byte_count_) - chunk->address(),
MemoryChunkLayout::kLiveByteCountOffset);
DCHECK_EQ(
- reinterpret_cast<Address>(&chunk->sweeping_slot_set_) - chunk->address(),
- MemoryChunkLayout::kSweepingSlotSetOffset);
- DCHECK_EQ(
reinterpret_cast<Address>(&chunk->typed_slot_set_) - chunk->address(),
MemoryChunkLayout::kTypedSlotSetOffset);
DCHECK_EQ(
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index 8a8f556426..103d6d59d7 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -53,6 +53,10 @@ class MemoryChunk : public BasicMemoryChunk {
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
+ MemoryChunk(Heap* heap, BaseSpace* space, size_t size, Address area_start,
+ Address area_end, VirtualMemory reservation,
+ Executability executable, PageSize page_size);
+
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return cast(BasicMemoryChunk::FromAddress(a));
@@ -94,7 +98,7 @@ class MemoryChunk : public BasicMemoryChunk {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
- bool SweepingDone() {
+ bool SweepingDone() const {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
}
@@ -111,13 +115,6 @@ class MemoryChunk : public BasicMemoryChunk {
return slot_set_[type];
}
- template <AccessMode access_mode = AccessMode::ATOMIC>
- SlotSet* sweeping_slot_set() {
- if (access_mode == AccessMode::ATOMIC)
- return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
- return sweeping_slot_set_;
- }
-
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
@@ -134,7 +131,7 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseSlotSet();
void ReleaseSlotSet(SlotSet** slot_set);
- void ReleaseSweepingSlotSet();
+
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
@@ -161,7 +158,7 @@ class MemoryChunk : public BasicMemoryChunk {
int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
- V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
+ V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const;
class ProgressBar& ProgressBar() {
return progress_bar_;
@@ -174,7 +171,7 @@ class MemoryChunk : public BasicMemoryChunk {
inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
- size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const {
return external_backing_store_bytes_[type];
}
@@ -219,9 +216,6 @@ class MemoryChunk : public BasicMemoryChunk {
#endif
protected:
- static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
- Executability executable, PageSize page_size);
-
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
void ReleaseAllAllocatedMemory();
@@ -254,7 +248,6 @@ class MemoryChunk : public BasicMemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// sets for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
- SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index 0f1a3a361a..1a35823f34 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -85,74 +85,6 @@ HeapObject SemiSpaceObjectIterator::Next() {
// -----------------------------------------------------------------------------
// NewSpace
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!FLAG_single_generation);
- DCHECK(!FLAG_enable_third_party_heap);
-#if DEBUG
- VerifyTop();
-#endif
-
- AllocationResult result;
-
- if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
- result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
- } else {
- result = AllocateFastUnaligned(size_in_bytes, origin);
- }
-
- return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
- : result;
-}
-
-AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
- return AllocationResult::Failure();
- }
- HeapObject obj =
- HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return AllocationResult::FromObject(obj);
-}
-
-AllocationResult NewSpace::AllocateFastAligned(
- int size_in_bytes, int* result_aligned_size_in_bytes,
- AllocationAlignment alignment, AllocationOrigin origin) {
- Address top = allocation_info_->top();
- int filler_size = Heap::GetFillToAlign(top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
- return AllocationResult::Failure();
- }
- HeapObject obj = HeapObject::FromAddress(
- allocation_info_->IncrementTop(aligned_size_in_bytes));
- if (result_aligned_size_in_bytes)
- *result_aligned_size_in_bytes = aligned_size_in_bytes;
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- if (filler_size > 0) {
- obj = heap()->PrecedeWithFiller(obj, filler_size);
- }
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return AllocationResult::FromObject(obj);
-}
-
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 685e631f23..e57c58df26 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -39,6 +39,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
if (IsCommitted()) {
const int expected_pages =
static_cast<int>(target_capacity_ / Page::kPageSize);
+ // `target_capacity_` is a multiple of `Page::kPageSize`.
+ DCHECK_EQ(target_capacity_, expected_pages * Page::kPageSize);
MemoryChunk* current_page = first_page();
int actual_pages = 0;
@@ -49,17 +51,27 @@ bool SemiSpace::EnsureCurrentCapacity() {
current_page = current_page->list_node().next();
}
+ DCHECK_LE(actual_pages, expected_pages);
+
// Free all overallocated pages which are behind current_page.
while (current_page) {
+ DCHECK_EQ(actual_pages, expected_pages);
MemoryChunk* next_current = current_page->list_node().next();
+ // Promoted pages contain live objects and should not be discarded.
+ DCHECK(!current_page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ // `current_page_` contains the current allocation area. Thus, we should
+ // never free the `current_page_`. Furthermore, live objects generally
+ // reside before the current allocation area, so `current_page_` also
+ // serves as a guard against freeing pages with live objects on them.
+ DCHECK_NE(current_page, current_page_);
AccountUncommitted(Page::kPageSize);
DecrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->ClearFlags(Page::kIsInYoungGenerationMask);
- heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
- current_page);
+ heap()->memory_allocator()->Free(
+ MemoryAllocator::FreeMode::kConcurrentlyAndPool, current_page);
current_page = next_current;
}
@@ -69,20 +81,19 @@ bool SemiSpace::EnsureCurrentCapacity() {
while (actual_pages < expected_pages) {
actual_pages++;
current_page = heap()->memory_allocator()->AllocatePage(
- MemoryAllocator::kUsePool,
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
AccountCommitted(Page::kPageSize);
IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
+ current_page->SetFlags(first_page()->GetFlags());
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
}
+ DCHECK_EQ(expected_pages, actual_pages);
}
return true;
}
@@ -115,8 +126,7 @@ bool SemiSpace::Commit() {
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
Page* new_page = heap()->memory_allocator()->AllocatePage(
- MemoryAllocator::kUsePool,
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
+ MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
DCHECK(!IsCommitted());
@@ -142,8 +152,8 @@ bool SemiSpace::Uncommit() {
MemoryChunk* chunk = memory_chunk_list_.front();
DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory());
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
- chunk);
+ heap()->memory_allocator()->Free(
+ MemoryAllocator::FreeMode::kConcurrentlyAndPool, chunk);
}
current_page_ = nullptr;
current_capacity_ = 0;
@@ -157,7 +167,7 @@ bool SemiSpace::Uncommit() {
return true;
}
-size_t SemiSpace::CommittedPhysicalMemory() {
+size_t SemiSpace::CommittedPhysicalMemory() const {
if (!IsCommitted()) return 0;
if (!base::OS::HasLazyCommits()) return CommittedMemory();
return committed_physical_memory_;
@@ -178,8 +188,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page = heap()->memory_allocator()->AllocatePage(
- MemoryAllocator::kUsePool,
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
+ MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
@@ -202,8 +211,8 @@ void SemiSpace::RewindPages(int num_pages) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory());
- heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
- last);
+ heap()->memory_allocator()->Free(
+ MemoryAllocator::FreeMode::kConcurrentlyAndPool, last);
num_pages--;
}
}
@@ -265,7 +274,7 @@ void SemiSpace::RemovePage(Page* page) {
}
void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
+ page->SetFlags(current_page()->GetFlags());
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
@@ -352,7 +361,7 @@ void SemiSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
+void SemiSpace::Verify() const {
bool is_from_space = (id_ == kFromSpace);
size_t external_backing_store_bytes[kNumTypes];
@@ -363,7 +372,7 @@ void SemiSpace::Verify() {
int actual_pages = 0;
size_t computed_committed_physical_memory = 0;
- for (Page* page : *this) {
+ for (const Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
@@ -426,7 +435,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator implementation.
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(const NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
@@ -436,7 +445,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
limit_ = end;
}
-size_t NewSpace::CommittedPhysicalMemory() {
+size_t NewSpace::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
size_t size = to_space_.CommittedPhysicalMemory();
@@ -467,7 +476,9 @@ NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
ResetLinearAllocationArea();
}
-void NewSpace::TearDown() {
+NewSpace::~NewSpace() {
+ // Tears down the space. Heap memory was not allocated by the space, so it
+ // is not deallocated here.
allocation_info_->Reset(kNullAddress, kNullAddress);
to_space_.TearDown();
@@ -617,7 +628,14 @@ bool NewSpace::AddParkedAllocationBuffer(int size_in_bytes,
}
bool NewSpace::EnsureAllocation(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin,
+ int* out_max_aligned_size) {
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+#if DEBUG
+ VerifyTop();
+#endif // DEBUG
+
AdvanceAllocationObservers();
Address old_top = allocation_info_->top();
@@ -625,26 +643,29 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
- if (old_top + aligned_size_in_bytes <= high) {
- UpdateInlineAllocationLimit(aligned_size_in_bytes);
- return true;
- }
+ if (old_top + aligned_size_in_bytes > high) {
+ // Not enough room in the page, try to allocate a new one.
+ if (!AddFreshPage()) {
+ // When we cannot grow NewSpace anymore we query for parked allocations.
+ if (!FLAG_allocation_buffer_parking ||
+ !AddParkedAllocationBuffer(size_in_bytes, alignment))
+ return false;
+ }
- // Not enough room in the page, try to allocate a new one.
- if (!AddFreshPage()) {
- // When we cannot grow NewSpace anymore we query for parked allocations.
- if (!FLAG_allocation_buffer_parking ||
- !AddParkedAllocationBuffer(size_in_bytes, alignment))
- return false;
+ old_top = allocation_info_->top();
+ high = to_space_.page_high();
+ filler_size = Heap::GetFillToAlign(old_top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
}
- old_top = allocation_info_->top();
- high = to_space_.page_high();
- filler_size = Heap::GetFillToAlign(old_top, alignment);
- aligned_size_in_bytes = size_in_bytes + filler_size;
+ if (out_max_aligned_size) {
+ *out_max_aligned_size = aligned_size_in_bytes;
+ }
DCHECK(old_top + aligned_size_in_bytes <= high);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
}
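The alignment bookkeeping in EnsureAllocation amounts to computing the filler needed to align the current top and growing the request by that amount. A self-contained sketch with made-up numbers (GetFillToAlign here is a stand-in for Heap::GetFillToAlign):

#include <cstdint>
#include <iostream>

// Padding needed to bring `top` up to `alignment`.
int GetFillToAlign(std::uintptr_t top, int alignment) {
  const int misalignment =
      static_cast<int>(top % static_cast<std::uintptr_t>(alignment));
  return misalignment == 0 ? 0 : alignment - misalignment;
}

int main() {
  const std::uintptr_t old_top = 0x4008;  // hypothetical allocation top
  const int alignment = 16;               // e.g. double-word alignment
  const int size_in_bytes = 24;

  const int filler_size = GetFillToAlign(old_top, alignment);
  const int aligned_size_in_bytes = size_in_bytes + filler_size;

  // 0x4008 needs 8 filler bytes to reach 0x4010, so a 24-byte request
  // consumes 32 bytes of the linear allocation area.
  std::cout << filler_size << " " << aligned_size_in_bytes << "\n";  // 8 32
  return 0;
}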
@@ -662,54 +683,6 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
-AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-}
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK(!FLAG_enable_third_party_heap);
- if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
- return AllocationResult::Failure();
- }
-
- DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
-
- AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
- DCHECK(!result.IsFailure());
-
- InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
- size_in_bytes);
-
- return result;
-}
-
-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!FLAG_enable_third_party_heap);
- if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Failure();
- }
-
- DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
-
- int aligned_size_in_bytes;
-
- AllocationResult result = AllocateFastAligned(
- size_in_bytes, &aligned_size_in_bytes, alignment, origin);
- DCHECK(!result.IsFailure());
-
- InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
- aligned_size_in_bytes, aligned_size_in_bytes);
-
- return result;
-}
-
void NewSpace::MakeLinearAllocationAreaIterable() {
Address to_top = top();
Page* page = Page::FromAddress(to_top - kTaggedSize);
@@ -725,10 +698,9 @@ void NewSpace::FreeLinearAllocationArea() {
UpdateInlineAllocationLimit(0);
}
-void NewSpace::VerifyTop() {
- // Ensure validity of LAB: start <= top <= limit
- DCHECK_LE(allocation_info_->start(), allocation_info_->top());
- DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
+#if DEBUG
+void NewSpace::VerifyTop() const {
+ SpaceWithLinearArea::VerifyTop();
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
@@ -739,11 +711,12 @@ void NewSpace::VerifyTop() {
DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
}
+#endif // DEBUG
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify(Isolate* isolate) {
+void NewSpace::Verify(Isolate* isolate) const {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
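
Note on the reshaped NewSpace::EnsureAllocation above: the control flow now retries once after growing the space (or falling back to a parked allocation buffer) and reports the worst-case aligned size to the caller. A minimal sketch of that flow, using simplified stand-in types and placeholder helpers rather than V8's real classes:

// Sketch only; ToySpace and its helpers are hypothetical stand-ins.
#include <cstdint>

using Address = uintptr_t;

struct ToySpace {
  Address top = 0;        // linear allocation area top
  Address page_high = 0;  // end of the current to-space page
  bool buffer_parking_enabled = true;

  static int FillToAlign(Address addr, int alignment) {
    return static_cast<int>((alignment - addr % alignment) % alignment);
  }

  // Placeholders for the real page-growing / parked-buffer logic.
  bool AddFreshPage() { return false; }
  bool AddParkedAllocationBuffer(int, int) { return false; }

  bool EnsureAllocation(int size_in_bytes, int alignment,
                        int* out_max_aligned_size) {
    Address old_top = top;
    Address high = page_high;
    int filler = FillToAlign(old_top, alignment);
    int aligned_size = size_in_bytes + filler;

    if (old_top + aligned_size > high) {
      // Not enough room on the page: grow, or fall back to a parked buffer.
      if (!AddFreshPage() &&
          (!buffer_parking_enabled ||
           !AddParkedAllocationBuffer(size_in_bytes, alignment))) {
        return false;
      }
      // The allocation area may have moved; recompute the aligned size.
      old_top = top;
      high = page_high;
      filler = FillToAlign(old_top, alignment);
      aligned_size = size_in_bytes + filler;
    }

    if (out_max_aligned_size) *out_max_aligned_size = aligned_size;
    // The real code asserts this with a DCHECK and then returns true.
    return old_top + aligned_size <= high;
  }
};
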
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index b31dfa28e4..5f7fa8328d 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -59,7 +59,7 @@ class SemiSpace : public Space {
bool Commit();
bool Uncommit();
- bool IsCommitted() { return !memory_chunk_list_.Empty(); }
+ bool IsCommitted() const { return !memory_chunk_list_.Empty(); }
// Grow the semispace to the new capacity. The new capacity requested must
// be larger than the current capacity and less than the maximum capacity.
@@ -73,7 +73,7 @@ class SemiSpace : public Space {
bool EnsureCurrentCapacity();
// Returns the start address of the first page of the space.
- Address space_start() {
+ Address space_start() const {
DCHECK_NE(memory_chunk_list_.front(), nullptr);
return memory_chunk_list_.front()->area_start();
}
@@ -81,10 +81,10 @@ class SemiSpace : public Space {
Page* current_page() { return current_page_; }
// Returns the start address of the current page of the space.
- Address page_low() { return current_page_->area_start(); }
+ Address page_low() const { return current_page_->area_start(); }
// Returns one past the end address of the current page of the space.
- Address page_high() { return current_page_->area_end(); }
+ Address page_high() const { return current_page_->area_end(); }
bool AdvancePage() {
Page* next_page = current_page_->next_page();
@@ -109,34 +109,34 @@ class SemiSpace : public Space {
Page* InitializePage(MemoryChunk* chunk) override;
// Age mark accessors.
- Address age_mark() { return age_mark_; }
+ Address age_mark() const { return age_mark_; }
void set_age_mark(Address mark);
// Returns the current capacity of the semispace.
- size_t current_capacity() { return current_capacity_; }
+ size_t current_capacity() const { return current_capacity_; }
// Returns the target capacity of the semispace.
- size_t target_capacity() { return target_capacity_; }
+ size_t target_capacity() const { return target_capacity_; }
// Returns the maximum capacity of the semispace.
- size_t maximum_capacity() { return maximum_capacity_; }
+ size_t maximum_capacity() const { return maximum_capacity_; }
// Returns the initial capacity of the semispace.
- size_t minimum_capacity() { return minimum_capacity_; }
+ size_t minimum_capacity() const { return minimum_capacity_; }
- SemiSpaceId id() { return id_; }
+ SemiSpaceId id() const { return id_; }
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
+ size_t CommittedPhysicalMemory() const override;
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called:
- size_t Size() override { UNREACHABLE(); }
+ size_t Size() const override { UNREACHABLE(); }
- size_t SizeOfObjects() override { return Size(); }
+ size_t SizeOfObjects() const override { return Size(); }
- size_t Available() override { UNREACHABLE(); }
+ size_t Available() const override { UNREACHABLE(); }
Page* first_page() override {
return reinterpret_cast<Page*>(memory_chunk_list_.front());
@@ -172,7 +172,7 @@ class SemiSpace : public Space {
#endif
#ifdef VERIFY_HEAP
- virtual void Verify();
+ virtual void Verify() const;
#endif
void AddRangeToActiveSystemPages(Address start, Address end);
@@ -220,7 +220,7 @@ class SemiSpace : public Space {
class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceObjectIterator(NewSpace* space);
+ explicit SemiSpaceObjectIterator(const NewSpace* space);
inline HeapObject Next() override;
@@ -239,7 +239,7 @@ class SemiSpaceObjectIterator : public ObjectIterator {
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
-class V8_EXPORT_PRIVATE NewSpace
+class V8_EXPORT_PRIVATE NewSpace final
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
@@ -249,16 +249,12 @@ class V8_EXPORT_PRIVATE NewSpace
size_t initial_semispace_capacity, size_t max_semispace_capacity,
LinearAllocationArea* allocation_info);
- ~NewSpace() override { TearDown(); }
+ ~NewSpace() override;
inline bool ContainsSlow(Address a) const;
inline bool Contains(Object o) const;
inline bool Contains(HeapObject o) const;
- // Tears down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
void ResetParkedAllocationBuffers();
// Flip the pair of spaces.
@@ -272,17 +268,17 @@ class V8_EXPORT_PRIVATE NewSpace
void Shrink();
// Return the allocated bytes in the active semispace.
- size_t Size() final {
+ size_t Size() const final {
DCHECK_GE(top(), to_space_.page_low());
return (to_space_.current_capacity() - Page::kPageSize) / Page::kPageSize *
MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low());
}
- size_t SizeOfObjects() final { return Size(); }
+ size_t SizeOfObjects() const final { return Size(); }
// Return the allocatable capacity of a semispace.
- size_t Capacity() {
+ size_t Capacity() const {
SLOW_DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
return (to_space_.target_capacity() / Page::kPageSize) *
MemoryChunkLayout::AllocatableMemoryInDataPage();
@@ -290,27 +286,27 @@ class V8_EXPORT_PRIVATE NewSpace
// Return the current size of a semispace, allocatable and non-allocatable
// memory.
- size_t TotalCapacity() {
+ size_t TotalCapacity() const {
DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
return to_space_.target_capacity();
}
// Committed memory for NewSpace is the committed memory of both semi-spaces
// combined.
- size_t CommittedMemory() final {
+ size_t CommittedMemory() const final {
return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
- size_t MaximumCommittedMemory() final {
+ size_t MaximumCommittedMemory() const final {
return from_space_.MaximumCommittedMemory() +
to_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() final;
+ size_t CommittedPhysicalMemory() const final;
// Return the available bytes without growing.
- size_t Available() final {
+ size_t Available() const final {
DCHECK_GE(Capacity(), Size());
return Capacity() - Size();
}
@@ -322,7 +318,7 @@ class V8_EXPORT_PRIVATE NewSpace
return to_space_.ExternalBackingStoreBytes(type);
}
- size_t ExternalBackingStoreBytes() {
+ size_t ExternalBackingStoreBytes() const {
size_t result = 0;
for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
result +=
@@ -331,7 +327,7 @@ class V8_EXPORT_PRIVATE NewSpace
return result;
}
- size_t AllocatedSinceLastGC() {
+ size_t AllocatedSinceLastGC() const {
const Address age_mark = to_space_.age_mark();
DCHECK_NE(age_mark, kNullAddress);
DCHECK_NE(top(), kNullAddress);
@@ -368,49 +364,45 @@ class V8_EXPORT_PRIVATE NewSpace
bool Rebalance();
// Return the maximum capacity of a semispace.
- size_t MaximumCapacity() {
+ size_t MaximumCapacity() const {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
return to_space_.maximum_capacity();
}
- bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
+ bool IsAtMaximumCapacity() const {
+ return TotalCapacity() == MaximumCapacity();
+ }
// Returns the initial capacity of a semispace.
- size_t InitialTotalCapacity() {
+ size_t InitialTotalCapacity() const {
DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
return to_space_.minimum_capacity();
}
- void VerifyTop();
+#if DEBUG
+ void VerifyTop() const;
+#endif // DEBUG
- Address original_top_acquire() {
+ Address original_top_acquire() const {
return original_top_.load(std::memory_order_acquire);
}
- Address original_limit_relaxed() {
+ Address original_limit_relaxed() const {
return original_limit_.load(std::memory_order_relaxed);
}
// Return the address of the first allocatable address in the active
// semispace. This may be the address where the first object resides.
- Address first_allocatable_address() { return to_space_.space_start(); }
+ Address first_allocatable_address() const { return to_space_.space_start(); }
// Get the age mark of the inactive semispace.
- Address age_mark() { return from_space_.age_mark(); }
+ Address age_mark() const { return from_space_.age_mark(); }
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
- V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
@@ -437,7 +429,7 @@ class V8_EXPORT_PRIVATE NewSpace
#ifdef VERIFY_HEAP
// Verify the active semispace.
- virtual void Verify(Isolate* isolate);
+ virtual void Verify(Isolate* isolate) const;
#endif
#ifdef DEBUG
@@ -456,7 +448,7 @@ class V8_EXPORT_PRIVATE NewSpace
return from_space_.Uncommit();
}
- bool IsFromSpaceCommitted() { return from_space_.IsCommitted(); }
+ bool IsFromSpaceCommitted() const { return from_space_.IsCommitted(); }
SemiSpace* active_space() { return &to_space_; }
@@ -520,23 +512,10 @@ class V8_EXPORT_PRIVATE NewSpace
ParkedAllocationBuffersVector parked_allocation_buffers_;
- // Internal allocation methods.
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
- AllocationAlignment alignment, AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
- bool SupportsAllocationObserver() override { return true; }
+ bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin,
+ int* out_max_aligned_size) final;
+ bool SupportsAllocationObserver() const override { return true; }
friend class SemiSpaceObjectIterator;
};
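
Most of the churn in this header is a const-correctness pass: accessors that only read state gain a const qualifier so that verification and iteration code can hold const pointers to the space. A toy illustration of the payoff, with simplified types rather than V8's:

#include <cstddef>

class ToySemiSpace {  // simplified stand-in for a space class
 public:
  // Marked const: callers holding a `const ToySemiSpace*` can query it.
  size_t current_capacity() const { return current_capacity_; }

 private:
  size_t current_capacity_ = 4 * 1024;
};

// With const accessors, read-only consumers such as verifiers can take
// const pointers instead of casting constness away.
size_t ReportCapacity(const ToySemiSpace* space) {
  return space->current_capacity();
}
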
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 32df0a46d9..9494656bde 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -40,7 +40,9 @@ namespace internal {
V(JSWeakRef) \
V(Map) \
V(NativeContext) \
+ V(Oddball) \
V(PreparseData) \
+ V(PromiseOnStack) \
V(PropertyArray) \
V(PropertyCell) \
V(PrototypeInfo) \
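
The one-line additions of Oddball and PromiseOnStack above go into an X-macro list, so each new entry fans out into every place the list is expanded. A self-contained example of that pattern in the same spirit; the DEMO_* names are made up for illustration:

#include <cstdio>

#define DEMO_TYPE_LIST(V) \
  V(Map)                  \
  V(Oddball)              \
  V(PropertyArray)

// Expand the list into an enum of visitor ids...
#define DEMO_ENUM_ENTRY(Name) k##Name,
enum class DemoVisitorId { DEMO_TYPE_LIST(DEMO_ENUM_ENTRY) kCount };
#undef DEMO_ENUM_ENTRY

// ...and into a parallel name table that stays in sync automatically.
#define DEMO_NAME_ENTRY(Name) #Name,
static const char* const kDemoNames[] = {DEMO_TYPE_LIST(DEMO_NAME_ENTRY)};
#undef DEMO_NAME_ENTRY

int main() {
  for (const char* name : kDemoNames) std::printf("%s\n", name);
  return 0;
}
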
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index fbade0ea3d..df725d8c88 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -86,107 +86,6 @@ bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
return false;
}
-bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
- if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
- return true;
- }
- return RefillLabMain(size_in_bytes, origin);
-}
-
-AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
- if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
- return AllocationResult::Failure();
- }
- return AllocationResult::FromObject(
- HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
-}
-
-AllocationResult PagedSpace::AllocateFastAligned(
- int size_in_bytes, int* aligned_size_in_bytes,
- AllocationAlignment alignment) {
- Address current_top = allocation_info_->top();
- int filler_size = Heap::GetFillToAlign(current_top, alignment);
- int aligned_size = filler_size + size_in_bytes;
- if (!allocation_info_->CanIncrementTop(aligned_size)) {
- return AllocationResult::Failure();
- }
- HeapObject obj =
- HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
- if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
- if (filler_size > 0) {
- obj = heap()->PrecedeWithFiller(obj, filler_size);
- }
- return AllocationResult::FromObject(obj);
-}
-
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK(!FLAG_enable_third_party_heap);
- if (!EnsureLabMain(size_in_bytes, origin)) {
- return AllocationResult::Failure();
- }
-
- AllocationResult result = AllocateFastUnaligned(size_in_bytes);
- DCHECK(!result.IsFailure());
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
- size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
- size_in_bytes);
-
- return result;
-}
-
-AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!FLAG_enable_third_party_heap);
- DCHECK_EQ(identity(), OLD_SPACE);
- int allocation_size = size_in_bytes;
- // We don't know exactly how much filler we need to align until space is
- // allocated, so assume the worst case.
- int filler_size = Heap::GetMaximumFillToAlign(alignment);
- allocation_size += filler_size;
- if (!EnsureLabMain(allocation_size, origin)) {
- return AllocationResult::Failure();
- }
- int aligned_size_in_bytes;
- AllocationResult result =
- AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
- DCHECK(!result.IsFailure());
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
- size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
- aligned_size_in_bytes, allocation_size);
-
- return result;
-}
-
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!FLAG_enable_third_party_heap);
- AllocationResult result;
-
- if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
- result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
- } else {
- result = AllocateFastUnaligned(size_in_bytes);
- }
-
- return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
- : result;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index c5604254be..ab8b185d70 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -107,7 +107,8 @@ void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
+ chunk);
}
accounting_stats_.Clear();
}
@@ -133,14 +134,6 @@ void PagedSpace::RefillFreeList() {
});
}
- // Also merge old-to-new remembered sets if not scavenging because of
- // data races: One thread might iterate remembered set, while another
- // thread merges them.
- if (compaction_space_kind() !=
- CompactionSpaceKind::kCompactionSpaceForScavenge) {
- p->MergeOldToNewRememberedSets();
- }
-
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -186,8 +179,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
- p->MergeOldToNewRememberedSets();
-
// Ensure that pages are initialized before objects on it are discovered by
// concurrent markers.
p->InitializationMemoryFence();
@@ -214,7 +205,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK_EQ(0u, other->Capacity());
}
-size_t PagedSpace::CommittedPhysicalMemory() {
+size_t PagedSpace::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) {
DCHECK_EQ(0, committed_physical_memory());
return CommittedMemory();
@@ -240,10 +231,10 @@ void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
}
#if DEBUG
-void PagedSpace::VerifyCommittedPhysicalMemory() {
+void PagedSpace::VerifyCommittedPhysicalMemory() const {
heap()->safepoint()->AssertActive();
size_t size = 0;
- for (Page* page : *this) {
+ for (const Page* page : *this) {
DCHECK(page->SweepingDone());
size += page->CommittedPhysicalMemory();
}
@@ -351,13 +342,9 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
}
-Page* PagedSpace::AllocatePage() {
- return heap()->memory_allocator()->AllocatePage(
- MemoryAllocator::kRegular, AreaSize(), this, executable());
-}
-
Page* PagedSpace::Expand() {
- Page* page = AllocatePage();
+ Page* page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return nullptr;
ConcurrentAllocationMutex guard(this);
AddPage(page);
@@ -368,7 +355,8 @@ Page* PagedSpace::Expand() {
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
size_t size_in_bytes) {
- Page* page = AllocatePage();
+ Page* page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return {};
base::MutexGuard lock(&space_mutex_);
AddPage(page);
@@ -383,9 +371,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
return std::make_pair(object_start, size_in_bytes);
}
-int PagedSpace::CountTotalPages() {
+int PagedSpace::CountTotalPages() const {
int count = 0;
- for (Page* page : *this) {
+ for (const Page* page : *this) {
count++;
USE(page);
}
@@ -459,7 +447,7 @@ void PagedSpace::MakeLinearAllocationAreaIterable() {
}
}
-size_t PagedSpace::Available() {
+size_t PagedSpace::Available() const {
ConcurrentAllocationMutex guard(this);
return free_list_->Available();
}
@@ -528,7 +516,8 @@ void PagedSpace::ReleasePage(Page* page) {
AccountUncommitted(page->size());
DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+ page);
}
void PagedSpace::SetReadable() {
@@ -643,14 +632,10 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (IsSweepingAllowedOnThread(local_heap)) {
// Now contribute to sweeping from background thread and then try to
// reallocate.
- Sweeper::FreeSpaceMayContainInvalidatedSlots
- invalidated_slots_in_free_space =
- Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
const int kMaxPagesToSweep = 1;
int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
- invalidated_slots_in_free_space);
+ identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
+ static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
RefillFreeList();
@@ -734,7 +719,7 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
return std::make_pair(start, used_size_in_bytes);
}
-bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
+bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
// Code space sweeping is only allowed on main thread.
return (local_heap && local_heap->is_main_thread()) ||
identity() != CODE_SPACE;
@@ -1009,40 +994,20 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
AllocationOrigin origin) {
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
- Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
- is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
- : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+ Sweeper::SweepingMode sweeping_mode =
+ is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
+ : Sweeper::SweepingMode::kLazyOrConcurrent;
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
- max_pages,
- invalidated_slots_in_free_space);
+ collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
+ required_freed_bytes, max_pages);
RefillFreeList();
return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
return false;
}
-AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (!is_compaction_space()) {
- // Start incremental marking before the actual allocation, this allows the
- // allocation function to mark the object black when incremental marking is
- // running.
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
- }
-
- AllocationResult result =
- USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
- return result;
-}
-
void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
Address end) {
DCHECK_LE(page->address(), start);
@@ -1065,6 +1030,31 @@ void PagedSpace::ReduceActiveSystemPages(
MemoryAllocator::GetCommitPageSize());
}
+bool PagedSpace::EnsureAllocation(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin,
+ int* out_max_aligned_size) {
+ if (!is_compaction_space()) {
+ // Start incremental marking before the actual allocation, this allows the
+ // allocation function to mark the object black when incremental marking is
+ // running.
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ }
+
+ // We don't know exactly how much filler we need to align until space is
+ // allocated, so assume the worst case.
+ size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
+ if (out_max_aligned_size) {
+ *out_max_aligned_size = size_in_bytes;
+ }
+ if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
+ return true;
+ }
+ return RefillLabMain(size_in_bytes, origin);
+}
+
// -----------------------------------------------------------------------------
// MapSpace implementation
@@ -1101,7 +1091,7 @@ void MapSpace::SortFreeList() {
}
#ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
+void MapSpace::VerifyObject(HeapObject object) const { CHECK(object.IsMap()); }
#endif
} // namespace internal
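
The new PagedSpace::EnsureAllocation above absorbs the old EnsureLabMain check and the worst-case alignment padding into the shared slow path. A rough sketch with simplified stand-in types; the incremental-marking trigger is elided and RefillLabMain is only a placeholder here:

#include <cstdint>

using Address = uintptr_t;

struct ToyPagedSpace {  // simplified stand-in, not the real PagedSpace
  Address top = 0;
  Address limit = 0;

  // Worst-case filler needed for the requested alignment.
  static int MaximumFillToAlign(int alignment) { return alignment - 1; }

  // Placeholder for the real free-list refill / sweeping logic.
  bool RefillLabMain(int /*size_in_bytes*/) { return false; }

  bool EnsureAllocation(int size_in_bytes, int alignment,
                        int* out_max_aligned_size) {
    // The exact filler is unknown until the address is fixed, so budget for
    // the worst case up front.
    size_in_bytes += MaximumFillToAlign(alignment);
    if (out_max_aligned_size) *out_max_aligned_size = size_in_bytes;
    if (top + size_in_bytes <= limit) return true;  // fits in the current LAB
    return RefillLabMain(size_in_bytes);            // otherwise refill
  }
};
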
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 2df7083a84..c1df3afca5 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -69,7 +69,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
- PagedSpace* space_;
+ const PagedSpace* const space_;
PageRange page_range_;
PageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
@@ -99,19 +99,19 @@ class V8_EXPORT_PRIVATE PagedSpace
bool ContainsSlow(Address addr) const;
// Does the space need executable memory?
- Executability executable() { return executable_; }
+ Executability executable() const { return executable_; }
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
- size_t Capacity() { return accounting_stats_.Capacity(); }
+ size_t Capacity() const { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
+ size_t CommittedPhysicalMemory() const override;
#if DEBUG
- void VerifyCommittedPhysicalMemory();
+ void VerifyCommittedPhysicalMemory() const;
#endif // DEBUG
void IncrementCommittedPhysicalMemory(size_t increment_value);
@@ -131,34 +131,17 @@ class V8_EXPORT_PRIVATE PagedSpace
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- size_t Available() override;
+ size_t Available() const override;
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
- size_t Size() override { return accounting_stats_.Size(); }
+ size_t Size() const override { return accounting_stats_.Size(); }
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
- virtual size_t Waste() { return free_list_->wasted_bytes(); }
-
- // Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space double aligned if
- // possible, return a failure object if not.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space and consider allocation
- // alignment if needed.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
+ virtual size_t Waste() const { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space from a background
// thread.
@@ -257,7 +240,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject obj) {}
+ virtual void VerifyObject(HeapObject obj) const {}
#endif
#ifdef DEBUG
@@ -271,19 +254,21 @@ class V8_EXPORT_PRIVATE PagedSpace
static void ResetCodeStatistics(Isolate* isolate);
#endif
- bool CanExpand(size_t size);
+ bool CanExpand(size_t size) const;
// Returns the number of total pages in this space.
- int CountTotalPages();
+ int CountTotalPages() const;
// Return size of allocatable area on a page in this space.
- inline int AreaSize() { return static_cast<int>(area_size_); }
+ inline int AreaSize() const { return static_cast<int>(area_size_); }
- bool is_compaction_space() {
+ bool is_compaction_space() const {
return compaction_space_kind_ != CompactionSpaceKind::kNone;
}
- CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; }
+ CompactionSpaceKind compaction_space_kind() const {
+ return compaction_space_kind_;
+ }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
@@ -321,9 +306,9 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetLinearAllocationArea(Address top, Address limit);
- Address original_top() { return original_top_; }
+ Address original_top() const { return original_top_; }
- Address original_limit() { return original_limit_; }
+ Address original_limit() const { return original_limit_; }
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
@@ -343,7 +328,7 @@ class V8_EXPORT_PRIVATE PagedSpace
private:
class ConcurrentAllocationMutex {
public:
- explicit ConcurrentAllocationMutex(PagedSpace* space) {
+ explicit ConcurrentAllocationMutex(const PagedSpace* space) {
if (space->SupportsConcurrentAllocation()) {
guard_.emplace(&space->space_mutex_);
}
@@ -352,29 +337,26 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<base::MutexGuard> guard_;
};
- bool SupportsConcurrentAllocation() { return !is_compaction_space(); }
+ bool SupportsConcurrentAllocation() const { return !is_compaction_space(); }
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit);
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
- bool SupportsAllocationObserver() override { return !is_compaction_space(); }
-
- // Slow path of allocation function
- V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin);
+ bool SupportsAllocationObserver() const override {
+ return !is_compaction_space();
+ }
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
- virtual bool snapshotable() { return true; }
+ virtual bool snapshotable() const { return true; }
- bool HasPages() { return first_page() != nullptr; }
+ bool HasPages() const { return first_page() != nullptr; }
// Returns whether sweeping of this space is safe on this thread. Code space
// sweeping is only allowed on the main thread.
- bool IsSweepingAllowedOnThread(LocalHeap* local_heap);
+ bool IsSweepingAllowedOnThread(LocalHeap* local_heap) const;
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
@@ -391,22 +373,9 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<std::pair<Address, size_t>> ExpandBackground(
size_t size_in_bytes);
- Page* AllocatePage();
-
- // Sets up a linear allocation area that fits the given number of bytes.
- // Returns false if there is not enough space and the caller has to retry
- // after collecting garbage.
- inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
- // Allocates an object from the linear allocation area. Assumes that the
- // linear allocation area is large enought to fit the object.
- inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
- // Tries to allocate an aligned object from the linear allocation area.
- // Returns nullptr if the linear allocation area does not fit the object.
- // Otherwise, returns the object pointer and writes the allocation size
- // (object size + alignment filler size) to the size_in_bytes.
- inline AllocationResult AllocateFastAligned(int size_in_bytes,
- int* aligned_size_in_bytes,
- AllocationAlignment alignment);
+ bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin,
+ int* out_max_aligned_size) final;
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);
@@ -450,7 +419,7 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationStats accounting_stats_;
// Mutex guarding any concurrent access to the space.
- base::Mutex space_mutex_;
+ mutable base::Mutex space_mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values are protected by pending_allocation_mutex_.
@@ -492,7 +461,7 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
Page* Expand() override;
// The space is temporary and not included in any snapshots.
- bool snapshotable() override { return false; }
+ bool snapshotable() const override { return false; }
// Pages that were allocated in this local space and need to be merged
// to the main space.
std::vector<Page*> new_pages_;
@@ -578,7 +547,7 @@ class MapSpace : public PagedSpace {
: PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
&paged_allocation_info_) {}
- int RoundSizeDownToObjectAlignment(int size) override {
+ int RoundSizeDownToObjectAlignment(int size) const override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
@@ -589,7 +558,7 @@ class MapSpace : public PagedSpace {
void SortFreeList();
#ifdef VERIFY_HEAP
- void VerifyObject(HeapObject obj) override;
+ void VerifyObject(HeapObject obj) const override;
#endif
private:
@@ -614,7 +583,7 @@ class OldGenerationMemoryChunkIterator {
kCodeLargeObjectState,
kFinishedState
};
- Heap* heap_;
+ Heap* const heap_;
State state_;
PageIterator old_iterator_;
PageIterator code_iterator_;
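
One detail worth noting in this header: space_mutex_ becomes mutable so that now-const members such as Available() and the const-taking ConcurrentAllocationMutex can still lock it; locking mutates the mutex object but not the logical state of the space. A generic illustration using std::mutex in place of base::Mutex:

#include <cstddef>
#include <mutex>

class ToySpace {  // generic example, not V8's PagedSpace
 public:
  // Logically read-only, hence const; the lock is an implementation detail.
  size_t Available() const {
    std::lock_guard<std::mutex> guard(space_mutex_);
    return available_;
  }

 private:
  mutable std::mutex space_mutex_;  // mutable: lockable from const members
  size_t available_ = 0;
};
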
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 05ca965e08..ae3c27dace 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -253,10 +253,12 @@ size_t ReadOnlyHeap::read_only_object_cache_size() const {
return read_only_object_cache_.size();
}
-ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(
+ const ReadOnlyHeap* ro_heap)
: ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
-ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(
+ const ReadOnlySpace* ro_space)
: ro_space_(ro_space),
current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
? std::vector<ReadOnlyPage*>::iterator()
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index 558a694c94..340986ad23 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -147,13 +147,13 @@ class SoleReadOnlyHeap : public ReadOnlyHeap {
// This class enables iterating over all read-only heap objects.
class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public:
- explicit ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap);
- explicit ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space);
+ explicit ReadOnlyHeapObjectIterator(const ReadOnlyHeap* ro_heap);
+ explicit ReadOnlyHeapObjectIterator(const ReadOnlySpace* ro_space);
HeapObject Next();
private:
- ReadOnlySpace* const ro_space_;
+ const ReadOnlySpace* const ro_space_;
std::vector<ReadOnlyPage*>::const_iterator current_page_;
Address current_addr_;
};
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 9265ca5963..236fbc2035 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -329,6 +329,19 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}
+ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
+ Address area_start, Address area_end,
+ VirtualMemory reservation)
+ : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
+ std::move(reservation)) {
+ allocated_bytes_ = 0;
+ SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
+ heap->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(this)
+ ->MarkAllBits();
+}
+
void ReadOnlyPage::MakeHeaderRelocatable() {
heap_ = nullptr;
owner_ = nullptr;
@@ -417,7 +430,7 @@ void ReadOnlySpace::Unseal() {
is_marked_read_only_ = false;
}
-bool ReadOnlySpace::ContainsSlow(Address addr) {
+bool ReadOnlySpace::ContainsSlow(Address addr) const {
BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
for (BasicMemoryChunk* chunk : pages_) {
if (chunk == c) return true;
@@ -429,7 +442,7 @@ namespace {
// Only iterates over a single chunk as the chunk iteration is done externally.
class ReadOnlySpaceObjectIterator : public ObjectIterator {
public:
- ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
+ ReadOnlySpaceObjectIterator(const Heap* heap, const ReadOnlySpace* space,
BasicMemoryChunk* chunk)
: cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {}
@@ -468,7 +481,7 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
- ReadOnlySpace* space_;
+ const ReadOnlySpace* const space_;
};
} // namespace
@@ -497,7 +510,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
};
} // namespace
-void ReadOnlySpace::Verify(Isolate* isolate) {
+void ReadOnlySpace::Verify(Isolate* isolate) const {
bool allocation_pointer_found_in_space = top_ == limit_;
VerifyReadOnlyPointersVisitor visitor(isolate->heap());
@@ -545,7 +558,7 @@ void ReadOnlySpace::Verify(Isolate* isolate) {
}
#ifdef DEBUG
-void ReadOnlySpace::VerifyCounters(Heap* heap) {
+void ReadOnlySpace::VerifyCounters(Heap* heap) const {
size_t total_capacity = 0;
size_t total_allocated = 0;
for (BasicMemoryChunk* page : pages_) {
@@ -569,7 +582,7 @@ void ReadOnlySpace::VerifyCounters(Heap* heap) {
#endif // DEBUG
#endif // VERIFY_HEAP
-size_t ReadOnlySpace::CommittedPhysicalMemory() {
+size_t ReadOnlySpace::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
BasicMemoryChunk::UpdateHighWaterMark(top_);
size_t size = 0;
@@ -612,7 +625,7 @@ void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
FreeLinearAllocationArea();
BasicMemoryChunk* chunk =
- heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
+ heap()->memory_allocator()->AllocateReadOnlyPage(this);
capacity_ += AreaSize();
accounting_stats_.IncreaseCapacity(chunk->area_size());
@@ -754,20 +767,6 @@ void ReadOnlySpace::ShrinkPages() {
limit_ = pages_.back()->area_end();
}
-ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
- ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
- page->allocated_bytes_ = 0;
- page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
- heap()
- ->incremental_marking()
- ->non_atomic_marking_state()
- ->bitmap(chunk)
- ->MarkAllBits();
- chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);
-
- return page;
-}
-
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
diff --git a/deps/v8/src/heap/read-only-spaces.h b/deps/v8/src/heap/read-only-spaces.h
index 0ca05d8d4c..e76996fec4 100644
--- a/deps/v8/src/heap/read-only-spaces.h
+++ b/deps/v8/src/heap/read-only-spaces.h
@@ -26,6 +26,9 @@ class SnapshotData;
class ReadOnlyPage : public BasicMemoryChunk {
public:
+ ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
+ Address area_start, Address area_end, VirtualMemory reservation);
+
// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
@@ -209,28 +212,26 @@ class ReadOnlySpace : public BaseSpace {
// to write it into the free space nodes that were already created.
void RepairFreeSpacesAfterDeserialization();
- size_t Size() override { return accounting_stats_.Size(); }
- V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() override;
+ size_t Size() const override { return accounting_stats_.Size(); }
+ V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const override;
const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
Address top() const { return top_; }
Address limit() const { return limit_; }
size_t Capacity() const { return capacity_; }
- bool ContainsSlow(Address addr);
+ bool ContainsSlow(Address addr) const;
V8_EXPORT_PRIVATE void ShrinkPages();
#ifdef VERIFY_HEAP
- void Verify(Isolate* isolate);
+ void Verify(Isolate* isolate) const;
#ifdef DEBUG
- void VerifyCounters(Heap* heap);
+ void VerifyCounters(Heap* heap) const;
#endif // DEBUG
#endif // VERIFY_HEAP
// Return size of allocatable area on a page in this space.
int AreaSize() const { return static_cast<int>(area_size_); }
- ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);
-
Address FirstPageAddress() const { return pages_.front()->address(); }
protected:
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index b4badca6d6..ed7c0a2e36 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -153,11 +153,8 @@ class RememberedSet : public AllStatic {
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slot_set = chunk->slot_set<type>();
- SlotSet* sweeping_slot_set =
- type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
- if (slot_set != nullptr || sweeping_slot_set != nullptr ||
- typed_slot_set != nullptr ||
+ if (slot_set != nullptr || typed_slot_set != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
@@ -351,46 +348,6 @@ class UpdateTypedSlotHelper {
}
};
-class RememberedSetSweeping {
- public:
- template <AccessMode access_mode>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSweepingSlotSet();
- }
- RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
- }
-
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
- RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->sweeping_slot_set();
- RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
- }
-
- // Iterates and filters the remembered set in the given memory chunk with
- // the given callback. The callback should take (Address slot) and return
- // SlotCallbackResult.
- //
- // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
- // threads concurrently inserting slots.
- template <typename Callback>
- static int Iterate(MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->sweeping_slot_set();
- return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
- }
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index 12f6706f76..109108a3b4 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -14,6 +14,7 @@
#include "src/handles/handles.h"
#include "src/handles/local-handles.h"
#include "src/handles/persistent-handles.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
@@ -84,18 +85,43 @@ void IsolateSafepoint::TryInitiateGlobalSafepointScope(
InitiateGlobalSafepointScopeRaw(initiator, client_data);
}
+class GlobalSafepointInterruptTask : public CancelableTask {
+ public:
+ explicit GlobalSafepointInterruptTask(Heap* heap)
+ : CancelableTask(heap->isolate()), heap_(heap) {}
+
+ ~GlobalSafepointInterruptTask() override = default;
+ GlobalSafepointInterruptTask(const GlobalSafepointInterruptTask&) = delete;
+ GlobalSafepointInterruptTask& operator=(const GlobalSafepointInterruptTask&) =
+ delete;
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override { heap_->main_thread_local_heap()->Safepoint(); }
+
+ Heap* heap_;
+};
+
void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
Isolate* initiator, PerClientSafepointData* client_data) {
CHECK_EQ(++active_safepoint_scopes_, 1);
barrier_.Arm();
size_t running =
- SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
+ SetSafepointRequestedFlags(ShouldIncludeMainThread(initiator));
client_data->set_locked_and_running(running);
+
+ if (isolate() != initiator) {
+ // An isolate might be waiting in the event loop. Post a task in order to
+ // wake it up.
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(reinterpret_cast<v8::Isolate*>(isolate()))
+ ->PostTask(std::make_unique<GlobalSafepointInterruptTask>(heap_));
+ }
}
-IsolateSafepoint::IncludeMainThread
-IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
+IsolateSafepoint::IncludeMainThread IsolateSafepoint::ShouldIncludeMainThread(
+ Isolate* initiator) {
const bool is_initiator = isolate() == initiator;
return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
}
@@ -136,7 +162,7 @@ void IsolateSafepoint::LockMutex(LocalHeap* local_heap) {
void IsolateSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
local_heaps_mutex_.AssertHeld();
CHECK_EQ(--active_safepoint_scopes_, 0);
- ClearSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
+ ClearSafepointRequestedFlags(ShouldIncludeMainThread(initiator));
barrier_.Disarm();
local_heaps_mutex_.Unlock();
}
@@ -333,6 +359,12 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
initiator, &clients.back());
});
+ // Make it possible to use AssertActive() on shared isolates.
+ CHECK(shared_isolate_->heap()->safepoint()->local_heaps_mutex_.TryLock());
+
+ // Shared isolates should never have multiple threads.
+ shared_isolate_->heap()->safepoint()->AssertMainThreadIsOnlyThread();
+
// Iterate all clients again to initiate the safepoint for all of them - even
// if that means blocking.
for (PerClientSafepointData& client : clients) {
@@ -356,6 +388,8 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
}
void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
+ shared_isolate_->heap()->safepoint()->local_heaps_mutex_.Unlock();
+
IterateClientIsolates([initiator](Isolate* client) {
Heap* client_heap = client->heap();
client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index b64df46f3a..747c0a80b7 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -94,7 +94,7 @@ class IsolateSafepoint final {
void WaitUntilRunningThreadsInSafepoint(
const PerClientSafepointData* client_data);
- IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
+ IncludeMainThread ShouldIncludeMainThread(Isolate* initiator);
void LockMutex(LocalHeap* local_heap);
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 89451be076..b91db2a1dc 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -43,8 +43,11 @@ void ScavengeJob::ScheduleTaskIfNeeded(Heap* heap) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
- taskrunner->PostTask(std::make_unique<Task>(heap->isolate(), this));
- task_pending_ = true;
+ if (taskrunner->NonNestableTasksEnabled()) {
+ taskrunner->PostNonNestableTask(
+ std::make_unique<Task>(heap->isolate(), this));
+ task_pending_ = true;
+ }
}
}
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 56e002a98c..149510b6a4 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -7,10 +7,11 @@
#include "src/common/globals.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
-#include "src/heap/barrier.h"
#include "src/heap/concurrent-allocator.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
@@ -125,13 +126,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
// Sweeper is stopped during scavenge, so we can directly
// insert into its remembered set here.
- if (chunk->sweeping_slot_set()) {
- RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
- slot.address());
- } else {
- RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
- slot.address());
- }
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
+ slot.address());
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else if (record_slots_ &&
@@ -303,9 +299,8 @@ void ScavengerCollector::CollectGarbage() {
// access to the slots of a page and can completely avoid any locks on
// the page itself.
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
- filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
- return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
- });
+ filter_scope.FilterOldSpaceSweepingPages(
+ [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
const bool is_logging = isolate_->LogObjectRelocation();
for (int i = 0; i < num_scavenge_tasks; ++i) {
@@ -640,17 +635,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
&empty_chunks_local_);
}
- if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
- InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
- RememberedSetSweeping::Iterate(
- page,
- [this, &filter](MaybeObjectSlot slot) {
- if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- return CheckAndScavengeObject(heap_, slot);
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
- }
-
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
// processed.
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 0dff0ec133..cca711e863 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -16,7 +16,6 @@
namespace v8 {
namespace internal {
-class OneshotBarrier;
class RootScavengeVisitor;
class Scavenger;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 806da907c0..34e6c1b433 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -829,6 +829,7 @@ void Heap::CreateInitialObjects() {
#ifdef V8_ENABLE_WEBASSEMBLY
set_active_continuation(roots.undefined_value());
set_active_suspender(roots.undefined_value());
+ set_wasm_canonical_rtts(roots.empty_weak_array_list());
#endif // V8_ENABLE_WEBASSEMBLY
set_script_list(roots.empty_weak_array_list());
diff --git a/deps/v8/src/heap/slot-set.cc b/deps/v8/src/heap/slot-set.cc
index ae4dd7a79f..4d4231226d 100644
--- a/deps/v8/src/heap/slot-set.cc
+++ b/deps/v8/src/heap/slot-set.cc
@@ -4,6 +4,9 @@
#include "src/heap/slot-set.h"
+#include "src/base/logging.h"
+#include "src/heap/memory-chunk-layout.h"
+
namespace v8 {
namespace internal {
@@ -58,23 +61,38 @@ TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, size_t capacity) {
return chunk;
}
-void TypedSlotSet::ClearInvalidSlots(
- const std::map<uint32_t, uint32_t>& invalid_ranges) {
+void TypedSlotSet::ClearInvalidSlots(const FreeRangesMap& invalid_ranges) {
+ IterateSlotsInRanges([](TypedSlot* slot) { *slot = ClearedTypedSlot(); },
+ invalid_ranges);
+}
+
+void TypedSlotSet::AssertNoInvalidSlots(const FreeRangesMap& invalid_ranges) {
+ IterateSlotsInRanges(
+ [](TypedSlot* slot) {
+ CHECK_WITH_MSG(false, "No slot in ranges expected.");
+ },
+ invalid_ranges);
+}
+
+template <typename Callback>
+void TypedSlotSet::IterateSlotsInRanges(Callback callback,
+ const FreeRangesMap& ranges) {
+ if (ranges.empty()) return;
+
Chunk* chunk = LoadHead();
while (chunk != nullptr) {
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type == SlotType::kCleared) continue;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
- std::map<uint32_t, uint32_t>::const_iterator upper_bound =
- invalid_ranges.upper_bound(offset);
- if (upper_bound == invalid_ranges.begin()) continue;
+ FreeRangesMap::const_iterator upper_bound = ranges.upper_bound(offset);
+ if (upper_bound == ranges.begin()) continue;
// upper_bounds points to the invalid range after the given slot. Hence,
// we have to go to the previous element.
upper_bound--;
DCHECK_LE(upper_bound->first, offset);
if (upper_bound->second > offset) {
- slot = ClearedTypedSlot();
+ callback(&slot);
}
}
chunk = LoadNext(chunk);
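
The refactored IterateSlotsInRanges keeps the original lookup strategy: disjoint free ranges live in a std::map keyed by start offset, and a slot offset is tested with upper_bound followed by one step back. A standalone version of just that lookup; the FreeRangesMap alias matches the new typedef, but the function name is made up:

#include <cstdint>
#include <map>

using FreeRangesMap = std::map<uint32_t, uint32_t>;  // start -> end (exclusive)

// Returns true if `offset` falls inside one of the half-open ranges.
bool OffsetInAnyRange(const FreeRangesMap& ranges, uint32_t offset) {
  if (ranges.empty()) return false;
  // upper_bound yields the first range starting strictly after `offset`,
  // so the only candidate is the range immediately before it.
  auto upper_bound = ranges.upper_bound(offset);
  if (upper_bound == ranges.begin()) return false;
  --upper_bound;
  return upper_bound->first <= offset && offset < upper_bound->second;
}
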
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 7f6f8c3c41..81a48afba7 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -681,6 +681,8 @@ class V8_EXPORT_PRIVATE TypedSlots {
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
public:
+ using FreeRangesMap = std::map<uint32_t, uint32_t>;
+
enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
@@ -737,12 +739,19 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
// Clears all slots that have the offset in the specified ranges.
// This can run concurrently to Iterate().
- void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);
+ void ClearInvalidSlots(const FreeRangesMap& invalid_ranges);
+
+ // Asserts that there are no recorded slots in the specified ranges.
+ void AssertNoInvalidSlots(const FreeRangesMap& invalid_ranges);
// Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
void FreeToBeFreedChunks();
private:
+ template <typename Callback>
+ void IterateSlotsInRanges(Callback callback,
+ const FreeRangesMap& invalid_ranges);
+
// Atomic operations used by Iterate and ClearInvalidSlots;
Chunk* LoadNext(Chunk* chunk) {
return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index fb290feee5..f24df06a1d 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -192,6 +192,122 @@ MemoryChunk* MemoryChunkIterator::Next() {
return chunk;
}
+AllocationResult SpaceWithLinearArea::AllocateFastUnaligned(
+ int size_in_bytes, AllocationOrigin origin) {
+ if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
+ return AllocationResult::Failure();
+ }
+ HeapObject obj =
+ HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return AllocationResult::FromObject(obj);
+}
+
+AllocationResult SpaceWithLinearArea::AllocateFastAligned(
+ int size_in_bytes, int* result_aligned_size_in_bytes,
+ AllocationAlignment alignment, AllocationOrigin origin) {
+ Address top = allocation_info_->top();
+ int filler_size = Heap::GetFillToAlign(top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
+ return AllocationResult::Failure();
+ }
+ HeapObject obj = HeapObject::FromAddress(
+ allocation_info_->IncrementTop(aligned_size_in_bytes));
+ if (result_aligned_size_in_bytes)
+ *result_aligned_size_in_bytes = aligned_size_in_bytes;
+
+ if (filler_size > 0) {
+ obj = heap()->PrecedeWithFiller(obj, filler_size);
+ }
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return AllocationResult::FromObject(obj);
+}
+
+AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
+
+ AllocationResult result;
+
+ if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
+ result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
+ } else {
+ result = AllocateFastUnaligned(size_in_bytes, origin);
+ }
+
+ return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+ : result;
+}
+
+AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
+ int max_aligned_size;
+ if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin,
+ &max_aligned_size)) {
+ return AllocationResult::Failure();
+ }
+
+ DCHECK_EQ(max_aligned_size, size_in_bytes);
+ DCHECK_LE(allocation_info_->start(), allocation_info_->top());
+
+ AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
+ DCHECK(!result.IsFailure());
+
+ InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
+ size_in_bytes);
+
+ return result;
+}
+
+AllocationResult SpaceWithLinearArea::AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
+ int max_aligned_size;
+ if (!EnsureAllocation(size_in_bytes, alignment, origin, &max_aligned_size)) {
+ return AllocationResult::Failure();
+ }
+
+ DCHECK_GE(max_aligned_size, size_in_bytes);
+ DCHECK_LE(allocation_info_->start(), allocation_info_->top());
+
+ int aligned_size_in_bytes;
+
+ AllocationResult result = AllocateFastAligned(
+ size_in_bytes, &aligned_size_in_bytes, alignment, origin);
+ DCHECK_GE(max_aligned_size, aligned_size_in_bytes);
+ DCHECK(!result.IsFailure());
+
+ InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
+ aligned_size_in_bytes, max_aligned_size);
+
+ return result;
+}
+
+AllocationResult SpaceWithLinearArea::AllocateRawSlow(
+ int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
+ AllocationResult result =
+ USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
+ return result;
+}
+
} // namespace internal
} // namespace v8
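
The fast paths above are plain bump-pointer allocation in the linear allocation area (LAB): AllocateFastUnaligned only advances top, and AllocateFastAligned additionally accounts for a filler that pads the result up to the requested alignment before checking the remaining space. A minimal sketch of that idea with a hypothetical LinearArea struct standing in for LinearAllocationArea (addresses as raw integers, no HeapObject or filler-object machinery):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical LAB: [top, limit) is the unused tail of the current area.
    struct LinearArea {
      uintptr_t top;
      uintptr_t limit;
    };

    constexpr uintptr_t kAllocationFailure = 0;

    // Unaligned fast path: fail if the request does not fit, otherwise bump top.
    uintptr_t AllocateFastUnaligned(LinearArea* lab, size_t size_in_bytes) {
      if (lab->limit - lab->top < size_in_bytes) return kAllocationFailure;
      uintptr_t result = lab->top;
      lab->top += size_in_bytes;
      return result;
    }

    // Aligned fast path: compute the filler needed to reach `alignment` (nonzero),
    // check the padded size against the remaining space, then bump top past
    // filler + object.
    uintptr_t AllocateFastAligned(LinearArea* lab, size_t size_in_bytes,
                                  size_t alignment) {
      size_t filler_size = (alignment - lab->top % alignment) % alignment;
      size_t aligned_size = size_in_bytes + filler_size;
      if (lab->limit - lab->top < aligned_size) return kAllocationFailure;
      uintptr_t result = lab->top + filler_size;  // object starts after the filler
      lab->top += aligned_size;
      return result;
    }

When either fast path fails, AllocateRawSlow re-dispatches to AllocateRawAligned/AllocateRawUnaligned, which call EnsureAllocation to set up a fresh area and then run the same fast path again.
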
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 77be40f779..4717879d1d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -16,7 +16,6 @@
#include "src/heap/base/active-system-pages.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
-#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
@@ -50,6 +49,12 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
// static
constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;
+Page::Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
+ Address area_end, VirtualMemory reservation,
+ Executability executable)
+ : MemoryChunk(heap, space, size, area_start, area_end,
+ std::move(reservation), executable, PageSize::kRegular) {}
+
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -93,33 +98,6 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
-void Page::MoveOldToNewRememberedSetForSweeping() {
- CHECK_NULL(sweeping_slot_set_);
- sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
- slot_set_[OLD_TO_NEW] = nullptr;
-}
-
-void Page::MergeOldToNewRememberedSets() {
- if (sweeping_slot_set_ == nullptr) return;
-
- if (slot_set_[OLD_TO_NEW]) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- this,
- [this](MaybeObjectSlot slot) {
- Address address = slot.address();
- RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
- return KEEP_SLOT;
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
-
- ReleaseSlotSet<OLD_TO_NEW>();
- }
-
- CHECK_NULL(slot_set_[OLD_TO_NEW]);
- slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
- sweeping_slot_set_ = nullptr;
-}
-
size_t Page::AvailableInFreeList() {
size_t sum = 0;
ForAllFreeListCategories([&sum](FreeListCategory* category) {
@@ -166,7 +144,6 @@ size_t Page::ShrinkToHighWaterMark() {
// area would not be freed when deallocating this page.
DCHECK_NULL(slot_set<OLD_TO_NEW>());
DCHECK_NULL(slot_set<OLD_TO_OLD>());
- DCHECK_NULL(sweeping_slot_set());
size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
MemoryAllocator::GetCommitPageSize());
@@ -258,7 +235,7 @@ void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
- size_t min_size) {
+ size_t min_size) const {
DCHECK_GE(end - start, min_size);
if (!use_lab_) {
@@ -310,7 +287,7 @@ void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
allocations_origins_[static_cast<int>(origin)]++;
}
-void SpaceWithLinearArea::PrintAllocationsOrigins() {
+void SpaceWithLinearArea::PrintAllocationsOrigins() const {
PrintIsolate(
heap()->isolate(),
"Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
@@ -465,6 +442,14 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
allocation_counter_.NextBytes());
}
+#if DEBUG
+void SpaceWithLinearArea::VerifyTop() const {
+ // Ensure validity of LAB: start <= top <= limit
+ DCHECK_LE(allocation_info_->start(), allocation_info_->top());
+ DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
+}
+#endif // DEBUG
+
int MemoryChunk::FreeListsLength() {
int length = 0;
for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 18b760e1a5..f24504fed3 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -21,6 +21,7 @@
#include "src/heap/list.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
+#include "src/heap/slot-set.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
@@ -144,12 +145,12 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
// Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace).
- virtual size_t SizeOfObjects() { return Size(); }
+ virtual size_t SizeOfObjects() const { return Size(); }
// Return the available bytes without growing.
- virtual size_t Available() = 0;
+ virtual size_t Available() const = 0;
- virtual int RoundSizeDownToObjectAlignment(int size) {
+ virtual int RoundSizeDownToObjectAlignment(int size) const {
if (id_ == CODE_SPACE) {
return RoundDown(size, kCodeAlignment);
} else {
@@ -224,6 +225,9 @@ class Page : public MemoryChunk {
MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
+ Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
+ Address area_end, VirtualMemory reservation, Executability executable);
+
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
@@ -306,11 +310,25 @@ class Page : public MemoryChunk {
void AllocateFreeListCategories();
void ReleaseFreeListCategories();
- void MoveOldToNewRememberedSetForSweeping();
- void MergeOldToNewRememberedSets();
-
ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
+ template <RememberedSetType remembered_set>
+ void ClearInvalidTypedSlots(const TypedSlotSet::FreeRangesMap& ranges) {
+ TypedSlotSet* typed_slot_set = this->typed_slot_set<remembered_set>();
+ if (typed_slot_set != nullptr) {
+ typed_slot_set->ClearInvalidSlots(ranges);
+ }
+ }
+
+ template <RememberedSetType remembered_set>
+ void AssertNoInvalidTypedSlots(const TypedSlotSet::FreeRangesMap& ranges) {
+ // TODO(dinfuehr): Make this a DCHECK eventually.
+ TypedSlotSet* typed_slot_set = this->typed_slot_set<OLD_TO_OLD>();
+ if (typed_slot_set != nullptr) {
+ typed_slot_set->AssertNoInvalidSlots(ranges);
+ }
+ }
+
private:
friend class MemoryAllocator;
};
@@ -353,6 +371,7 @@ class PageIteratorImpl
using PageIterator = PageIteratorImpl<Page>;
using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;
+using ConstLargePageIterator = PageIteratorImpl<const LargePage>;
class PageRange {
public:
@@ -447,7 +466,7 @@ class SpaceWithLinearArea : public Space {
LinearAllocationArea* allocation_info)
: Space(heap, id, free_list), allocation_info_(allocation_info) {}
- virtual bool SupportsAllocationObserver() = 0;
+ virtual bool SupportsAllocationObserver() const = 0;
// Returns the allocation pointer in this space.
Address top() const { return allocation_info_->top(); }
@@ -485,7 +504,7 @@ class SpaceWithLinearArea : public Space {
// area bounded by [start, end), this function computes the limit to use to
// allow proper observation based on existing observers. min_size specifies
// the minimum size that the limited area should have.
- Address ComputeLimit(Address start, Address end, size_t min_size);
+ Address ComputeLimit(Address start, Address end, size_t min_size) const;
V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
size_t min_size) = 0;
@@ -493,11 +512,57 @@ class SpaceWithLinearArea : public Space {
void EnableInlineAllocation();
bool IsInlineAllocationEnabled() const { return use_lab_; }
- void PrintAllocationsOrigins();
+ void PrintAllocationsOrigins() const;
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space if possible, return a
+ // failure object if not.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space double aligned if
+ // possible, return a failure object if not.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
protected:
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+ // Allocates an object from the linear allocation area. Assumes that the
+  // linear allocation area is large enough to fit the object.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
+ // Tries to allocate an aligned object from the linear allocation area.
+  // Returns a failure value if the linear allocation area does not fit the
+  // object. Otherwise, returns the object and writes the allocation size
+  // (object size + alignment filler size) to *aligned_size_in_bytes.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
+ AllocationAlignment alignment, AllocationOrigin origin);
+
+ // Slow path of allocation function
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ // Sets up a linear allocation area that fits the given number of bytes.
+ // Returns false if there is not enough space and the caller has to retry
+ // after collecting garbage.
+ // Writes to `max_aligned_size` the actual number of bytes used for checking
+ // that there is enough space.
+ virtual bool EnsureAllocation(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin,
+ int* out_max_aligned_size) = 0;
+
+#if DEBUG
+ V8_EXPORT_PRIVATE void VerifyTop() const;
+#endif // DEBUG
+
LinearAllocationArea* const allocation_info_;
bool use_lab_ = true;
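
EnsureAllocation's contract, as documented above, is retry-oriented: a false return means the space could not set up a large-enough area and the caller is expected to collect garbage before trying again. A hedged sketch of such a retry loop, with hypothetical TryAllocate/collect_garbage hooks (the real retry logic lives in Heap's allocation helpers, not in this class):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical caller-side retry loop around a space that follows the
    // EnsureAllocation contract: a failed attempt triggers a GC, then a retry.
    template <typename SpaceT, typename GCFn>
    uintptr_t AllocateWithRetry(SpaceT* space, int size_in_bytes,
                                GCFn collect_garbage, int max_retries = 2) {
      for (int attempt = 0; attempt < max_retries; ++attempt) {
        uintptr_t result = space->TryAllocate(size_in_bytes);  // fast + slow path
        if (result != 0) return result;
        collect_garbage();  // free memory, then retry the allocation
      }
      return 0;  // still failing after retries; the real heap reports OOM here
    }
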
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 5745c4bf1f..ed9385532d 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -9,6 +9,7 @@
#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
@@ -189,15 +190,6 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
return nullptr;
}
-void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
- base::MutexGuard guard(&mutex_);
-
- ForAllSweepingSpaces([this](AllocationSpace space) {
- SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
- for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
- });
-}
-
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
@@ -205,8 +197,9 @@ void Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
+ });
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
@@ -218,13 +211,14 @@ void Sweeper::EnsureCompleted() {
void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
if (!sweeping_in_progress_) return;
- ParallelSweepSpace(space, 0);
+ ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
}
void Sweeper::SupportConcurrentSweeping() {
ForAllSweepingSpaces([this](AllocationSpace space) {
const int kMaxPagesToSweepPerSpace = 1;
- ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
+ ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0,
+ kMaxPagesToSweepPerSpace);
});
}
@@ -234,7 +228,7 @@ bool Sweeper::AreSweeperTasksRunning() {
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space,
- bool non_empty_typed_slots, FreeListRebuildingMode free_list_mode,
+ FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
CHECK_GT(free_end, free_start);
size_t freed_bytes = 0;
@@ -258,15 +252,33 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
}
V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
- Address free_start, Address free_end, Page* page,
- bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
+ Address free_start, Address free_end, Page* page, bool record_free_ranges,
+ TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* old_to_new_cleanup) {
DCHECK_LE(free_start, free_end);
- RememberedSetSweeping::RemoveRange(page, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- if (non_empty_typed_slots) {
+ if (sweeping_mode == SweepingMode::kEagerDuringGC) {
+ // New space and in consequence the old-to-new remembered set is always
+ // empty after a full GC, so we do not need to remove from it after the full
+ // GC. However, we wouldn't even be allowed to do that, since the main
+ // thread then owns the old-to-new remembered set. Removing from it from a
+ // sweeper thread would race with the main thread.
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ // While we only add old-to-old slots on live objects, we can still end up
+ // with old-to-old slots in free memory with e.g. right-trimming of objects.
+ RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ } else {
+ DCHECK_NULL(page->slot_set<OLD_TO_OLD>());
+ }
+
+  // Old-to-shared isn't reset after a full GC, so it needs to be cleaned both
+ // during and after a full GC.
+ RememberedSet<OLD_TO_SHARED>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (record_free_ranges) {
free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
static_cast<uint32_t>(free_start - page->address()),
static_cast<uint32_t>(free_end - page->address())));
@@ -276,17 +288,27 @@ V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
}
void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
- Page* page, const FreeRangesMap& free_ranges_map) {
- if (!free_ranges_map.empty()) {
- TypedSlotSet* old_to_new = page->typed_slot_set<OLD_TO_NEW>();
- if (old_to_new != nullptr) {
- old_to_new->ClearInvalidSlots(free_ranges_map);
- }
- TypedSlotSet* old_to_old = page->typed_slot_set<OLD_TO_OLD>();
- if (old_to_old != nullptr) {
- old_to_old->ClearInvalidSlots(free_ranges_map);
- }
+ Page* page, const TypedSlotSet::FreeRangesMap& free_ranges_map,
+ SweepingMode sweeping_mode) {
+ if (sweeping_mode == SweepingMode::kEagerDuringGC) {
+ page->ClearInvalidTypedSlots<OLD_TO_NEW>(free_ranges_map);
+
+ // Typed old-to-old slot sets are only ever recorded in live code objects.
+ // Also code objects are never right-trimmed, so there cannot be any slots
+ // in a free range.
+ page->AssertNoInvalidTypedSlots<OLD_TO_OLD>(free_ranges_map);
+
+ page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
+ return;
}
+
+ DCHECK_EQ(sweeping_mode, SweepingMode::kLazyOrConcurrent);
+
+ // After a full GC there are no old-to-new typed slots. The main thread
+ // could create new slots but not in a free range.
+ page->AssertNoInvalidTypedSlots<OLD_TO_NEW>(free_ranges_map);
+ DCHECK_NULL(page->typed_slot_set<OLD_TO_OLD>());
+ page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
}
void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
@@ -305,11 +327,10 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
}
}
-int Sweeper::RawSweep(
- Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
- const base::MutexGuard& page_guard) {
+int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode,
+ SweepingMode sweeping_mode,
+ const base::MutexGuard& page_guard) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
@@ -342,20 +363,21 @@ int Sweeper::RawSweep(
size_t live_bytes = 0;
size_t max_freed_bytes = 0;
- bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
- p->typed_slot_set<OLD_TO_OLD>() != nullptr;
+ bool record_free_ranges = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
+ p->typed_slot_set<OLD_TO_OLD>() != nullptr ||
+ p->typed_slot_set<OLD_TO_SHARED>() != nullptr ||
+ DEBUG_BOOL;
// Clean invalidated slots during the final atomic pause. After resuming
// execution this isn't necessary, invalid old-to-new refs were already
// removed by mark compact's update pointers phase.
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
- if (invalidated_slots_in_free_space ==
- FreeSpaceMayContainInvalidatedSlots::kYes)
+ if (sweeping_mode == SweepingMode::kEagerDuringGC)
old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
// The free ranges map is used for filtering typed slots.
- FreeRangesMap free_ranges_map;
+ TypedSlotSet::FreeRangesMap free_ranges_map;
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
p->object_start_bitmap()->Clear();
@@ -376,11 +398,10 @@ int Sweeper::RawSweep(
max_freed_bytes =
std::max(max_freed_bytes,
FreeAndProcessFreedMemory(free_start, free_end, p, space,
- non_empty_typed_slots,
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
- free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
- &old_to_new_cleanup);
+ free_start, free_end, p, record_free_ranges, &free_ranges_map,
+ sweeping_mode, &old_to_new_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
// Map might be forwarded during GC.
@@ -406,15 +427,14 @@ int Sweeper::RawSweep(
max_freed_bytes =
std::max(max_freed_bytes,
FreeAndProcessFreedMemory(free_start, free_end, p, space,
- non_empty_typed_slots,
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
- free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
- &old_to_new_cleanup);
+ free_start, free_end, p, record_free_ranges, &free_ranges_map,
+ sweeping_mode, &old_to_new_cleanup);
}
// Phase 3: Post process the page.
- CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
+ CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map, sweeping_mode);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
if (active_system_pages_after_sweeping) {
@@ -445,9 +465,9 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
if (page == nullptr) return true;
// Typed slot sets are only recorded on code pages. Code pages
// are not swept concurrently to the application to ensure W^X.
- DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
- !page->typed_slot_set<OLD_TO_OLD>());
- ParallelSweepPage(page, identity);
+ DCHECK_NULL((page->typed_slot_set<OLD_TO_NEW>()));
+ DCHECK_NULL((page->typed_slot_set<OLD_TO_OLD>()));
+ ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
}
return false;
}
@@ -455,21 +475,23 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING,
ThreadKind::kMain);
+ const double start = heap_->MonotonicallyIncreasingTimeInMs();
if (Page* page = GetSweepingPageSafe(identity)) {
- ParallelSweepPage(page, identity);
+ ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
}
+ const double duration = heap_->MonotonicallyIncreasingTimeInMs() - start;
+ heap_->tracer()->AddIncrementalSweepingStep(duration);
return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
-int Sweeper::ParallelSweepSpace(
- AllocationSpace identity, int required_freed_bytes, int max_pages,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
+int Sweeper::ParallelSweepSpace(AllocationSpace identity,
+ SweepingMode sweeping_mode,
+ int required_freed_bytes, int max_pages) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
- int freed =
- ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
+ int freed = ParallelSweepPage(page, identity, sweeping_mode);
++pages_freed;
if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// Free list of a never-allocate page will be dropped later on.
@@ -484,9 +506,8 @@ int Sweeper::ParallelSweepSpace(
return max_freed;
}
-int Sweeper::ParallelSweepPage(
- Page* page, AllocationSpace identity,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
+int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
+ SweepingMode sweeping_mode) {
DCHECK(IsValidSweepingSpace(identity));
// The Scavenger may add already swept pages back.
@@ -507,7 +528,7 @@ int Sweeper::ParallelSweepPage(
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
- invalidated_slots_in_free_space, guard);
+ sweeping_mode, guard);
DCHECK(page->SweepingDone());
}
@@ -526,7 +547,7 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
if (IsValidSweepingSpace(space)) {
if (TryRemoveSweepingPageSafe(space, page)) {
// Page was successfully removed and can now be swept.
- ParallelSweepPage(page, space);
+ ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
} else {
// Some sweeper task already took ownership of that page, wait until
// sweeping is finished.
@@ -593,7 +614,6 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK(!category->is_linked(page->owner()->free_list()));
});
#endif // DEBUG
- page->MoveOldToNewRememberedSetForSweeping();
page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
@@ -690,7 +710,7 @@ void Sweeper::MakeIterable(Page* page) {
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
- FreeSpaceMayContainInvalidatedSlots::kNo, guard);
+ SweepingMode::kLazyOrConcurrent, guard);
}
} // namespace internal
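
The free ranges handed to CleanupInvalidTypedSlotsOfFreeRanges are recorded as page-relative 32-bit offsets, which is what TypedSlotSet::FreeRangesMap stores and what the Page::ClearInvalidTypedSlots/AssertNoInvalidTypedSlots helpers consume. A small sketch of that offset conversion as a hypothetical free-standing helper (the real code does this inline in CleanupRememberedSetEntriesForFreedMemory):

    #include <cstdint>
    #include <map>

    using FreeRangesMap = std::map<uint32_t, uint32_t>;

    // Record a freed [free_start, free_end) interval relative to its page start,
    // so that later typed-slot filtering can compare against slot offsets.
    void RecordFreeRange(FreeRangesMap* free_ranges_map, uintptr_t page_start,
                         uintptr_t free_start, uintptr_t free_end) {
      free_ranges_map->emplace(static_cast<uint32_t>(free_start - page_start),
                               static_cast<uint32_t>(free_end - page_start));
    }
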
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 9ac9172b51..9b41d1217e 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -11,6 +11,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
+#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
namespace v8 {
@@ -29,7 +30,6 @@ class Sweeper {
using IterabilityList = std::vector<Page*>;
using SweepingList = std::vector<Page*>;
using SweptList = std::vector<Page*>;
- using FreeRangesMap = std::map<uint32_t, uint32_t>;
// Pauses the sweeper tasks.
class V8_NODISCARD PauseScope final {
@@ -73,7 +73,7 @@ class Sweeper {
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
- enum class FreeSpaceMayContainInvalidatedSlots { kYes, kNo };
+ enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
@@ -83,24 +83,18 @@ class Sweeper {
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
- int ParallelSweepSpace(
- AllocationSpace identity, int required_freed_bytes, int max_pages = 0,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
- FreeSpaceMayContainInvalidatedSlots::kNo);
- int ParallelSweepPage(
- Page* page, AllocationSpace identity,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
- FreeSpaceMayContainInvalidatedSlots::kNo);
+ int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
+ int required_freed_bytes, int max_pages = 0);
+ int ParallelSweepPage(Page* page, AllocationSpace identity,
+ SweepingMode sweeping_mode);
void EnsurePageIsSwept(Page* page);
void ScheduleIncrementalSweepingTask();
- int RawSweep(
- Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
- const base::MutexGuard& page_guard);
+ int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode,
+ SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
@@ -119,7 +113,6 @@ class Sweeper {
void AddPageForIterability(Page* page);
void StartIterabilityTasks();
void EnsureIterabilityCompleted();
- void MergeOldToNewRememberedSetsForSweptPages();
private:
class IncrementalSweeperTask;
@@ -143,21 +136,21 @@ class Sweeper {
// the operating system.
size_t FreeAndProcessFreedMemory(Address free_start, Address free_end,
Page* page, Space* space,
- bool non_empty_typed_slots,
FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
// Helper function for RawSweep. Handle remembered set entries in the freed
// memory which require clearing.
void CleanupRememberedSetEntriesForFreedMemory(
- Address free_start, Address free_end, Page* page,
- bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
+ Address free_start, Address free_end, Page* page, bool record_free_ranges,
+ TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* old_to_new_cleanup);
// Helper function for RawSweep. Clears invalid typed slots in the given free
// ranges.
void CleanupInvalidTypedSlotsOfFreeRanges(
- Page* page, const FreeRangesMap& free_ranges_map);
+ Page* page, const TypedSlotSet::FreeRangesMap& free_ranges_map,
+ SweepingMode sweeping_mode);
// Helper function for RawSweep. Clears the mark bits and ensures consistency
// of live bytes.
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 4542f4af42..1d4e6db1ca 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -751,9 +751,15 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&api_getter);
{
- GotoIf(IsSideEffectFreeDebuggingActive(), &slow);
- HandleLoadAccessor(p, CAST(holder), handler_word, CAST(handler),
- handler_kind, exit_point);
+ if (p->receiver() != p->lookup_start_object()) {
+ // Force super ICs using API getters into the slow path, so that we get
+ // the correct receiver checks.
+ Goto(&slow);
+ } else {
+ GotoIf(IsSideEffectFreeDebuggingActive(), &slow);
+ HandleLoadAccessor(p, CAST(holder), handler_word, CAST(handler),
+ handler_kind, exit_point);
+ }
}
BIND(&proxy);
@@ -1260,44 +1266,47 @@ void AccessorAssembler::HandleStoreICHandlerCase(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
NameDictionaryLookup<PropertyDictionary>(
- properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
- BIND(&dictionary_found);
- {
- if (p->IsDefineKeyedOwn()) {
- // Take slow path to throw if a private name already exists.
- GotoIf(IsPrivateSymbol(CAST(p->name())), &if_slow);
- }
- Label if_constant(this), done(this);
- TNode<Uint32T> details =
- LoadDetailsByKeyIndex(properties, var_name_index.value());
- // Check that the property is a writable data property (no accessor).
- const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask;
- STATIC_ASSERT(static_cast<int>(PropertyKind::kData) == 0);
- GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
-
- if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
- GotoIf(IsPropertyDetailsConst(details), &if_constant);
- }
-
- StoreValueByKeyIndex<PropertyDictionary>(
- properties, var_name_index.value(), p->value());
- Return(p->value());
+ properties, CAST(p->name()),
+ p->IsAnyDefineOwn() ? &if_slow : &dictionary_found, &var_name_index,
+ miss);
+
+ // When dealing with class fields defined with DefineKeyedOwnIC or
+ // DefineNamedOwnIC, use the slow path to check the existing property.
+ if (!p->IsAnyDefineOwn()) {
+ BIND(&dictionary_found);
+ {
+ Label if_constant(this), done(this);
+ TNode<Uint32T> details =
+ LoadDetailsByKeyIndex(properties, var_name_index.value());
+ // Check that the property is a writable data property (no accessor).
+ const int kTypeAndReadOnlyMask =
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask;
+ STATIC_ASSERT(static_cast<int>(PropertyKind::kData) == 0);
+ GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
- if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
- BIND(&if_constant);
- {
- TNode<Object> prev_value =
- LoadValueByKeyIndex(properties, var_name_index.value());
- BranchIfSameValue(prev_value, p->value(), &done, miss,
- SameValueMode::kNumbersOnly);
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ GotoIf(IsPropertyDetailsConst(details), &if_constant);
}
- BIND(&done);
+ StoreValueByKeyIndex<PropertyDictionary>(
+ properties, var_name_index.value(), p->value());
Return(p->value());
+
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ BIND(&if_constant);
+ {
+ TNode<Object> prev_value =
+ LoadValueByKeyIndex(properties, var_name_index.value());
+ BranchIfSameValue(prev_value, p->value(), &done, miss,
+ SameValueMode::kNumbersOnly);
+ }
+
+ BIND(&done);
+ Return(p->value());
+ }
}
}
-
BIND(&if_fast_smi);
{
Label data(this), accessor(this), shared_struct_field(this),
@@ -4125,7 +4134,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
BIND(&no_feedback);
{
Comment("StoreInArrayLiteralIC_NoFeedback");
- TailCallBuiltin(Builtin::kSetPropertyInLiteral, p->context(), p->receiver(),
+ TailCallBuiltin(Builtin::kCreateDataProperty, p->context(), p->receiver(),
p->name(), p->value());
}
@@ -4773,7 +4782,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
ForEachEnumerableOwnProperty(
context, source_map, CAST(source), kPropertyAdditionOrder,
[=](TNode<Name> key, TNode<Object> value) {
- SetPropertyInLiteral(context, result, key, value);
+ CreateDataProperty(context, result, key, value);
},
&call_runtime);
Goto(&done);
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index f49ee84e3c..afd4fe9ec7 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -25,10 +25,10 @@ template CallOptimization::CallOptimization(LocalIsolate* isolate,
Context CallOptimization::GetAccessorContext(Map holder_map) const {
if (is_constant_call()) {
- return constant_function_->context().native_context();
+ return constant_function_->native_context();
}
JSFunction constructor = JSFunction::cast(holder_map.GetConstructor());
- return constructor.context().native_context();
+ return constructor.native_context();
}
bool CallOptimization::IsCrossContextLazyAccessorPair(Context native_context,
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 148fd24810..b0572bc23e 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -514,7 +514,6 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name,
if (it.IsFound()) {
return result;
} else if (!ShouldThrowReferenceError()) {
- LOG(isolate(), SuspectReadEvent(*name, *object));
return result;
}
}
@@ -759,7 +758,7 @@ bool IC::IsTransitionOfMonomorphicTarget(Map source_map, Map target_map) {
MapHandles map_list;
map_list.push_back(handle(target_map, isolate_));
transitioned_map = source_map.FindElementsKindTransitionedMap(
- isolate(), map_list, ConcurrencyMode::kNotConcurrent);
+ isolate(), map_list, ConcurrencyMode::kSynchronous);
}
return transitioned_map == target_map;
}
@@ -1425,7 +1424,7 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(
// generate an elements kind transition for this kind of receivers.
if (receiver_map->is_stable()) {
Map tmap = receiver_map->FindElementsKindTransitionedMap(
- isolate(), *receiver_maps, ConcurrencyMode::kNotConcurrent);
+ isolate(), *receiver_maps, ConcurrencyMode::kSynchronous);
if (!tmap.is_null()) {
receiver_map->NotifyLeafMapLayoutChange(isolate());
}
@@ -1779,7 +1778,8 @@ Maybe<bool> DefineOwnDataProperty(LookupIterator* it,
}
case LookupIterator::NOT_FOUND:
return Object::AddDataProperty(it, value, NONE,
- Nothing<ShouldThrow>(), store_origin);
+ Nothing<ShouldThrow>(), store_origin,
+ EnforceDefineSemantics::kDefine);
}
}
case LookupIterator::ACCESS_CHECK:
@@ -1796,7 +1796,7 @@ Maybe<bool> DefineOwnDataProperty(LookupIterator* it,
return JSObject::DefineOwnPropertyIgnoreAttributes(
it, value, NONE, should_throw, JSObject::DONT_FORCE_FIELD,
- JSObject::EnforceDefineSemantics::kDefine);
+ EnforceDefineSemantics::kDefine, store_origin);
}
} // namespace
@@ -1806,10 +1806,15 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
if (MigrateDeprecated(isolate(), object)) {
+ // KeyedStoreIC should handle DefineKeyedOwnIC with deprecated maps directly
+ // instead of reusing this method.
+ DCHECK(!IsDefineKeyedOwnIC());
+ DCHECK(!name->IsPrivateName());
+
PropertyKey key(isolate(), name);
LookupIterator it(
isolate(), object, key,
- IsAnyDefineOwn() ? LookupIterator::OWN : LookupIterator::DEFAULT);
+ IsDefineNamedOwnIC() ? LookupIterator::OWN : LookupIterator::DEFAULT);
DCHECK_IMPLIES(IsDefineNamedOwnIC(), it.IsFound() && it.HolderIsReceiver());
// TODO(v8:12548): refactor DefinedNamedOwnIC and SetNamedIC as subclasses
// of StoreIC so their logic doesn't get mixed here.
@@ -1867,14 +1872,16 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
}
}
- // For IsDefineNamedOwnIC(), we can't simply do CreateDataProperty below
+ // For IsAnyDefineOwn(), we can't simply do CreateDataProperty below
// because we need to check the attributes before UpdateCaches updates
// the state of the LookupIterator.
LookupIterator::State original_state = it.state();
// We'll defer the check for JSProxy and objects with named interceptors,
// because the defineProperty traps need to be called first if they are
- // present.
- if (IsDefineNamedOwnIC() && !object->IsJSProxy() &&
+ // present. We can also skip this for private names since they are not
+ // bound by configurability or extensibility checks, and errors would've
+ // been thrown if the private field already exists in the object.
+ if (IsAnyDefineOwn() && !name->IsPrivateName() && !object->IsJSProxy() &&
!Handle<JSObject>::cast(object)->HasNamedInterceptor()) {
Maybe<bool> can_define = JSReceiver::CheckIfCanDefine(
isolate(), &it, value, Nothing<ShouldThrow>());
@@ -1895,12 +1902,17 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// of StoreIC so their logic doesn't get mixed here.
// ES #sec-definefield
// ES #sec-runtime-semantics-propertydefinitionevaluation
- if (IsDefineNamedOwnIC()) {
- // Private property should be defined via DefineKeyedOwnIC or
- // KeyedStoreIC with private symbols.
- DCHECK(!name->IsPrivate());
- MAYBE_RETURN_NULL(DefineOwnDataProperty(
- &it, original_state, value, Nothing<ShouldThrow>(), store_origin));
+ // IsAnyDefineOwn() can be true when this method is reused by KeyedStoreIC.
+ if (IsAnyDefineOwn()) {
+ if (name->IsPrivateName()) {
+ // We should define private fields without triggering traps or checking
+ // extensibility.
+ MAYBE_RETURN_NULL(
+ JSReceiver::AddPrivateField(&it, value, Nothing<ShouldThrow>()));
+ } else {
+ MAYBE_RETURN_NULL(DefineOwnDataProperty(
+ &it, original_state, value, Nothing<ShouldThrow>(), store_origin));
+ }
} else {
MAYBE_RETURN_NULL(Object::SetProperty(&it, value, store_origin));
}
@@ -1982,9 +1994,9 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
// If the interceptor is on the receiver...
if (lookup->HolderIsReceiverOrHiddenPrototype() && !info.non_masking()) {
// ...return a store interceptor Smi handler if there is a setter
- // interceptor and it's not DefineNamedOwnIC (which should call the
- // definer)...
- if (!info.setter().IsUndefined(isolate()) && !IsDefineNamedOwnIC()) {
+ // interceptor and it's not DefineNamedOwnIC or DefineKeyedOwnIC
+ // (which should call the definer)...
+ if (!info.setter().IsUndefined(isolate()) && !IsAnyDefineOwn()) {
return MaybeObjectHandle(StoreHandler::StoreInterceptor(isolate()));
}
// ...otherwise return a slow-case Smi handler, which invokes the
@@ -2412,7 +2424,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
} else {
{
Map tmap = receiver_map->FindElementsKindTransitionedMap(
- isolate(), receiver_maps, ConcurrencyMode::kNotConcurrent);
+ isolate(), receiver_maps, ConcurrencyMode::kSynchronous);
if (!tmap.is_null()) {
if (receiver_map->is_stable()) {
receiver_map->NotifyLeafMapLayoutChange(isolate());
@@ -2846,7 +2858,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
kind = vector->GetKind(vector_slot);
}
- DCHECK(IsStoreICKind(kind) || IsDefineNamedOwnICKind(kind));
+ DCHECK(IsSetNamedICKind(kind) || IsDefineNamedOwnICKind(kind));
StoreIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
@@ -3138,11 +3150,15 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
StoreOwnElement(isolate, Handle<JSArray>::cast(object), key, value);
return *value;
} else {
- DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind) ||
+ DCHECK(IsKeyedStoreICKind(kind) || IsSetNamedICKind(kind) ||
IsDefineKeyedOwnICKind(kind));
RETURN_RESULT_OR_FAILURE(
- isolate, Runtime::SetObjectProperty(isolate, object, key, value,
- StoreOrigin::kMaybeKeyed));
+ isolate,
+ IsDefineKeyedOwnICKind(kind)
+ ? Runtime::DefineObjectOwnProperty(isolate, object, key, value,
+ StoreOrigin::kMaybeKeyed)
+ : Runtime::SetObjectProperty(isolate, object, key, value,
+ StoreOrigin::kMaybeKeyed));
}
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index f1c489efbe..61a8171e89 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -53,7 +53,7 @@ class IC {
return IsLoadIC() || IsLoadGlobalIC() || IsKeyedLoadIC();
}
bool IsAnyStore() const {
- return IsStoreIC() || IsDefineNamedOwnIC() || IsStoreGlobalIC() ||
+ return IsSetNamedIC() || IsDefineNamedOwnIC() || IsStoreGlobalIC() ||
IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind()) ||
IsDefineKeyedOwnIC();
}
@@ -121,7 +121,7 @@ class IC {
bool IsLoadGlobalIC() const { return IsLoadGlobalICKind(kind_); }
bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
bool IsStoreGlobalIC() const { return IsStoreGlobalICKind(kind_); }
- bool IsStoreIC() const { return IsStoreICKind(kind_); }
+ bool IsSetNamedIC() const { return IsSetNamedICKind(kind_); }
bool IsDefineNamedOwnIC() const { return IsDefineNamedOwnICKind(kind_); }
bool IsStoreInArrayLiteralIC() const {
return IsStoreInArrayLiteralICKind(kind_);
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index cef7a2fa28..dbab0d92ca 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -17,16 +17,28 @@ namespace v8 {
namespace internal {
enum class StoreMode {
- // TODO(v8:12548): rename to kSet and kDefineKeyedOwnInLiteral
- kOrdinary,
- kInLiteral,
-
- // kDefineNamedOwn performs an ordinary property store without traversing the
- // prototype chain. In the case of private fields, it will throw if the
- // field does not already exist.
- // kDefineKeyedOwn is similar to kDefineNamedOwn, but for private class
- // fields, it will throw if the field does already exist.
+ // kSet implements [[Set]] in the spec and traverses the prototype
+  // chain to invoke setters. It's used by KeyedStoreIC and StoreIC to
+ // set the properties when there is no feedback.
+ kSet,
+ // kDefineKeyedOwnInLiteral implements [[CreateDataProperty]] in the spec,
+ // and it assumes that the receiver is a JSObject that is created by us.
+ // It is used by Object.fromEntries(), CloneObjectIC and
+ // StoreInArrayLiteralIC to define a property in an object without
+ // traversing the prototype chain.
+ // TODO(v8:12548): merge this into the more generic kDefineKeyedOwn.
+ kDefineKeyedOwnInLiteral,
+ // kDefineNamedOwn implements [[CreateDataProperty]] but it can deal with
+ // user-defined receivers such as a JSProxy. It also assumes that the key
+  // is statically known. It's used to initialize named properties in object
+ // literals and named public class fields.
kDefineNamedOwn,
+ // kDefineKeyedOwn implements [[CreateDataProperty]], but it can deal with
+ // user-defined receivers such as a JSProxy, and for private class fields,
+  // it will throw if the field already exists. It's different from
+  // kDefineNamedOwn in that it does not assume the key is statically known.
+  // It's used to initialize computed public class fields and private
+ // class fields.
kDefineKeyedOwn
};
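
From the embedder's perspective, the kSet vs. define-own split corresponds to the two public v8::Object entry points: Set() performs [[Set]] and may invoke setters found on the prototype chain, while CreateDataProperty() defines an own data property on the receiver. A hedged fragment using the public API (assumes an already-initialized Isolate and Context; not part of this patch):

    #include <v8.h>

    // [[Set]] (StoreMode::kSet) may run a setter from the prototype chain;
    // CreateDataProperty (the define-own modes above) always defines an own "x".
    void StoreBothWays(v8::Local<v8::Context> context, v8::Local<v8::Object> obj) {
      v8::Isolate* isolate = context->GetIsolate();
      v8::Local<v8::Name> key = v8::String::NewFromUtf8Literal(isolate, "x");
      v8::Local<v8::Value> value = v8::Number::New(isolate, 1);
      obj->Set(context, key, value).Check();
      obj->CreateDataProperty(context, key, value).Check();
    }
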
@@ -44,19 +56,20 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
void StoreIC_NoFeedback();
- // Generates code for [[Set]] operation, the |unique_name| is supposed to be
- // unique otherwise this code will always go to runtime.
- void SetProperty(TNode<Context> context, TNode<JSReceiver> receiver,
- TNode<BoolT> is_simple_receiver, TNode<Name> unique_name,
- TNode<Object> value, LanguageMode language_mode);
-
- // [[Set]], but more generic than the above. This impl does essentially the
- // same as "KeyedStoreGeneric" but does not use feedback slot and uses a
- // hardcoded LanguageMode instead of trying to deduce it from the feedback
- // slot's kind.
- void SetProperty(TNode<Context> context, TNode<Object> receiver,
- TNode<Object> key, TNode<Object> value,
- LanguageMode language_mode);
+ // Generates code for [[Set]] or [[CreateDataProperty]] operation,
+ // the |unique_name| is supposed to be unique otherwise this code will
+ // always go to runtime.
+ void StoreProperty(TNode<Context> context, TNode<JSReceiver> receiver,
+ TNode<BoolT> is_simple_receiver, TNode<Name> unique_name,
+ TNode<Object> value, LanguageMode language_mode);
+
+ // This does [[Set]] or [[CreateDataProperty]] but it's more generic than
+ // the above. It is essentially the same as "KeyedStoreGeneric" but does not
+ // use feedback slot and uses a hardcoded LanguageMode instead of trying
+ // to deduce it from the feedback slot's kind.
+ void StoreProperty(TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> key, TNode<Object> value,
+ LanguageMode language_mode);
private:
StoreMode mode_;
@@ -69,7 +82,7 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
enum UseStubCache { kUseStubCache, kDontUseStubCache };
- // Helper that is used by the public KeyedStoreGeneric and by SetProperty.
+ // Helper that is used by the public KeyedStoreGeneric and by StoreProperty.
void KeyedStoreGeneric(TNode<Context> context, TNode<Object> receiver,
TNode<Object> key, TNode<Object> value,
Maybe<LanguageMode> language_mode);
@@ -147,28 +160,33 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
TNode<Name> name,
Label* slow);
- bool IsKeyedStore() const { return mode_ == StoreMode::kOrdinary; }
- bool IsStoreInLiteral() const { return mode_ == StoreMode::kInLiteral; }
+ bool IsSet() const { return mode_ == StoreMode::kSet; }
+ bool IsDefineKeyedOwnInLiteral() const {
+ return mode_ == StoreMode::kDefineKeyedOwnInLiteral;
+ }
bool IsDefineNamedOwn() const { return mode_ == StoreMode::kDefineNamedOwn; }
bool IsDefineKeyedOwn() const { return mode_ == StoreMode::kDefineKeyedOwn; }
+ bool IsAnyDefineOwn() const {
+ return IsDefineNamedOwn() || IsDefineKeyedOwn();
+ }
- bool ShouldCheckPrototype() const { return IsKeyedStore(); }
- bool ShouldReconfigureExisting() const { return IsStoreInLiteral(); }
- bool ShouldCallSetter() const { return IsKeyedStore(); }
+ bool ShouldCheckPrototype() const { return IsSet(); }
+ bool ShouldReconfigureExisting() const { return IsDefineKeyedOwnInLiteral(); }
+ bool ShouldCallSetter() const { return IsSet(); }
bool ShouldCheckPrototypeValidity() const {
// We don't do this for "in-literal" stores, because it is impossible for
// the target object to be a "prototype".
// We don't need the prototype validity check for "own" stores, because
// we don't care about the prototype chain.
// Thus, we need the prototype check only for ordinary stores.
- DCHECK_IMPLIES(!IsKeyedStore(), IsStoreInLiteral() || IsDefineNamedOwn() ||
- IsDefineKeyedOwn());
- return IsKeyedStore();
+ DCHECK_IMPLIES(!IsSet(), IsDefineKeyedOwnInLiteral() ||
+ IsDefineNamedOwn() || IsDefineKeyedOwn());
+ return IsSet();
}
};
void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kSet);
assembler.KeyedStoreGeneric();
}
@@ -179,7 +197,7 @@ void DefineKeyedOwnGenericGenerator::Generate(
}
void StoreICNoFeedbackGenerator::Generate(compiler::CodeAssemblerState* state) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kSet);
assembler.StoreIC_NoFeedback();
}
@@ -195,24 +213,25 @@ void KeyedStoreGenericGenerator::SetProperty(
compiler::CodeAssemblerState* state, TNode<Context> context,
TNode<JSReceiver> receiver, TNode<BoolT> is_simple_receiver,
TNode<Name> name, TNode<Object> value, LanguageMode language_mode) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
- assembler.SetProperty(context, receiver, is_simple_receiver, name, value,
- language_mode);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kSet);
+ assembler.StoreProperty(context, receiver, is_simple_receiver, name, value,
+ language_mode);
}
void KeyedStoreGenericGenerator::SetProperty(
compiler::CodeAssemblerState* state, TNode<Context> context,
TNode<Object> receiver, TNode<Object> key, TNode<Object> value,
LanguageMode language_mode) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
- assembler.SetProperty(context, receiver, key, value, language_mode);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kSet);
+ assembler.StoreProperty(context, receiver, key, value, language_mode);
}
-void KeyedStoreGenericGenerator::SetPropertyInLiteral(
+void KeyedStoreGenericGenerator::CreateDataProperty(
compiler::CodeAssemblerState* state, TNode<Context> context,
TNode<JSObject> receiver, TNode<Object> key, TNode<Object> value) {
- KeyedStoreGenericAssembler assembler(state, StoreMode::kInLiteral);
- assembler.SetProperty(context, receiver, key, value, LanguageMode::kStrict);
+ KeyedStoreGenericAssembler assembler(state,
+ StoreMode::kDefineKeyedOwnInLiteral);
+ assembler.StoreProperty(context, receiver, key, value, LanguageMode::kStrict);
}
void KeyedStoreGenericAssembler::BranchIfPrototypesMayHaveReadOnlyElements(
@@ -378,7 +397,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
{
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, PACKED_ELEMENTS, kHeaderSize);
- if (!IsStoreInLiteral()) {
+ if (!IsDefineKeyedOwnInLiteral()) {
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
// If we know that we're storing beyond the previous array length, we
@@ -481,7 +500,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
{
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, kHeaderSize);
- if (!IsStoreInLiteral()) {
+ if (!IsDefineKeyedOwnInLiteral()) {
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
{
@@ -818,53 +837,56 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
Label descriptor_found(this), lookup_transition(this);
TVARIABLE(IntPtrT, var_name_index);
- DescriptorLookup(name, descriptors, bitfield3, &descriptor_found,
+ DescriptorLookup(name, descriptors, bitfield3,
+ IsAnyDefineOwn() ? slow : &descriptor_found,
&var_name_index, &lookup_transition);
- BIND(&descriptor_found);
- {
- if (IsDefineKeyedOwn()) {
- // Take slow path to throw if a private name already exists.
- GotoIf(IsPrivateSymbol(name), slow);
- }
- TNode<IntPtrT> name_index = var_name_index.value();
- TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, name_index);
- Label data_property(this);
- JumpIfDataProperty(details, &data_property,
- ShouldReconfigureExisting() ? nullptr : &readonly);
-
- if (ShouldCallSetter()) {
- // Accessor case.
- // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
- LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
- name_index, details, &var_accessor_pair);
- var_accessor_holder = receiver;
- Goto(&accessor);
- } else {
- // Handle accessor to data property reconfiguration in runtime.
- Goto(slow);
- }
-
- BIND(&data_property);
+ // When dealing with class fields defined with DefineKeyedOwnIC or
+ // DefineNamedOwnIC, use the slow path to check the existing property.
+ if (!IsAnyDefineOwn()) {
+ BIND(&descriptor_found);
{
- Label shared(this);
- GotoIf(IsJSSharedStructInstanceType(instance_type), &shared);
+ TNode<IntPtrT> name_index = var_name_index.value();
+ TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, name_index);
+ Label data_property(this);
+ JumpIfDataProperty(details, &data_property,
+ ShouldReconfigureExisting() ? nullptr : &readonly);
- CheckForAssociatedProtector(name, slow);
- OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
- name_index, details, p->value(), slow,
- false);
- exit_point->Return(p->value());
+ if (ShouldCallSetter()) {
+ // Accessor case.
+ // TODO(jkummerow): Implement a trimmed-down
+ // LoadAccessorFromFastObject.
+ LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+ name_index, details, &var_accessor_pair);
+ var_accessor_holder = receiver;
+ Goto(&accessor);
+ } else {
+ // Handle accessor to data property reconfiguration in runtime.
+ Goto(slow);
+ }
- BIND(&shared);
+ BIND(&data_property);
{
- StoreJSSharedStructField(p->context(), receiver, receiver_map,
- descriptors, name_index, details,
- p->value());
+ Label shared(this);
+ GotoIf(IsJSSharedStructInstanceType(instance_type), &shared);
+
+ CheckForAssociatedProtector(name, slow);
+ OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
+ name_index, details, p->value(),
+ slow, false);
exit_point->Return(p->value());
+
+ BIND(&shared);
+ {
+ StoreJSSharedStructField(p->context(), receiver, receiver_map,
+ descriptors, name_index, details,
+ p->value());
+ exit_point->Return(p->value());
+ }
}
}
}
+
BIND(&lookup_transition);
{
Comment("lookup transition");
@@ -891,56 +913,59 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index), not_found(this);
TNode<PropertyDictionary> properties = CAST(LoadSlowProperties(receiver));
- NameDictionaryLookup<PropertyDictionary>(
- properties, name, &dictionary_found, &var_name_index, &not_found);
- BIND(&dictionary_found);
- {
- Label check_const(this), overwrite(this), done(this);
- if (IsDefineKeyedOwn()) {
- // Take slow path to throw if a private name already exists.
- GotoIf(IsPrivateSymbol(name), slow);
- }
- TNode<Uint32T> details =
- LoadDetailsByKeyIndex(properties, var_name_index.value());
- JumpIfDataProperty(details, &check_const,
- ShouldReconfigureExisting() ? nullptr : &readonly);
- if (ShouldCallSetter()) {
- // Accessor case.
- var_accessor_pair =
- LoadValueByKeyIndex(properties, var_name_index.value());
- var_accessor_holder = receiver;
- Goto(&accessor);
- } else {
- // We must reconfigure an accessor property to a data property
- // here, let the runtime take care of that.
- Goto(slow);
- }
+ // When dealing with class fields defined with DefineKeyedOwnIC or
+ // DefineNamedOwnIC, use the slow path to check the existing property.
+ NameDictionaryLookup<PropertyDictionary>(
+ properties, name, IsAnyDefineOwn() ? slow : &dictionary_found,
+ &var_name_index, &not_found);
- BIND(&check_const);
+ if (!IsAnyDefineOwn()) {
+ BIND(&dictionary_found);
{
- if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
- GotoIfNot(IsPropertyDetailsConst(details), &overwrite);
- TNode<Object> prev_value =
- LoadValueByKeyIndex(properties, var_name_index.value());
+ Label check_const(this), overwrite(this), done(this);
+ TNode<Uint32T> details =
+ LoadDetailsByKeyIndex(properties, var_name_index.value());
+ JumpIfDataProperty(details, &check_const,
+ ShouldReconfigureExisting() ? nullptr : &readonly);
- BranchIfSameValue(prev_value, p->value(), &done, slow,
- SameValueMode::kNumbersOnly);
+ if (ShouldCallSetter()) {
+ // Accessor case.
+ var_accessor_pair =
+ LoadValueByKeyIndex(properties, var_name_index.value());
+ var_accessor_holder = receiver;
+ Goto(&accessor);
} else {
- Goto(&overwrite);
+ // We must reconfigure an accessor property to a data property
+ // here, let the runtime take care of that.
+ Goto(slow);
}
- }
- BIND(&overwrite);
- {
- CheckForAssociatedProtector(name, slow);
- StoreValueByKeyIndex<PropertyDictionary>(
- properties, var_name_index.value(), p->value());
- Goto(&done);
- }
+ BIND(&check_const);
+ {
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ GotoIfNot(IsPropertyDetailsConst(details), &overwrite);
+ TNode<Object> prev_value =
+ LoadValueByKeyIndex(properties, var_name_index.value());
- BIND(&done);
- exit_point->Return(p->value());
+ BranchIfSameValue(prev_value, p->value(), &done, slow,
+ SameValueMode::kNumbersOnly);
+ } else {
+ Goto(&overwrite);
+ }
+ }
+
+ BIND(&overwrite);
+ {
+ CheckForAssociatedProtector(name, slow);
+ StoreValueByKeyIndex<PropertyDictionary>(
+ properties, var_name_index.value(), p->value());
+ Goto(&done);
+ }
+
+ BIND(&done);
+ exit_point->Return(p->value());
+ }
}
BIND(&not_found);
@@ -1022,34 +1047,29 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
}
}
- if (!ShouldReconfigureExisting()) {
+ if (!ShouldReconfigureExisting() && !IsAnyDefineOwn()) {
BIND(&readonly);
{
- if (IsDefineKeyedOwn() || IsDefineNamedOwn()) {
- Goto(slow);
- } else {
- LanguageMode language_mode;
- if (maybe_language_mode.To(&language_mode)) {
- if (language_mode == LanguageMode::kStrict) {
- TNode<String> type = Typeof(p->receiver());
- ThrowTypeError(p->context(),
- MessageTemplate::kStrictReadOnlyProperty, name, type,
- p->receiver());
- } else {
- exit_point->Return(p->value());
- }
+ LanguageMode language_mode;
+ if (maybe_language_mode.To(&language_mode)) {
+ if (language_mode == LanguageMode::kStrict) {
+ TNode<String> type = Typeof(p->receiver());
+ ThrowTypeError(p->context(), MessageTemplate::kStrictReadOnlyProperty,
+ name, type, p->receiver());
} else {
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
- SmiConstant(MessageTemplate::kStrictReadOnlyProperty),
- name, Typeof(p->receiver()), p->receiver());
exit_point->Return(p->value());
}
+ } else {
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
+ SmiConstant(MessageTemplate::kStrictReadOnlyProperty), name,
+ Typeof(p->receiver()), p->receiver());
+ exit_point->Return(p->value());
}
}
}
}
-// Helper that is used by the public KeyedStoreGeneric and by SetProperty.
+// Helper that is used by the public KeyedStoreGeneric and by StoreProperty.
void KeyedStoreGenericAssembler::KeyedStoreGeneric(
TNode<Context> context, TNode<Object> receiver_maybe_smi, TNode<Object> key,
TNode<Object> value, Maybe<LanguageMode> language_mode) {
@@ -1098,7 +1118,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&slow);
{
- if (IsKeyedStore() || IsDefineNamedOwn()) {
+ if (IsSet() || IsDefineNamedOwn()) {
// The DefineNamedOwnIC hacky reuse should never reach here.
CSA_DCHECK(this, BoolConstant(!IsDefineNamedOwn()));
Comment("KeyedStoreGeneric_slow");
@@ -1108,7 +1128,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
TailCallRuntime(Runtime::kDefineObjectOwnProperty, context, receiver, key,
value);
} else {
- DCHECK(IsStoreInLiteral());
+ DCHECK(IsDefineKeyedOwnInLiteral());
TailCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral_Simple, context,
receiver, key, value);
}
@@ -1126,11 +1146,11 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
KeyedStoreGeneric(context, receiver, name, value, Nothing<LanguageMode>());
}
-void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
- TNode<Object> receiver,
- TNode<Object> key,
- TNode<Object> value,
- LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::StoreProperty(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Object> key,
+ TNode<Object> value,
+ LanguageMode language_mode) {
KeyedStoreGeneric(context, receiver, key, value, Just(language_mode));
}
@@ -1173,12 +1193,12 @@ void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
}
}
-void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
- TNode<JSReceiver> receiver,
- TNode<BoolT> is_simple_receiver,
- TNode<Name> unique_name,
- TNode<Object> value,
- LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::StoreProperty(TNode<Context> context,
+ TNode<JSReceiver> receiver,
+ TNode<BoolT> is_simple_receiver,
+ TNode<Name> unique_name,
+ TNode<Object> value,
+ LanguageMode language_mode) {
StoreICParameters p(context, receiver, unique_name, value, {},
UndefinedConstant(), StoreICMode::kDefault);
@@ -1196,7 +1216,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
BIND(&slow);
{
- if (IsStoreInLiteral()) {
+ if (IsDefineKeyedOwnInLiteral()) {
CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral_Simple, context,
receiver, unique_name, value);
} else {
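A minimal standalone sketch of the read-only store handling above, using plain C++ stand-ins instead of CSA types; the enum and helper names are illustrative, not V8 API. With a statically known language mode the store either throws (strict) or silently returns the value (sloppy); otherwise the decision is deferred to the runtime:

    #include <iostream>
    #include <optional>

    enum class LanguageMode { kSloppy, kStrict };
    enum class StoreResult { kReturnValue, kThrowTypeError, kDeferToRuntime };

    // With a statically known strict mode we throw; with a known sloppy mode
    // the store is silently dropped and the value returned; with an unknown
    // mode the decision is left to Runtime::kThrowTypeErrorIfStrict.
    StoreResult StoreToReadOnly(std::optional<LanguageMode> maybe_mode) {
      if (maybe_mode.has_value()) {
        return *maybe_mode == LanguageMode::kStrict
                   ? StoreResult::kThrowTypeError
                   : StoreResult::kReturnValue;
      }
      return StoreResult::kDeferToRuntime;
    }

    int main() {
      std::cout << (StoreToReadOnly(LanguageMode::kStrict) ==
                    StoreResult::kThrowTypeError)
                << "\n";  // prints 1
    }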
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 6618ed3b26..e2a82a0910 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -28,10 +28,10 @@ class KeyedStoreGenericGenerator {
TNode<Object> key, TNode<Object> value,
LanguageMode language_mode);
- static void SetPropertyInLiteral(compiler::CodeAssemblerState* state,
- TNode<Context> context,
- TNode<JSObject> receiver, TNode<Object> key,
- TNode<Object> value);
+ static void CreateDataProperty(compiler::CodeAssemblerState* state,
+ TNode<Context> context,
+ TNode<JSObject> receiver, TNode<Object> key,
+ TNode<Object> value);
};
class DefineKeyedOwnGenericGenerator {
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index fdf186be36..d4c4122c22 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -1185,6 +1185,37 @@ void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors.Replace(entry, &d);
}
+
+void InitializeJSArrayMaps(Isolate* isolate, Handle<Context> native_context,
+ Handle<Map> initial_map) {
+ // Replace all of the cached initial array maps in the native context with
+ // the appropriate transitioned elements kind maps.
+ Handle<Map> current_map = initial_map;
+ ElementsKind kind = current_map->elements_kind();
+ DCHECK_EQ(GetInitialFastElementsKind(), kind);
+ DCHECK_EQ(PACKED_SMI_ELEMENTS, kind);
+ DCHECK_EQ(Context::ArrayMapIndex(kind),
+ Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX);
+ native_context->set(Context::ArrayMapIndex(kind), *current_map,
+ UPDATE_WRITE_BARRIER, kReleaseStore);
+ for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
+ i < kFastElementsKindCount; ++i) {
+ Handle<Map> new_map;
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
+ Map maybe_elements_transition = current_map->ElementsTransitionMap(
+ isolate, ConcurrencyMode::kSynchronous);
+ if (!maybe_elements_transition.is_null()) {
+ new_map = handle(maybe_elements_transition, isolate);
+ } else {
+ new_map = Map::CopyAsElementsKind(isolate, current_map, next_kind,
+ INSERT_TRANSITION);
+ }
+ DCHECK_EQ(next_kind, new_map->elements_kind());
+ native_context->set(Context::ArrayMapIndex(next_kind), *new_map,
+ UPDATE_WRITE_BARRIER, kReleaseStore);
+ current_map = new_map;
+ }
+}
} // namespace
void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
@@ -1710,8 +1741,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::ARRAY_FUNCTION_INDEX);
InstallSpeciesGetter(isolate_, array_function);
- // Cache the array maps, needed by ArrayConstructorStub
- CacheInitialJSArrayMaps(isolate_, native_context(), initial_map);
+ // Create the initial array map for Array.prototype, which is required
+ // by the ArrayConstructorStub in use.
+ // This is repeated after properly instantiating the Array.prototype.
+ InitializeJSArrayMaps(isolate_, native_context(), initial_map);
// Set up %ArrayPrototype%.
// The %ArrayPrototype% has TERMINAL_FAST_ELEMENTS_KIND in order to ensure
@@ -1722,6 +1755,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::SetPrototype(array_function, proto);
native_context()->set_initial_array_prototype(*proto);
+ InitializeJSArrayMaps(isolate_, native_context(),
+
+ handle(array_function->initial_map(), isolate_));
+
SimpleInstallFunction(isolate_, array_function, "isArray",
Builtin::kArrayIsArray, 1, true);
SimpleInstallFunction(isolate_, array_function, "from", Builtin::kArrayFrom,
@@ -2387,8 +2424,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, promise_fun, "all", Builtin::kPromiseAll, 1, true);
native_context()->set_promise_all(*promise_all);
- InstallFunctionWithBuiltinId(isolate_, promise_fun, "allSettled",
- Builtin::kPromiseAllSettled, 1, true);
+ Handle<JSFunction> promise_all_settled =
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "allSettled",
+ Builtin::kPromiseAllSettled, 1, true);
+ native_context()->set_promise_all_settled(*promise_all_settled);
Handle<JSFunction> promise_any = InstallFunctionWithBuiltinId(
isolate_, promise_fun, "any", Builtin::kPromiseAny, 1, true);
@@ -4058,6 +4097,8 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
Handle<Map> rab_gsab_initial_map = factory()->NewMap(
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithEmbedderFields,
GetCorrespondingRabGsabElementsKind(elements_kind), 0);
+ rab_gsab_initial_map->SetConstructor(*result);
+
native_context()->set(rab_gsab_initial_map_index, *rab_gsab_initial_map,
UPDATE_WRITE_BARRIER, kReleaseStore);
Map::SetPrototype(isolate(), rab_gsab_initial_map, prototype);
@@ -4453,13 +4494,14 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
void Genesis::InitializeGlobal_harmony_shadow_realm() {
if (!FLAG_harmony_shadow_realm) return;
+ Factory* factory = isolate()->factory();
// -- S h a d o w R e a l m
// #sec-shadowrealm-objects
Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Handle<JSFunction> shadow_realm_fun = InstallFunction(
- isolate_, global, "ShadowRealm", JS_SHADOW_REALM_TYPE,
- JSShadowRealm::kHeaderSize, 0, factory()->the_hole_value(),
- Builtin::kShadowRealmConstructor);
+ Handle<JSFunction> shadow_realm_fun =
+ InstallFunction(isolate_, global, "ShadowRealm", JS_SHADOW_REALM_TYPE,
+ JSShadowRealm::kHeaderSize, 0, factory->the_hole_value(),
+ Builtin::kShadowRealmConstructor);
shadow_realm_fun->shared().set_length(0);
shadow_realm_fun->shared().DontAdaptArguments();
@@ -4467,7 +4509,7 @@ void Genesis::InitializeGlobal_harmony_shadow_realm() {
Handle<JSObject> prototype(
JSObject::cast(shadow_realm_fun->instance_prototype()), isolate());
- InstallToStringTag(isolate_, prototype, factory()->ShadowRealm_string());
+ InstallToStringTag(isolate_, prototype, factory->ShadowRealm_string());
SimpleInstallFunction(isolate_, prototype, "evaluate",
Builtin::kShadowRealmPrototypeEvaluate, 1, true);
@@ -4475,15 +4517,38 @@ void Genesis::InitializeGlobal_harmony_shadow_realm() {
Builtin::kShadowRealmPrototypeImportValue, 2, true);
{ // --- W r a p p e d F u n c t i o n
- Handle<Map> map = factory()->NewMap(JS_WRAPPED_FUNCTION_TYPE,
- JSWrappedFunction::kHeaderSize,
- TERMINAL_FAST_ELEMENTS_KIND, 0);
+ Handle<Map> map = factory->NewMap(JS_WRAPPED_FUNCTION_TYPE,
+ JSWrappedFunction::kHeaderSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 0);
map->SetConstructor(native_context()->object_function());
map->set_is_callable(true);
Handle<JSObject> empty_function(native_context()->function_prototype(),
isolate());
Map::SetPrototype(isolate(), map, empty_function);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ Map::EnsureDescriptorSlack(isolate_, map, 2);
+ { // length
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex ==
+ 0);
+ Descriptor d = Descriptor::AccessorConstant(
+ factory->length_string(), factory->wrapped_function_length_accessor(),
+ roc_attribs);
+ map->AppendDescriptor(isolate(), &d);
+ }
+
+ { // name
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex ==
+ 1);
+ Descriptor d = Descriptor::AccessorConstant(
+ factory->name_string(), factory->wrapped_function_name_accessor(),
+ roc_attribs);
+ map->AppendDescriptor(isolate(), &d);
+ }
+
native_context()->set_wrapped_function_map(*map);
}
}
@@ -5954,24 +6019,29 @@ bool Genesis::InstallExtension(Isolate* isolate,
return false;
}
}
- bool result = CompileExtension(isolate, extension);
- if (!result) {
+ if (!CompileExtension(isolate, extension)) {
// If this failed, it either threw an exception, or the isolate is
// terminating.
DCHECK(isolate->has_pending_exception() ||
(isolate->has_scheduled_exception() &&
isolate->scheduled_exception() ==
ReadOnlyRoots(isolate).termination_exception()));
- // We print out the name of the extension that fail to install.
- // When an error is thrown during bootstrapping we automatically print
- // the line number at which this happened to the console in the isolate
- // error throwing functionality.
- base::OS::PrintError("Error installing extension '%s'.\n",
- current->extension()->name());
- isolate->clear_pending_exception();
+ if (isolate->has_pending_exception()) {
+ // We print out the name of the extension that fails to install.
+ // When an error is thrown during bootstrapping, the isolate's error
+ // throwing functionality automatically prints to the console the line
+ // number at which it happened.
+ base::OS::PrintError("Error installing extension '%s'.\n",
+ current->extension()->name());
+ isolate->clear_pending_exception();
+ }
+ return false;
}
+
+ DCHECK(!isolate->has_pending_exception() &&
+ !isolate->has_scheduled_exception());
extension_states->set_state(current, INSTALLED);
- return result;
+ return true;
}
bool Genesis::ConfigureGlobalObject(
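A rough standalone model of what the new InitializeJSArrayMaps helper does: starting from the PACKED_SMI_ELEMENTS initial map, walk the fast elements-kind sequence and cache one map per kind in the native context, reusing an existing elements transition when one is present. The kind ordering and the map/transition representation below are illustrative assumptions, not the V8 data structures:

    #include <array>
    #include <cstdio>
    #include <map>

    enum class Kind { PackedSmi, HoleySmi, PackedDouble, HoleyDouble, Packed, Holey };

    int main() {
      // Assumed fast elements-kind ordering; the first entry must be the
      // initial kind (PACKED_SMI_ELEMENTS), matching the DCHECKs in the helper.
      constexpr std::array<Kind, 6> kSequence = {
          Kind::PackedSmi,    Kind::HoleySmi, Kind::PackedDouble,
          Kind::HoleyDouble,  Kind::Packed,   Kind::Holey};

      std::map<Kind, int> native_context_cache;  // kind -> map id (stand-in)
      std::map<int, int> transitions;            // existing elements transitions

      int current_map = 0;
      int next_id = 1;
      native_context_cache[kSequence[0]] = current_map;
      for (size_t i = 1; i < kSequence.size(); ++i) {
        // Reuse an existing transition if present, otherwise copy the map for
        // the next elements kind (CopyAsElementsKind in the real code).
        auto it = transitions.find(current_map);
        int new_map = (it != transitions.end()) ? it->second : next_id++;
        native_context_cache[kSequence[i]] = new_map;
        current_map = new_map;
      }
      std::printf("cached %zu array maps\n", native_context_cache.size());
    }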
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index edac725e8e..87baefd277 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -69,7 +69,7 @@ void AdvanceStartupState(V8StartupState expected_next_state) {
// isolate->Dispose();
// v8::V8::Dispose();
// v8::V8::DisposePlatform();
- FATAL("Wrong intialization order: got %d expected %d!",
+ FATAL("Wrong initialization order: got %d expected %d!",
static_cast<int>(current_state), static_cast<int>(next_state));
}
if (!v8_startup_state_.compare_exchange_strong(current_state, next_state)) {
@@ -135,11 +135,8 @@ void V8::Initialize() {
// Update logging information before enforcing flag implications.
bool* log_all_flags[] = {&FLAG_turbo_profiling_log_builtins,
&FLAG_log_all,
- &FLAG_log_api,
&FLAG_log_code,
&FLAG_log_code_disassemble,
- &FLAG_log_handles,
- &FLAG_log_suspect,
&FLAG_log_source_code,
&FLAG_log_function_events,
&FLAG_log_internal_timer_events,
@@ -162,6 +159,10 @@ void V8::Initialize() {
// Profiling flags depend on logging.
FLAG_log |= FLAG_perf_prof || FLAG_perf_basic_prof || FLAG_ll_prof ||
FLAG_prof || FLAG_prof_cpp;
+ FLAG_log |= FLAG_gdbjit;
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ FLAG_log |= FLAG_enable_system_instrumentation;
+#endif
}
FlagList::EnforceFlagImplications();
diff --git a/deps/v8/src/init/v8.h b/deps/v8/src/init/v8.h
index 3f8fe14b45..8184e16b03 100644
--- a/deps/v8/src/init/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -24,6 +24,8 @@ class V8 : public AllStatic {
// Report process out of memory. Implementation found in api.cc.
// This function will not return, but will terminate the execution.
+ // IMPORTANT: Update the Google-internal crash processor if this signature
+ // changes to be able to extract detailed v8::internal::HeapStats on OOM.
[[noreturn]] static void FatalProcessOutOfMemory(Isolate* isolate,
const char* location,
bool is_heap_oom = false);
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 733fab3dff..02487acfc2 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -150,6 +150,8 @@ v8_source_set("inspector") {
"v8-stack-trace-impl.h",
"v8-value-utils.cc",
"v8-value-utils.h",
+ "v8-webdriver-serializer.cc",
+ "v8-webdriver-serializer.h",
"value-mirror.cc",
"value-mirror.h",
]
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 8349a4995a..3a146c4a5e 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -249,6 +249,8 @@ class InjectedScript::ProtocolPromiseHandler {
// we try to capture a fresh stack trace.
if (maybeMessage.ToLocal(&message)) {
v8::Local<v8::Value> exception = result;
+ session->inspector()->client()->dispatchError(scope.context(), message,
+ exception);
protocol::PtrMaybe<protocol::Runtime::ExceptionDetails> exceptionDetails;
response = scope.injectedScript()->createExceptionDetails(
message, exception, m_objectGroup, &exceptionDetails);
@@ -572,6 +574,14 @@ Response InjectedScript::wrapObjectMirror(
&customPreview);
if (customPreview) (*result)->setCustomPreview(std::move(customPreview));
}
+ if (wrapMode == WrapMode::kGenerateWebDriverValue) {
+ int maxDepth = 1;
+ std::unique_ptr<protocol::Runtime::WebDriverValue> webDriverValue;
+ response = mirror.buildWebDriverValue(context, maxDepth, &webDriverValue);
+ if (!response.IsSuccess()) return response;
+ (*result)->setWebDriverValue(std::move(webDriverValue));
+ }
+
return Response::Success();
}
@@ -848,6 +858,8 @@ Response InjectedScript::wrapEvaluateResult(
return Response::ServerError("Execution was terminated");
}
v8::Local<v8::Value> exception = tryCatch.Exception();
+ m_context->inspector()->client()->dispatchError(
+ m_context->context(), tryCatch.Message(), exception);
Response response =
wrapObject(exception, objectGroup,
exception->IsNativeError() ? WrapMode::kNoPreview
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index b87c099513..638df79637 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -31,7 +31,12 @@ class V8RuntimeAgentImpl;
class V8StackTraceImpl;
struct V8StackTraceId;
-enum class WrapMode { kForceValue, kNoPreview, kWithPreview };
+enum class WrapMode {
+ kForceValue,
+ kNoPreview,
+ kWithPreview,
+ kGenerateWebDriverValue
+};
using protocol::Response;
using TerminateExecutionCallback =
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 96cc6c5a30..6357370dc0 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -256,6 +256,7 @@ void V8RuntimeAgentImpl::evaluate(
Maybe<bool> maybeAwaitPromise, Maybe<bool> throwOnSideEffect,
Maybe<double> timeout, Maybe<bool> disableBreaks, Maybe<bool> maybeReplMode,
Maybe<bool> allowUnsafeEvalBlockedByCSP, Maybe<String16> uniqueContextId,
+ Maybe<bool> generateWebDriverValue,
std::unique_ptr<EvaluateCallback> callback) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
"EvaluateScript");
@@ -319,20 +320,23 @@ void V8RuntimeAgentImpl::evaluate(
return;
}
- WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
- : WrapMode::kNoPreview;
- if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
+ WrapMode wrap_mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) wrap_mode = WrapMode::kForceValue;
+ if (generateWebDriverValue.fromMaybe(false))
+ wrap_mode = WrapMode::kGenerateWebDriverValue;
// REPL mode always returns a promise that must be awaited.
const bool await = replMode || maybeAwaitPromise.fromMaybe(false);
if (!await || scope.tryCatch().HasCaught()) {
wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
- scope.tryCatch(), objectGroup.fromMaybe(""), mode,
- callback.get());
+ scope.tryCatch(), objectGroup.fromMaybe(""),
+ wrap_mode, callback.get());
return;
}
scope.injectedScript()->addPromiseCallback(
- m_session, maybeResultValue, objectGroup.fromMaybe(""), mode, replMode,
+ m_session, maybeResultValue, objectGroup.fromMaybe(""), wrap_mode,
+ replMode,
EvaluateCallbackWrapper<EvaluateCallback>::wrap(std::move(callback)));
}
@@ -366,7 +370,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
Maybe<bool> userGesture, Maybe<bool> awaitPromise,
Maybe<int> executionContextId, Maybe<String16> objectGroup,
- Maybe<bool> throwOnSideEffect,
+ Maybe<bool> throwOnSideEffect, Maybe<bool> generateWebDriverValue,
std::unique_ptr<CallFunctionOnCallback> callback) {
if (objectId.isJust() && executionContextId.isJust()) {
callback->sendFailure(Response::ServerError(
@@ -378,9 +382,11 @@ void V8RuntimeAgentImpl::callFunctionOn(
"Either ObjectId or executionContextId must be specified"));
return;
}
- WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
- : WrapMode::kNoPreview;
- if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
+ WrapMode wrap_mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) wrap_mode = WrapMode::kForceValue;
+ if (generateWebDriverValue.fromMaybe(false))
+ wrap_mode = WrapMode::kGenerateWebDriverValue;
if (objectId.isJust()) {
InjectedScript::ObjectScope scope(m_session, objectId.fromJust());
Response response = scope.initialize();
@@ -390,7 +396,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
}
innerCallFunctionOn(
m_session, scope, scope.object(), expression,
- std::move(optionalArguments), silent.fromMaybe(false), mode,
+ std::move(optionalArguments), silent.fromMaybe(false), wrap_mode,
userGesture.fromMaybe(false), awaitPromise.fromMaybe(false),
objectGroup.isJust() ? objectGroup.fromMaybe(String16())
: scope.objectGroupName(),
@@ -412,7 +418,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
}
innerCallFunctionOn(
m_session, scope, scope.context()->Global(), expression,
- std::move(optionalArguments), silent.fromMaybe(false), mode,
+ std::move(optionalArguments), silent.fromMaybe(false), wrap_mode,
userGesture.fromMaybe(false), awaitPromise.fromMaybe(false),
objectGroup.fromMaybe(""), throwOnSideEffect.fromMaybe(false),
std::move(callback));
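The agent picks a single wrap mode per request; as sketched below, generateWebDriverValue takes precedence over returnByValue, which takes precedence over generatePreview. This is a standalone model with stand-in types, not the inspector API:

    #include <cassert>

    enum class WrapMode {
      kNoPreview,
      kWithPreview,
      kForceValue,
      kGenerateWebDriverValue
    };

    WrapMode PickWrapMode(bool generate_preview, bool return_by_value,
                          bool generate_web_driver_value) {
      WrapMode mode =
          generate_preview ? WrapMode::kWithPreview : WrapMode::kNoPreview;
      if (return_by_value) mode = WrapMode::kForceValue;
      if (generate_web_driver_value) mode = WrapMode::kGenerateWebDriverValue;
      return mode;
    }

    int main() {
      assert(PickWrapMode(true, true, true) == WrapMode::kGenerateWebDriverValue);
      assert(PickWrapMode(true, true, false) == WrapMode::kForceValue);
      assert(PickWrapMode(true, false, false) == WrapMode::kWithPreview);
      return 0;
    }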
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index ca46de5ec4..43b5ea009e 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -75,6 +75,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<double> timeout, Maybe<bool> disableBreaks,
Maybe<bool> replMode, Maybe<bool> allowUnsafeEvalBlockedByCSP,
Maybe<String16> uniqueContextId,
+ Maybe<bool> generateWebDriverValue,
std::unique_ptr<EvaluateCallback>) override;
void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview,
@@ -86,6 +87,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<bool> generatePreview, Maybe<bool> userGesture,
Maybe<bool> awaitPromise, Maybe<int> executionContextId,
Maybe<String16> objectGroup, Maybe<bool> throwOnSideEffect,
+ Maybe<bool> generateWebDriverValue,
std::unique_ptr<CallFunctionOnCallback>) override;
Response releaseObject(const String16& objectId) override;
Response getProperties(
diff --git a/deps/v8/src/inspector/v8-webdriver-serializer.cc b/deps/v8/src/inspector/v8-webdriver-serializer.cc
new file mode 100644
index 0000000000..8139fe565a
--- /dev/null
+++ b/deps/v8/src/inspector/v8-webdriver-serializer.cc
@@ -0,0 +1,375 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-webdriver-serializer.h"
+
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-date.h"
+#include "include/v8-exception.h"
+#include "include/v8-regexp.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/value-mirror.h"
+
+namespace v8_inspector {
+
+using protocol::Response;
+// private
+protocol::Response _serializeRecursively(
+ v8::Local<v8::Value> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Value>* result) {
+ std::unique_ptr<ValueMirror> mirror = ValueMirror::create(context, value);
+ std::unique_ptr<protocol::Runtime::WebDriverValue> webDriver_value;
+ Response response =
+ mirror->buildWebDriverValue(context, max_depth - 1, &webDriver_value);
+ if (!response.IsSuccess()) return response;
+ if (!webDriver_value) return Response::InternalError();
+
+ std::unique_ptr<protocol::DictionaryValue> result_dict =
+ protocol::DictionaryValue::create();
+
+ result_dict->setValue(
+ protocol::String("type"),
+ protocol::StringValue::create(webDriver_value->getType()));
+ if (webDriver_value->hasValue())
+ result_dict->setValue(protocol::String("value"),
+ webDriver_value->getValue(nullptr)->clone());
+
+ (*result) = std::move(result_dict);
+ return Response::Success();
+}
+
+String16 descriptionForObject(v8::Isolate* isolate,
+ v8::Local<v8::Object> object) {
+ return toProtocolString(isolate, object->GetConstructorName());
+}
+
+String16 descriptionForDate(v8::Local<v8::Context> context,
+ v8::Local<v8::Date> date) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::String> description;
+ if (!date->ToString(context).ToLocal(&description)) {
+ return descriptionForObject(isolate, date);
+ }
+ return toProtocolString(isolate, description);
+}
+
+String16 _descriptionForRegExpFlags(v8::Local<v8::RegExp> value) {
+ String16Builder result_string_builder;
+ v8::RegExp::Flags flags = value->GetFlags();
+ if (flags & v8::RegExp::Flags::kHasIndices) result_string_builder.append('d');
+ if (flags & v8::RegExp::Flags::kGlobal) result_string_builder.append('g');
+ if (flags & v8::RegExp::Flags::kIgnoreCase) result_string_builder.append('i');
+ if (flags & v8::RegExp::Flags::kLinear) result_string_builder.append('l');
+ if (flags & v8::RegExp::Flags::kMultiline) result_string_builder.append('m');
+ if (flags & v8::RegExp::Flags::kDotAll) result_string_builder.append('s');
+ if (flags & v8::RegExp::Flags::kUnicode) result_string_builder.append('u');
+ if (flags & v8::RegExp::Flags::kSticky) result_string_builder.append('y');
+ return result_string_builder.toString();
+}
+
+protocol::Response _serializeRegexp(
+ v8::Local<v8::RegExp> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Regexp)
+ .build();
+
+ std::unique_ptr<protocol::DictionaryValue> result_value =
+ protocol::DictionaryValue::create();
+
+ result_value->setValue(protocol::String("pattern"),
+ protocol::StringValue::create(toProtocolString(
+ context->GetIsolate(), value->GetSource())));
+
+ String16 flags = _descriptionForRegExpFlags(value);
+ if (!flags.isEmpty())
+ result_value->setValue(protocol::String("flags"),
+ protocol::StringValue::create(flags));
+
+ (*result)->setValue(std::move(result_value));
+ return Response::Success();
+}
+
+protocol::Response _serializeDate(
+ v8::Local<v8::Date> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Date)
+ .build();
+
+ (*result)->setValue(protocol::StringValue::create(
+ descriptionForDate(context, value.As<v8::Date>())));
+ return Response::Success();
+}
+
+protocol::Response _serializeArrayValue(
+ v8::Local<v8::Array> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Value>* result) {
+ std::unique_ptr<protocol::ListValue> result_value =
+ protocol::ListValue::create();
+ uint32_t length = value->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> element_value;
+ std::unique_ptr<protocol::Value> element_protocol_value;
+ if (!value->Get(context, i).ToLocal(&element_value))
+ return Response::InternalError();
+
+ Response response = _serializeRecursively(element_value, context, max_depth,
+ &element_protocol_value);
+ if (!response.IsSuccess()) return response;
+
+ result_value->pushValue(std::move(element_protocol_value));
+ }
+ *result = std::move(result_value);
+ return Response::Success();
+}
+
+protocol::Response _serializeArray(
+ v8::Local<v8::Array> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Array)
+ .build();
+
+ if (max_depth <= 0) return Response::Success();
+
+ std::unique_ptr<protocol::Value> result_value;
+ Response response =
+ _serializeArrayValue(value, context, max_depth, &result_value);
+ if (!response.IsSuccess()) return response;
+
+ (*result)->setValue(std::move(result_value));
+ return Response::Success();
+}
+
+protocol::Response _serializeMap(
+ v8::Local<v8::Map> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Map)
+ .build();
+
+ if (max_depth <= 0) return Response::Success();
+
+ std::unique_ptr<protocol::ListValue> result_value =
+ protocol::ListValue::create();
+
+ v8::Local<v8::Array> properties_and_values = value->AsArray();
+
+ uint32_t length = properties_and_values->Length();
+ for (uint32_t i = 0; i < length; i += 2) {
+ v8::Local<v8::Value> key_value, property_value;
+ std::unique_ptr<protocol::Value> key_protocol_value,
+ property_protocol_value;
+
+ if (!properties_and_values->Get(context, i).ToLocal(&key_value))
+ return Response::InternalError();
+ if (!properties_and_values->Get(context, i + 1).ToLocal(&property_value))
+ return Response::InternalError();
+ if (property_value->IsUndefined()) continue;
+
+ if (key_value->IsString()) {
+ key_protocol_value = protocol::StringValue::create(
+ toProtocolString(context->GetIsolate(), key_value.As<v8::String>()));
+ } else {
+ Response response = _serializeRecursively(key_value, context, max_depth,
+ &key_protocol_value);
+ if (!response.IsSuccess()) return response;
+ }
+
+ Response response = _serializeRecursively(
+ property_value, context, max_depth, &property_protocol_value);
+ if (!response.IsSuccess()) return response;
+
+ std::unique_ptr<protocol::ListValue> value_list =
+ protocol::ListValue::create();
+
+ // command->pushValue(protocol::StringValue::create(method));
+ value_list->pushValue(std::move(key_protocol_value));
+ value_list->pushValue(std::move(property_protocol_value));
+
+ result_value->pushValue(std::move(value_list));
+ }
+
+ (*result)->setValue(std::move(result_value));
+ return Response::Success();
+}
+
+protocol::Response _serializeSet(
+ v8::Local<v8::Set> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Set)
+ .build();
+
+ if (max_depth <= 0) return Response::Success();
+
+ std::unique_ptr<protocol::Value> result_value;
+ Response response =
+ _serializeArrayValue(value->AsArray(), context, max_depth, &result_value);
+ if (!response.IsSuccess()) return response;
+
+ (*result)->setValue(std::move(result_value));
+ return Response::Success();
+}
+
+protocol::Response _serializeObjectValue(
+ v8::Local<v8::Object> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Value>* result) {
+ std::unique_ptr<protocol::ListValue> result_list =
+ protocol::ListValue::create();
+ // Iterate through object's properties.
+ v8::Local<v8::Array> property_names;
+ if (!value->GetOwnPropertyNames(context).ToLocal(&property_names))
+ return Response::InternalError();
+ uint32_t length = property_names->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> key_value, property_value;
+ std::unique_ptr<protocol::Value> key_protocol_value,
+ property_protocol_value;
+
+ if (!property_names->Get(context, i).ToLocal(&key_value))
+ return Response::InternalError();
+
+ if (key_value->IsString()) {
+ v8::Maybe<bool> hasRealNamedProperty =
+ value->HasRealNamedProperty(context, key_value.As<v8::String>());
+ // Don't access properties with interceptors.
+ if (hasRealNamedProperty.IsNothing() || !hasRealNamedProperty.FromJust())
+ continue;
+ key_protocol_value = protocol::StringValue::create(
+ toProtocolString(context->GetIsolate(), key_value.As<v8::String>()));
+ } else {
+ Response response = _serializeRecursively(key_value, context, max_depth,
+ &key_protocol_value);
+ if (!response.IsSuccess()) return response;
+ }
+
+ if (!value->Get(context, key_value).ToLocal(&property_value))
+ return Response::InternalError();
+ if (property_value->IsUndefined()) continue;
+
+ Response response = _serializeRecursively(
+ property_value, context, max_depth, &property_protocol_value);
+ if (!response.IsSuccess()) return response;
+
+ std::unique_ptr<protocol::ListValue> value_list =
+ protocol::ListValue::create();
+
+ value_list->pushValue(std::move(key_protocol_value));
+ value_list->pushValue(std::move(property_protocol_value));
+
+ result_list->pushValue(std::move(value_list));
+ }
+ (*result) = std::move(result_list);
+ return Response::Success();
+}
+
+protocol::Response _serializeObject(
+ v8::Local<v8::Object> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Object)
+ .build();
+
+ if (max_depth <= 0) return Response::Success();
+
+ std::unique_ptr<protocol::Value> result_value;
+ Response response = _serializeObjectValue(value.As<v8::Object>(), context,
+ max_depth, &result_value);
+ if (!response.IsSuccess()) return response;
+
+ (*result)->setValue(std::move(result_value));
+ return Response::Success();
+}
+
+protocol::Response V8WebDriverSerializer::serializeV8Value(
+ v8::Local<v8::Object> value, v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) {
+ if (value->IsArray()) {
+ Response response =
+ _serializeArray(value.As<v8::Array>(), context, max_depth, result);
+ return response;
+ }
+ if (value->IsRegExp()) {
+ Response response =
+ _serializeRegexp(value.As<v8::RegExp>(), context, max_depth, result);
+ return response;
+ }
+ if (value->IsDate()) {
+ Response response =
+ _serializeDate(value.As<v8::Date>(), context, max_depth, result);
+ return response;
+ }
+ if (value->IsMap()) {
+ Response response =
+ _serializeMap(value.As<v8::Map>(), context, max_depth, result);
+ return response;
+ }
+ if (value->IsSet()) {
+ Response response =
+ _serializeSet(value.As<v8::Set>(), context, max_depth, result);
+ return response;
+ }
+ if (value->IsWeakMap()) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Weakmap)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsWeakSet()) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Weakset)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsNativeError()) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Error)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsProxy()) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Proxy)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsPromise()) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Promise)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsTypedArray()) {
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Typedarray)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsArrayBuffer()) {
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Arraybuffer)
+ .build();
+ return Response::Success();
+ }
+ if (value->IsFunction()) {
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Function)
+ .build();
+ return Response::Success();
+ }
+
+ // Serialize as an Object.
+ Response response =
+ _serializeObject(value.As<v8::Object>(), context, max_depth, result);
+ return response;
+}
+
+} // namespace v8_inspector
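A standalone model of the shape the serializer above produces: every value becomes a type tag plus an optional value, and containers are only descended into while max_depth remains, so a nested container at the depth limit keeps just its type tag. The structs below are illustrative stand-ins, not the protocol classes:

    #include <iostream>
    #include <string>
    #include <vector>

    struct Node {  // stand-in for a v8::Value handed to the serializer
      std::string type;            // "number", "array", ...
      double number = 0;
      std::vector<Node> children;  // only used for container types
    };

    struct WebDriverValue {  // stand-in for protocol::Runtime::WebDriverValue
      std::string type;
      bool has_value = false;
      double number = 0;
      std::vector<WebDriverValue> entries;
    };

    WebDriverValue Serialize(const Node& n, int max_depth) {
      if (n.type == "number") return {"number", true, n.number, {}};
      // Containers always get their type tag, but are only descended into
      // while depth remains (mirrors the `if (max_depth <= 0)` early returns).
      WebDriverValue result{n.type, false, 0, {}};
      if (max_depth <= 0) return result;
      for (const Node& child : n.children)
        result.entries.push_back(Serialize(child, max_depth - 1));
      result.has_value = true;
      return result;
    }

    int main() {
      Node arr{"array", 0, {{"number", 1, {}}, {"array", 0, {{"number", 2, {}}}}}};
      WebDriverValue v = Serialize(arr, /*max_depth=*/1);
      std::cout << v.type << ": " << v.entries.size() << " entries; nested "
                << v.entries[1].type << " kept as type-only: "
                << !v.entries[1].has_value << "\n";
    }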
diff --git a/deps/v8/src/inspector/v8-webdriver-serializer.h b/deps/v8/src/inspector/v8-webdriver-serializer.h
new file mode 100644
index 0000000000..02443bed07
--- /dev/null
+++ b/deps/v8/src/inspector/v8-webdriver-serializer.h
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8_WEBDRIVER_SERIALIZER_H_
+#define V8_INSPECTOR_V8_WEBDRIVER_SERIALIZER_H_
+
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-regexp.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/v8-value-utils.h"
+
+namespace v8_inspector {
+class V8WebDriverSerializer {
+ public:
+ static protocol::Response serializeV8Value(
+ v8::Local<v8::Object> value, v8::Local<v8::Context> context,
+ int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result);
+};
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8_WEBDRIVER_SERIALIZER_H_
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 62514cdcc1..c2fa9b46cc 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -21,6 +21,7 @@
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-value-utils.h"
+#include "src/inspector/v8-webdriver-serializer.h"
namespace v8_inspector {
@@ -209,10 +210,9 @@ String16 descriptionForSymbol(v8::Local<v8::Context> context,
String16 descriptionForBigInt(v8::Local<v8::Context> context,
v8::Local<v8::BigInt> value) {
v8::Isolate* isolate = context->GetIsolate();
- v8::TryCatch tryCatch(isolate);
- v8::Local<v8::String> description;
- if (!value->ToString(context).ToLocal(&description)) return String16();
- return toProtocolString(isolate, description) + "n";
+ v8::Local<v8::String> description =
+ v8::debug::GetBigIntDescription(isolate, value);
+ return toProtocolString(isolate, description);
}
String16 descriptionForPrimitiveType(v8::Local<v8::Context> context,
@@ -309,11 +309,7 @@ String16 descriptionForObject(v8::Isolate* isolate,
String16 descriptionForDate(v8::Local<v8::Context> context,
v8::Local<v8::Date> date) {
v8::Isolate* isolate = context->GetIsolate();
- v8::TryCatch tryCatch(isolate);
- v8::Local<v8::String> description;
- if (!date->ToString(context).ToLocal(&description)) {
- return descriptionForObject(isolate, date);
- }
+ v8::Local<v8::String> description = v8::debug::GetDateDescription(date);
return toProtocolString(isolate, description);
}
@@ -443,6 +439,46 @@ class PrimitiveValueMirror final : public ValueMirror {
(*preview)->setSubtype(RemoteObject::SubtypeEnum::Null);
}
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ // https://w3c.github.io/webdriver-bidi/#data-types-protocolValue-primitiveProtocolValue-serialization
+
+ if (m_value->IsUndefined()) {
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Undefined)
+ .build();
+ return Response::Success();
+ }
+ if (m_value->IsNull()) {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Null)
+ .build();
+ return Response::Success();
+ }
+ if (m_value->IsString()) {
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::String)
+ .setValue(protocol::StringValue::create(toProtocolString(
+ context->GetIsolate(), m_value.As<v8::String>())))
+ .build();
+ return Response::Success();
+ }
+ if (m_value->IsBoolean()) {
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Boolean)
+ .setValue(protocol::FundamentalValue::create(
+ m_value.As<v8::Boolean>()->Value()))
+ .build();
+ return Response::Success();
+ }
+ return Response::ServerError("unexpected primitive type");
+ }
+
private:
v8::Local<v8::Value> m_value;
String16 m_type;
@@ -493,6 +529,25 @@ class NumberMirror final : public ValueMirror {
.build();
}
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ // https://w3c.github.io/webdriver-bidi/#data-types-protocolValue-primitiveProtocolValue-serialization
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Number)
+ .build();
+
+ bool unserializable = false;
+ String16 descriptionValue = description(&unserializable);
+ if (unserializable) {
+ (*result)->setValue(protocol::StringValue::create(descriptionValue));
+ } else {
+ (*result)->setValue(toProtocolValue(m_value.As<v8::Number>()->Value()));
+ }
+ return Response::Success();
+ }
+
private:
String16 description(bool* unserializable) const {
*unserializable = true;
@@ -520,7 +575,7 @@ class BigIntMirror final : public ValueMirror {
*result = RemoteObject::create()
.setType(RemoteObject::TypeEnum::Bigint)
.setUnserializableValue(description)
- .setDescription(description)
+ .setDescription(abbreviateString(description, kMiddle))
.build();
return Response::Success();
}
@@ -544,7 +599,8 @@ class BigIntMirror final : public ValueMirror {
*preview =
ObjectPreview::create()
.setType(RemoteObject::TypeEnum::Bigint)
- .setDescription(descriptionForBigInt(context, m_value))
+ .setDescription(abbreviateString(
+ descriptionForBigInt(context, m_value), kMiddle))
.setOverflow(false)
.setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
@@ -552,6 +608,20 @@ class BigIntMirror final : public ValueMirror {
v8::Local<v8::Value> v8Value() const override { return m_value; }
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ // https://w3c.github.io/webdriver-bidi/#data-types-protocolValue-primitiveProtocolValue-serialization
+
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Bigint)
+ .setValue(protocol::StringValue::create(
+ descriptionForBigInt(context, m_value)))
+ .build();
+ return Response::Success();
+ }
+
private:
v8::Local<v8::BigInt> m_value;
};
@@ -588,6 +658,17 @@ class SymbolMirror final : public ValueMirror {
v8::Local<v8::Value> v8Value() const override { return m_symbol; }
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ // https://w3c.github.io/webdriver-bidi/#data-types-protocolValue-RemoteValue-serialization
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Symbol)
+ .build();
+ return Response::Success();
+ }
+
private:
v8::Local<v8::Symbol> m_symbol;
};
@@ -632,6 +713,16 @@ class LocationMirror final : public ValueMirror {
}
v8::Local<v8::Value> v8Value() const override { return m_value; }
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Object)
+ .build();
+ return Response::Success();
+ }
+
private:
static std::unique_ptr<LocationMirror> create(v8::Local<v8::Value> value,
int scriptId, int lineNumber,
@@ -709,6 +800,18 @@ class FunctionMirror final : public ValueMirror {
.build();
}
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ // https://w3c.github.io/webdriver-bidi/#data-types-protocolValue-RemoteValue-serialization
+ *result =
+ protocol::Runtime::WebDriverValue::create()
+ .setType(protocol::Runtime::WebDriverValue::TypeEnum::Function)
+ .build();
+ return Response::Success();
+ }
+
private:
v8::Local<v8::Function> m_value;
};
@@ -994,6 +1097,41 @@ class ObjectMirror final : public ValueMirror {
if (m_hasSubtype) (*result)->setSubtype(m_subtype);
}
+ protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result)
+ const override {
+ // https://w3c.github.io/webdriver-bidi/#data-types-protocolValue-RemoteValue-serialization
+
+ // Check if embedder implemented custom serialization.
+ std::unique_ptr<v8_inspector::WebDriverValue> embedder_serialized_result =
+ clientFor(context)->serializeToWebDriverValue(m_value, max_depth);
+
+ if (embedder_serialized_result) {
+ // Embedder-implemented serialization.
+ *result = protocol::Runtime::WebDriverValue::create()
+ .setType(toString16(embedder_serialized_result->type))
+ .build();
+
+ if (!embedder_serialized_result->value.IsEmpty()) {
+ // Embedder-implemented serialization has value.
+ std::unique_ptr<protocol::Value> protocol_value;
+ Response response = toProtocolValue(
+ context, embedder_serialized_result->value.ToLocalChecked(),
+ &protocol_value);
+ if (!response.IsSuccess()) return response;
+
+ (*result)->setValue(std::move(protocol_value));
+ }
+ return Response::Success();
+ }
+
+ // No embedder-implemented serialization. Serialize as V8 Object.
+ Response response = V8WebDriverSerializer::serializeV8Value(
+ m_value, context, max_depth, result);
+ return response;
+ }
+
private:
void buildObjectPreviewInternal(
v8::Local<v8::Context> context, bool forEntry,
diff --git a/deps/v8/src/inspector/value-mirror.h b/deps/v8/src/inspector/value-mirror.h
index b487d51b7d..dc643748f1 100644
--- a/deps/v8/src/inspector/value-mirror.h
+++ b/deps/v8/src/inspector/value-mirror.h
@@ -66,6 +66,9 @@ class ValueMirror {
v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
std::unique_ptr<protocol::Runtime::ObjectPreview>*) const {}
virtual v8::Local<v8::Value> v8Value() const = 0;
+ virtual protocol::Response buildWebDriverValue(
+ v8::Local<v8::Context> context, int max_depth,
+ std::unique_ptr<protocol::Runtime::WebDriverValue>* result) const = 0;
class PropertyAccumulator {
public:
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 9c9c72d476..6d3fe7e76e 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -3952,10 +3952,7 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
DCHECK(!property->IsSuperAccess());
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
Register object = VisitForRegisterValue(property->obj());
- Register key =
- assign_type == PRIVATE_GETTER_ONLY || assign_type == PRIVATE_METHOD
- ? Register()
- : VisitForRegisterValue(property->key());
+ Register key = VisitForRegisterValue(property->key());
return AssignmentLhsData::PrivateMethodOrAccessor(assign_type, property,
object, key);
}
@@ -4588,23 +4585,21 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
lhs_data.super_property_args().Truncate(3));
break;
}
+ // BuildAssignment() will throw an error about the private method being
+ // read-only.
case PRIVATE_METHOD: {
- // The property access is invalid, but if the brand check fails too, we
- // need to return the error from the brand check.
Property* property = lhs_data.expr()->AsProperty();
BuildPrivateBrandCheck(property, lhs_data.object());
- BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
- lhs_data.expr()->AsProperty());
+ builder()->LoadAccumulatorWithRegister(lhs_data.key());
break;
}
- case PRIVATE_GETTER_ONLY: {
- // The property access is invalid, but if the brand check fails too, we
- // need to return the error from the brand check.
+ // For read-only properties, BuildAssignment() will throw an error about
+ // the missing setter.
+ case PRIVATE_GETTER_ONLY:
+ case PRIVATE_GETTER_AND_SETTER: {
Property* property = lhs_data.expr()->AsProperty();
BuildPrivateBrandCheck(property, lhs_data.object());
- BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
- lhs_data.expr()->AsProperty());
-
+ BuildPrivateGetterAccess(lhs_data.object(), lhs_data.key());
break;
}
case PRIVATE_SETTER_ONLY: {
@@ -4616,12 +4611,6 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
lhs_data.expr()->AsProperty());
break;
}
- case PRIVATE_GETTER_AND_SETTER: {
- Property* property = lhs_data.expr()->AsProperty();
- BuildPrivateBrandCheck(property, lhs_data.object());
- BuildPrivateGetterAccess(lhs_data.object(), lhs_data.key());
- break;
- }
}
BinaryOperation* binop = expr->binary_operation();
@@ -4651,6 +4640,7 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
builder()->BinaryOperation(binop->op(), old_value, feedback_index(slot));
}
builder()->SetExpressionPosition(expr);
+
BuildAssignment(lhs_data, expr->op(), expr->lookup_hoisting_mode());
builder()->Bind(&short_circuit);
}
@@ -6167,6 +6157,29 @@ void BytecodeGenerator::BuildLiteralCompareNil(
}
}
+void BytecodeGenerator::BuildLiteralStrictCompareBoolean(Literal* literal) {
+ DCHECK(literal->IsBooleanLiteral());
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+ if (literal->AsBooleanLiteral()) {
+ builder()->JumpIfTrue(ToBooleanMode::kAlreadyBoolean,
+ test_result->NewThenLabel());
+ } else {
+ builder()->JumpIfFalse(ToBooleanMode::kAlreadyBoolean,
+ test_result->NewThenLabel());
+ }
+ if (test_result->fallthrough() != TestFallthrough::kElse) {
+ builder()->Jump(test_result->NewElseLabel());
+ }
+ test_result->SetResultConsumedByTest();
+ } else {
+ Register result = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(result);
+ builder()->LoadBoolean(literal->AsBooleanLiteral());
+ builder()->CompareReference(result);
+ }
+}
+
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Expression* sub_expr;
Literal* literal;
@@ -6182,6 +6195,11 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
} else {
builder()->CompareTypeOf(literal_flag);
}
+ } else if (expr->IsLiteralStrictCompareBoolean(&sub_expr, &literal)) {
+ DCHECK(expr->op() == Token::EQ_STRICT);
+ VisitForAccumulatorValue(sub_expr);
+ builder()->SetExpressionPosition(expr);
+ BuildLiteralStrictCompareBoolean(literal);
} else if (expr->IsLiteralCompareUndefined(&sub_expr)) {
VisitForAccumulatorValue(sub_expr);
builder()->SetExpressionPosition(expr);
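The new IsLiteralStrictCompareBoolean fast path relies on the fact that `x === true` (or `=== false`) holds only when the accumulator holds exactly that boolean, so no generic strict-equality machinery is needed: in a test position the generator jumps directly, otherwise it compares against the loaded boolean. A standalone sketch of the value-producing case, with a stand-in tagged value rather than real V8 handles:

    #include <iostream>
    #include <variant>

    using Value = std::variant<bool, double>;  // stand-in for a tagged JS value

    // In a test position the generator emits JumpIfTrue/JumpIfFalse directly;
    // this mirrors the non-test path: the result is true only if the value is
    // that exact boolean (reference equality on booleans).
    bool StrictEqualsBooleanLiteral(const Value& v, bool literal) {
      const bool* b = std::get_if<bool>(&v);
      return b != nullptr && *b == literal;
    }

    int main() {
      std::cout << StrictEqualsBooleanLiteral(Value{true}, true) << " "
                << StrictEqualsBooleanLiteral(Value{1.0}, true) << "\n";  // 1 0
    }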
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 8fae6c077a..075578433c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -128,7 +128,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return object_;
}
Register key() const {
- DCHECK(assign_type_ == KEYED_PROPERTY ||
+ DCHECK(assign_type_ == KEYED_PROPERTY || assign_type_ == PRIVATE_METHOD ||
+ assign_type_ == PRIVATE_GETTER_ONLY ||
assign_type_ == PRIVATE_SETTER_ONLY ||
assign_type_ == PRIVATE_GETTER_AND_SETTER);
return key_;
@@ -262,6 +263,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
void BuildLiteralCompareNil(Token::Value compare_op,
BytecodeArrayBuilder::NilValue nil);
+ void BuildLiteralStrictCompareBoolean(Literal* literal);
void BuildReturn(int source_position);
void BuildAsyncReturn(int source_position);
void BuildAsyncGeneratorReturn();
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index cb8fc81b70..6e47f14d05 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -124,6 +124,8 @@ std::string Register::ToString() const {
return std::string("<context>");
} else if (is_function_closure()) {
return std::string("<closure>");
+ } else if (*this == virtual_accumulator()) {
+ return std::string("<accumulator>");
} else if (is_parameter()) {
int parameter_index = ToParameterIndex();
if (parameter_index == 0) {
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 11640bcf3c..9d9465a676 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -77,12 +77,15 @@ void LoopBuilder::JumpToHeader(int loop_depth, LoopBuilder* const parent_loop) {
// they are a nested inner loop too, a Jump to its parent's JumpToHeader.
parent_loop->JumpToLoopEnd();
} else {
- // Pass the proper loop nesting level to the backwards branch, to trigger
- // on-stack replacement when armed for the given loop nesting depth.
- int level = std::min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
- // Loop must have closed form, i.e. all loop elements are within the loop,
- // the loop header precedes the body and next elements in the loop.
- builder()->JumpLoop(&loop_header_, level, source_position_);
+ // Pass the proper loop depth to the backwards branch for triggering OSR.
+ // For purposes of OSR, the loop depth is capped at `kMaxOsrUrgency - 1`.
+ // Once that urgency is reached, all loops become OSR candidates.
+ //
+ // The loop must have closed form, i.e. all loop elements are within the
+ // loop, the loop header precedes the body and next elements in the loop.
+ builder()->JumpLoop(&loop_header_,
+ std::min(loop_depth, BytecodeArray::kMaxOsrUrgency - 1),
+ source_position_);
}
}
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index e06053b628..415db8cb64 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -1307,16 +1307,18 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// length of the back-edge, so we just have to correct for the non-zero offset
// of the first bytecode.
- const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
TNode<Int32T> profiling_weight =
Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
Int32Constant(kFirstBytecodeOffset));
UpdateInterruptBudget(profiling_weight, true);
}
-TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
- return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOsrLoopNestingLevelOffset);
+TNode<Int16T> InterpreterAssembler::LoadOsrUrgencyAndInstallTarget() {
+ // We're loading a 16-bit field, mask it.
+ return UncheckedCast<Int16T>(Word32And(
+ LoadObjectField<Int16T>(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+ 0xFFFF));
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 9855dedda3..6f17fadd43 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -234,8 +234,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
- // Returns the OSR nesting level from the bytecode header.
- TNode<Int8T> LoadOsrNestingLevel();
+ // Returns the OSR urgency and install target from the bytecode header.
+ TNode<Int16T> LoadOsrUrgencyAndInstallTarget();
// Dispatch to the bytecode.
void Dispatch();
@@ -266,6 +266,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Perform OnStackReplacement.
void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
+ // The BytecodeOffset() is the offset from the BytecodeArray pointer; to
+ // translate into runtime `BytecodeOffset` (defined in utils.h as the offset
+ // from the start of the bytecode section), this constant has to be applied.
+ static constexpr int kFirstBytecodeOffset =
+ BytecodeArray::kHeaderSize - kHeapObjectTag;
+
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
TNode<IntPtrT> BytecodeOffset();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 47d0060700..50e9fff51e 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -1915,7 +1915,6 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), 0);
}
@@ -1926,7 +1925,6 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqualConstant(accumulator, TrueConstant(), 0);
}
@@ -1937,7 +1935,6 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), 0);
}
@@ -1948,7 +1945,6 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqualConstant(accumulator, FalseConstant(), 0);
}
@@ -2166,26 +2162,50 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth>
//
// Jump by the number of bytes represented by the immediate operand |imm|. Also
-// performs a loop nesting check, a stack check, and potentially triggers OSR in
-// case the current OSR level matches (or exceeds) the specified |loop_depth|.
+// performs a loop nesting check, a stack check, and potentially triggers OSR.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
TNode<Int32T> loop_depth = BytecodeOperandImm(1);
- TNode<Int8T> osr_level = LoadOsrNestingLevel();
+ TNode<Int16T> osr_urgency_and_install_target =
+ LoadOsrUrgencyAndInstallTarget();
TNode<Context> context = GetContext();
- // Check if OSR points at the given {loop_depth} are armed by comparing it to
- // the current {osr_level} loaded from the header of the BytecodeArray.
- Label ok(this), osr_armed(this, Label::kDeferred);
- TNode<BoolT> condition = Int32GreaterThanOrEqual(loop_depth, osr_level);
- Branch(condition, &ok, &osr_armed);
+ // OSR requests can be triggered either through urgency (when > the current
+ // loop depth), or an explicit install target (= the lower bits of the
+ // targeted bytecode offset).
+ Label ok(this), maybe_osr(this, Label::kDeferred);
+ Branch(Int32GreaterThanOrEqual(loop_depth, osr_urgency_and_install_target),
+ &ok, &maybe_osr);
BIND(&ok);
// The backward jump can trigger a budget interrupt, which can handle stack
// interrupts, so we don't need to explicitly handle them here.
JumpBackward(relative_jump);
- BIND(&osr_armed);
+ BIND(&maybe_osr);
+ Label osr(this);
+ // OSR based on urgency, i.e. is the OSR urgency greater than the current
+ // loop depth?
+ STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
+ TNode<Word32T> osr_urgency = Word32And(osr_urgency_and_install_target,
+ BytecodeArray::OsrUrgencyBits::kMask);
+ GotoIf(Int32GreaterThan(osr_urgency, loop_depth), &osr);
+
+ // OSR based on the install target offset, i.e. does the current bytecode
+ // offset match the install target offset?
+ //
+ // if (((offset << kShift) & kMask) == (target & kMask)) { ... }
+ static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
+ static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
+ // Note: We OR in 1 to avoid 0 offsets, see Code::OsrInstallTargetFor.
+ TNode<Word32T> actual = Word32Or(
+ Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()), kFirstBytecodeOffset),
+ Int32Constant(1));
+ actual = Word32And(Word32Shl(UncheckedCast<Int32T>(actual), kShift), kMask);
+ TNode<Word32T> expected = Word32And(osr_urgency_and_install_target, kMask);
+ Branch(Word32Equal(actual, expected), &osr, &ok);
+
+ BIND(&osr);
OnStackReplacement(context, relative_jump);
}
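The new JumpLoop logic boils down to: trigger OSR if the urgency exceeds the current loop depth, or if the install target encodes the current bytecode offset. A scalar model of that decision is sketched below; the bit widths are assumptions standing in for BytecodeArray::OsrUrgencyBits and OsrInstallTargetBits.

#include <cassert>
#include <cstdint>

constexpr uint32_t kUrgencyMask = 0x7;           // assumed
constexpr uint32_t kInstallTargetShift = 3;      // assumed
constexpr uint32_t kInstallTargetMask = 0xFFF8;  // assumed

bool ShouldOsr(uint32_t urgency_and_target, int loop_depth, int bytecode_offset) {
  uint32_t urgency = urgency_and_target & kUrgencyMask;
  if (urgency > static_cast<uint32_t>(loop_depth)) return true;
  // OR in 1 so that offset 0 never produces an all-zero install target.
  uint32_t actual = ((static_cast<uint32_t>(bytecode_offset) | 1)
                     << kInstallTargetShift) & kInstallTargetMask;
  uint32_t expected = urgency_and_target & kInstallTargetMask;
  return actual == expected;
}

int main() {
  // Urgency 3 beats loop depth 1, regardless of the install target bits.
  assert(ShouldOsr(/*urgency_and_target=*/3, /*loop_depth=*/1, /*bytecode_offset=*/64));
  // No urgency, but the install target encodes offset 64 (|1, shifted, masked).
  uint32_t target = ((64u | 1u) << kInstallTargetShift) & kInstallTargetMask;
  assert(ShouldOsr(target, /*loop_depth=*/5, /*bytecode_offset=*/64));
}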
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index e9b71d56df..1134459a25 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -333,7 +333,6 @@ namespace internal {
SC(sub_string_runtime, V8.SubStringRuntime) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
SC(stack_interrupts, V8.StackInterrupts) \
- SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index a0e2223412..e2de9db07c 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -44,12 +44,22 @@
#include "src/utils/memcopy.h"
#include "src/utils/version.h"
+#ifdef ENABLE_GDB_JIT_INTERFACE
+#include "src/diagnostics/gdb-jit.h"
+#endif // ENABLE_GDB_JIT_INTERFACE
+
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_OS_WIN
+#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+#include "src/diagnostics/system-jit-win.h"
+#endif
+#endif // V8_OS_WIN
+
namespace v8 {
namespace internal {
@@ -203,6 +213,7 @@ CodeEventLogger::~CodeEventLogger() = default;
void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
Handle<AbstractCode> code,
const char* comment) {
+ DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
name_buffer_->AppendBytes(comment);
LogRecordedBuffer(code, MaybeHandle<SharedFunctionInfo>(),
@@ -212,6 +223,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
Handle<AbstractCode> code,
Handle<Name> name) {
+ DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
name_buffer_->AppendName(*name);
LogRecordedBuffer(code, MaybeHandle<SharedFunctionInfo>(),
@@ -222,6 +234,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name) {
+ DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(*shared, *code));
name_buffer_->AppendByte(' ');
@@ -234,6 +247,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line,
int column) {
+ DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(*shared, *code));
name_buffer_->AppendBytes(shared->DebugNameCStr().get());
@@ -256,6 +270,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
wasm::WasmName name,
const char* source_url,
int /*code_offset*/, int /*script_id*/) {
+ DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
DCHECK(!name.empty());
name_buffer_->AppendBytes(name.begin(), name.length());
@@ -273,6 +288,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
void CodeEventLogger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) {
+ DCHECK(is_listening_to_code_events());
name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
name_buffer_->AppendString(*source);
LogRecordedBuffer(code, MaybeHandle<SharedFunctionInfo>(),
@@ -1074,20 +1090,6 @@ void Logger::IntPtrTEvent(const char* name, intptr_t value) {
msg.WriteToLogFile();
}
-void Logger::HandleEvent(const char* name, Address* location) {
- if (!FLAG_log_handles) return;
- MSG_BUILDER();
- msg << name << kNext << reinterpret_cast<void*>(location);
- msg.WriteToLogFile();
-}
-
-void Logger::WriteApiSecurityCheck() {
- DCHECK(FLAG_log_api);
- MSG_BUILDER();
- msg << "api" << kNext << "check-security";
- msg.WriteToLogFile();
-}
-
void Logger::SharedLibraryEvent(const std::string& library_path,
uintptr_t start, uintptr_t end,
intptr_t aslr_slide) {
@@ -1159,39 +1161,6 @@ bool Logger::is_logging() {
TIMER_EVENTS_LIST(V)
#undef V
-void Logger::WriteApiNamedPropertyAccess(const char* tag, JSObject holder,
- Object property_name) {
- DCHECK(FLAG_log_api);
- DCHECK(property_name.IsName());
- MSG_BUILDER();
- msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
- << Name::cast(property_name);
- msg.WriteToLogFile();
-}
-
-void Logger::WriteApiIndexedPropertyAccess(const char* tag, JSObject holder,
- uint32_t index) {
- DCHECK(FLAG_log_api);
- MSG_BUILDER();
- msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
- << index;
- msg.WriteToLogFile();
-}
-
-void Logger::WriteApiObjectAccess(const char* tag, JSReceiver object) {
- DCHECK(FLAG_log_api);
- MSG_BUILDER();
- msg << "api" << kNext << tag << kNext << object.class_name();
- msg.WriteToLogFile();
-}
-
-void Logger::WriteApiEntryCall(const char* name) {
- DCHECK(FLAG_log_api);
- MSG_BUILDER();
- msg << "api" << kNext << name;
- msg.WriteToLogFile();
-}
-
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!FLAG_log) return;
MSG_BUILDER();
@@ -1392,7 +1361,7 @@ void Logger::FeedbackVectorEvent(FeedbackVector vector, AbstractCode code) {
msg << kNext << reinterpret_cast<void*>(vector.address()) << kNext
<< vector.length();
msg << kNext << reinterpret_cast<void*>(code.InstructionStart());
- msg << kNext << vector.optimization_marker();
+ msg << kNext << vector.tiering_state();
msg << kNext << vector.maybe_has_optimized_code();
msg << kNext << vector.invocation_count();
msg << kNext << vector.profiler_ticks() << kNext;
@@ -1611,15 +1580,6 @@ void Logger::MoveEventInternal(LogEventsAndTags event, Address from,
msg.WriteToLogFile();
}
-void Logger::SuspectReadEvent(Name name, Object obj) {
- if (!FLAG_log_suspect) return;
- MSG_BUILDER();
- String class_name = obj.IsJSObject() ? JSObject::cast(obj).class_name()
- : ReadOnlyRoots(isolate_).empty_string();
- msg << "suspect-read" << kNext << class_name << kNext << name;
- msg.WriteToLogFile();
-}
-
namespace {
void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
int script_id, double time_delta, int start_position,
@@ -1931,6 +1891,8 @@ void Logger::LogCompiledFunctions() {
existing_code_logger_.LogCompiledFunctions();
}
+void Logger::LogBuiltins() { existing_code_logger_.LogBuiltins(); }
+
void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
HeapObjectIterator iterator(heap);
@@ -2048,6 +2010,26 @@ bool Logger::SetUp(Isolate* isolate) {
"--perf-basic-prof should be statically disabled on non-Linux platforms");
#endif
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (i::FLAG_gdbjit) {
+ auto code_event_handler = i::GDBJITInterface::EventHandler;
+ DCHECK_NOT_NULL(code_event_handler);
+ gdb_jit_logger_ = std::make_unique<JitLogger>(isolate, code_event_handler);
+ AddCodeEventListener(gdb_jit_logger_.get());
+ CHECK(isolate->code_event_dispatcher()->IsListeningToCodeEvents());
+ }
+#endif // ENABLE_GDB_JIT_INTERFACE
+
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ if (i::FLAG_enable_system_instrumentation) {
+ auto code_event_handler = i::ETWJITInterface::EventHandler;
+ DCHECK_NOT_NULL(code_event_handler);
+ etw_jit_logger_ = std::make_unique<JitLogger>(isolate, code_event_handler);
+ AddCodeEventListener(etw_jit_logger_.get());
+ CHECK(isolate->code_event_dispatcher()->IsListeningToCodeEvents());
+ }
+#endif // defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+
if (FLAG_ll_prof) {
ll_logger_ =
std::make_unique<LowLevelLogger>(isolate, log_file_name.str().c_str());
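The pattern added here is flag-gated listener registration: each optional JIT logger is constructed only when its flag is set, then attached to the shared code-event dispatcher. A reduced standalone sketch of that shape, with invented Dispatcher and Listener types standing in for the V8 classes:

#include <memory>
#include <vector>

struct Listener { virtual ~Listener() = default; };
struct GdbJitListener : Listener {};
struct EtwJitListener : Listener {};

struct Dispatcher {
  std::vector<Listener*> listeners;
  bool IsListening() const { return !listeners.empty(); }
};

// Owned loggers live alongside the dispatcher; only enabled ones are created.
void SetUpLoggers(Dispatcher& d, bool flag_gdbjit, bool flag_etw,
                  std::vector<std::unique_ptr<Listener>>& owned) {
  if (flag_gdbjit) {
    owned.push_back(std::make_unique<GdbJitListener>());
    d.listeners.push_back(owned.back().get());
  }
  if (flag_etw) {
    owned.push_back(std::make_unique<EtwJitListener>());
    d.listeners.push_back(owned.back().get());
  }
}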
@@ -2066,6 +2048,14 @@ bool Logger::SetUp(Isolate* isolate) {
return true;
}
+void Logger::LateSetup(Isolate* isolate) {
+ if (!isolate->code_event_dispatcher()->IsListeningToCodeEvents()) return;
+ Builtins::EmitCodeCreateEvents(isolate);
+#if V8_ENABLE_WEBASSEMBLY
+ wasm::GetWasmEngine()->EnableCodeLogging(isolate);
+#endif
+}
+
void Logger::SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler) {
if (jit_logger_) {
@@ -2082,6 +2072,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
if (options & kJitCodeEventEnumExisting) {
HandleScope scope(isolate_);
LogCodeObjects();
+ LogBuiltins();
LogCompiledFunctions();
}
}
@@ -2154,8 +2145,6 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
case CodeKind::BASELINE:
case CodeKind::MAGLEV:
return; // We log this later using LogCompiledFunctions.
- case CodeKind::BYTECODE_HANDLER:
- return; // We log it later by walking the dispatch table.
case CodeKind::FOR_TESTING:
description = "STUB code";
tag = CodeEventListener::STUB_TAG;
@@ -2164,6 +2153,11 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
description = "Regular expression code";
tag = CodeEventListener::REG_EXP_TAG;
break;
+ case CodeKind::BYTECODE_HANDLER:
+ description =
+ isolate_->builtins()->name(abstract_code->GetCode().builtin_id());
+ tag = CodeEventListener::BYTECODE_HANDLER_TAG;
+ break;
case CodeKind::BUILTIN:
if (Code::cast(object).is_interpreter_trampoline_builtin() &&
ToCodeT(Code::cast(object)) !=
@@ -2213,6 +2207,16 @@ void ExistingCodeLogger::LogCodeObjects() {
}
}
+void ExistingCodeLogger::LogBuiltins() {
+ Builtins* builtins = isolate_->builtins();
+ DCHECK(builtins->is_initialized());
+ for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
+ ++builtin) {
+ Code code = FromCodeT(builtins->code(builtin));
+ LogCodeObject(code);
+ }
+}
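LogBuiltins walks the entire Builtin enum range. A self-contained version of that iteration idiom follows; the enumerators are placeholders, not real builtin names.

#include <cstdio>

enum class Builtin : int { kFirst = 0, kAbort = 0, kAdd, kLast = kAdd };

// Scoped enums have no built-in increment, so the walk needs an operator++.
Builtin& operator++(Builtin& b) {
  b = static_cast<Builtin>(static_cast<int>(b) + 1);
  return b;
}

int main() {
  for (Builtin b = Builtin::kFirst; b <= Builtin::kLast; ++b) {
    std::printf("logging builtin %d\n", static_cast<int>(b));
  }
}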
+
void ExistingCodeLogger::LogCompiledFunctions() {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 8bb0c5f931..a5205988ce 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -88,6 +88,7 @@ class ExistingCodeLogger {
: isolate_(isolate), listener_(listener) {}
void LogCodeObjects();
+ void LogBuiltins();
void LogCompiledFunctions();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
@@ -122,6 +123,9 @@ class Logger : public CodeEventListener {
// Acquires resources for logging if the right flags are set.
bool SetUp(Isolate* isolate);
+ // Additional steps taken after the logger has been set up.
+ void LateSetup(Isolate* isolate);
+
// Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
@@ -141,17 +145,10 @@ class Logger : public CodeEventListener {
// Emits an event with an int value -> (name, value).
void IntPtrTEvent(const char* name, intptr_t value);
- // Emits an event with an handle value -> (name, location).
- void HandleEvent(const char* name, Address* location);
-
// Emits memory management events for C allocated structures.
void NewEvent(const char* name, void* object, size_t size);
void DeleteEvent(const char* name, void* object);
- // Emits an event that an undefined property was read from an
- // object.
- void SuspectReadEvent(Name name, Object obj);
-
// ==== Events logged by --log-function-events ====
void FunctionEvent(const char* reason, int script_id, double time_delta_ms,
int start_position, int end_position,
@@ -166,30 +163,6 @@ class Logger : public CodeEventListener {
void ScriptEvent(ScriptEventType type, int script_id);
void ScriptDetails(Script script);
- // ==== Events logged by --log-api. ====
- void ApiSecurityCheck() {
- if (!FLAG_log_api) return;
- WriteApiSecurityCheck();
- }
- void ApiNamedPropertyAccess(const char* tag, JSObject holder, Object name) {
- if (!FLAG_log_api) return;
- WriteApiNamedPropertyAccess(tag, holder, name);
- }
- void ApiIndexedPropertyAccess(const char* tag, JSObject holder,
- uint32_t index) {
- if (!FLAG_log_api) return;
- WriteApiIndexedPropertyAccess(tag, holder, index);
- }
-
- void ApiObjectAccess(const char* tag, JSReceiver obj) {
- if (!FLAG_log_api) return;
- WriteApiObjectAccess(tag, obj);
- }
- void ApiEntryCall(const char* name) {
- if (!FLAG_log_api) return;
- WriteApiEntryCall(name);
- }
-
// ==== Events logged by --log-code. ====
V8_EXPORT_PRIVATE void AddCodeEventListener(CodeEventListener* listener);
V8_EXPORT_PRIVATE void RemoveCodeEventListener(CodeEventListener* listener);
@@ -303,6 +276,7 @@ class Logger : public CodeEventListener {
V8_EXPORT_PRIVATE void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
V8_EXPORT_PRIVATE void LogCodeObjects();
+ V8_EXPORT_PRIVATE void LogBuiltins();
// Logs all Maps found on the heap.
void LogAllMaps();
@@ -373,6 +347,12 @@ class Logger : public CodeEventListener {
#endif
std::unique_ptr<LowLevelLogger> ll_logger_;
std::unique_ptr<JitLogger> jit_logger_;
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ std::unique_ptr<JitLogger> gdb_jit_logger_;
+#endif
+#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+ std::unique_ptr<JitLogger> etw_jit_logger_;
+#endif
std::set<int> logged_source_code_;
uint32_t next_source_info_id_ = 0;
@@ -458,6 +438,8 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
const char* reason) override {}
void WeakCodeClearEvent() override {}
+ bool is_listening_to_code_events() override { return true; }
+
protected:
Isolate* isolate_;
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index ff2893fe16..ddee0baaf8 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -322,6 +322,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BranchConditionDuplication) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
@@ -486,7 +487,9 @@ class RuntimeCallTimer final {
V(WebSnapshotDeserialize_Functions) \
V(WebSnapshotDeserialize_Maps) \
V(WebSnapshotDeserialize_Objects) \
- V(WebSnapshotDeserialize_Strings)
+ V(WebSnapshotDeserialize_Strings) \
+ V(WrappedFunctionLengthGetter) \
+ V(WrappedFunctionNameGetter)
#define FOR_EACH_HANDLER_COUNTER(V) \
V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
@@ -684,7 +687,7 @@ class WorkerThreadRuntimeCallStats final {
// Creating a WorkerThreadRuntimeCallStatsScope will provide a thread-local
// runtime call stats table, and will dump the table to an immediate trace event
// when it is destroyed.
-class V8_NODISCARD WorkerThreadRuntimeCallStatsScope final {
+class V8_EXPORT_PRIVATE V8_NODISCARD WorkerThreadRuntimeCallStatsScope final {
public:
WorkerThreadRuntimeCallStatsScope() = default;
explicit WorkerThreadRuntimeCallStatsScope(
diff --git a/deps/v8/src/maglev/OWNERS b/deps/v8/src/maglev/OWNERS
index dca7476a04..291b217d6e 100644
--- a/deps/v8/src/maglev/OWNERS
+++ b/deps/v8/src/maglev/OWNERS
@@ -1,3 +1,4 @@
leszeks@chromium.org
jgruber@chromium.org
verwaest@chromium.org
+victorgomes@chromium.org
diff --git a/deps/v8/src/maglev/maglev-code-gen-state.h b/deps/v8/src/maglev/maglev-code-gen-state.h
index ecf8bbccda..14a83c0321 100644
--- a/deps/v8/src/maglev/maglev-code-gen-state.h
+++ b/deps/v8/src/maglev/maglev-code-gen-state.h
@@ -19,16 +19,18 @@ namespace v8 {
namespace internal {
namespace maglev {
-class MaglevCodeGenState {
+class InterpreterFrameState;
+
+class DeferredCodeInfo {
public:
- class DeferredCodeInfo {
- public:
- virtual void Generate(MaglevCodeGenState* code_gen_state,
- Label* return_label) = 0;
- Label deferred_code_label;
- Label return_label;
- };
+ virtual void Generate(MaglevCodeGenState* code_gen_state,
+ Label* return_label) = 0;
+ Label deferred_code_label;
+ Label return_label;
+};
+class MaglevCodeGenState {
+ public:
MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
SafepointTableBuilder* safepoint_table_builder)
: compilation_unit_(compilation_unit),
@@ -40,14 +42,19 @@ class MaglevCodeGenState {
void PushDeferredCode(DeferredCodeInfo* deferred_code) {
deferred_code_.push_back(deferred_code);
}
- void EmitDeferredCode() {
- for (auto& deferred_code : deferred_code_) {
- masm()->RecordComment("-- Deferred block");
- masm()->bind(&deferred_code->deferred_code_label);
- deferred_code->Generate(this, &deferred_code->return_label);
- masm()->int3();
- }
+ const std::vector<DeferredCodeInfo*>& deferred_code() const {
+ return deferred_code_;
}
+ void PushEagerDeopt(EagerDeoptInfo* info) { eager_deopts_.push_back(info); }
+ void PushLazyDeopt(LazyDeoptInfo* info) { lazy_deopts_.push_back(info); }
+ const std::vector<EagerDeoptInfo*>& eager_deopts() const {
+ return eager_deopts_;
+ }
+ const std::vector<LazyDeoptInfo*>& lazy_deopts() const {
+ return lazy_deopts_;
+ }
+ inline void DefineSafepointStackSlots(
+ SafepointTableBuilder::Safepoint& safepoint) const;
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
@@ -86,6 +93,8 @@ class MaglevCodeGenState {
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
+ std::vector<EagerDeoptInfo*> eager_deopts_;
+ std::vector<LazyDeoptInfo*> lazy_deopts_;
int vreg_slots_ = 0;
// Allow marking some codegen paths as unsupported, so that we can test maglev
@@ -97,9 +106,24 @@ class MaglevCodeGenState {
// Some helpers for codegen.
// TODO(leszeks): consider moving this to a separate header.
+inline constexpr int GetFramePointerOffsetForStackSlot(int index) {
+ return StandardFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize;
+}
+
+inline int GetFramePointerOffsetForStackSlot(
+ const compiler::AllocatedOperand& operand) {
+ return GetFramePointerOffsetForStackSlot(operand.index());
+}
+
+inline int GetSafepointIndexForStackSlot(int i) {
+ // Safepoint tables also contain slots for all fixed frame slots (both
+ // above and below the fp).
+ return StandardFrameConstants::kFixedSlotCount + i;
+}
+
inline MemOperand GetStackSlot(int index) {
- return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
- index * kSystemPointerSize);
+ return MemOperand(rbp, GetFramePointerOffsetForStackSlot(index));
}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
@@ -122,10 +146,17 @@ inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
-inline int GetSafepointIndexForStackSlot(int i) {
- // Safepoint tables also contain slots for all fixed frame slots (both
- // above and below the fp).
- return StandardFrameConstants::kFixedSlotCount + i;
+inline void MaglevCodeGenState::DefineSafepointStackSlots(
+ SafepointTableBuilder::Safepoint& safepoint) const {
+ DCHECK_EQ(compilation_unit()->stack_value_repr().size(), vreg_slots());
+ int stack_slot = 0;
+ for (ValueRepresentation repr : compilation_unit()->stack_value_repr()) {
+ if (repr == ValueRepresentation::kTagged) {
+ safepoint.DefineTaggedStackSlot(
+ GetSafepointIndexForStackSlot(stack_slot));
+ }
+ stack_slot++;
+ }
}
} // namespace maglev
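The stack-slot helpers above reduce to two small formulas. A worked version with assumed frame constants (the real values come from StandardFrameConstants):

#include <cstdio>

constexpr int kSystemPointerSize = 8;
constexpr int kExpressionsOffset = -3 * kSystemPointerSize;  // assumed
constexpr int kFixedSlotCount = 3;                           // assumed

// Slot i lives at a negative offset below the frame pointer.
constexpr int FramePointerOffsetForStackSlot(int index) {
  return kExpressionsOffset - index * kSystemPointerSize;
}

// The safepoint table additionally counts the fixed frame slots.
constexpr int SafepointIndexForStackSlot(int i) { return kFixedSlotCount + i; }

int main() {
  std::printf("slot 2: fp offset %d, safepoint index %d\n",
              FramePointerOffsetForStackSlot(2), SafepointIndexForStackSlot(2));
}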
diff --git a/deps/v8/src/maglev/maglev-code-generator.cc b/deps/v8/src/maglev/maglev-code-generator.cc
index f578d53777..d57420ae3e 100644
--- a/deps/v8/src/maglev/maglev-code-generator.cc
+++ b/deps/v8/src/maglev/maglev-code-generator.cc
@@ -7,6 +7,9 @@
#include "src/codegen/code-desc.h"
#include "src/codegen/register.h"
#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/translation-array.h"
+#include "src/execution/frame-constants.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
@@ -15,10 +18,10 @@
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
+#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
-
namespace maglev {
#define __ masm()->
@@ -41,8 +44,6 @@ using StackToRegisterMoves =
class MaglevCodeGeneratingNodeProcessor {
public:
- static constexpr bool kNeedsCheckpointStates = true;
-
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
@@ -51,6 +52,8 @@ class MaglevCodeGeneratingNodeProcessor {
__ int3();
}
+ __ BailoutIfDeoptimized(rbx);
+
__ EnterFrame(StackFrame::BASELINE);
// Save arguments in frame.
@@ -75,18 +78,14 @@ class MaglevCodeGeneratingNodeProcessor {
}
// We don't emit proper safepoint data yet; instead, define a single
- // safepoint at the end of the code object, with all-tagged stack slots.
- // TODO(jgruber): Real safepoint handling.
+ // safepoint at the end of the code object.
+ // TODO(v8:7700): Add better safepoint handling when we support stack reuse.
SafepointTableBuilder::Safepoint safepoint =
safepoint_table_builder()->DefineSafepoint(masm());
- for (int i = 0; i < code_gen_state_->vreg_slots(); i++) {
- safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
- }
+ code_gen_state_->DefineSafepointStackSlots(safepoint);
}
- void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
- code_gen_state_->EmitDeferredCode();
- }
+ void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
if (FLAG_code_comments) {
@@ -305,6 +304,15 @@ class MaglevCodeGeneratingNodeProcessor {
MaglevCodeGenState* code_gen_state_;
};
+constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
+ return 1 - offset / kSystemPointerSize;
+}
+
+int DeoptStackSlotFromStackSlot(const compiler::AllocatedOperand& operand) {
+ return DeoptStackSlotIndexFromFPOffset(
+ GetFramePointerOffsetForStackSlot(operand));
+}
+
} // namespace
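DeoptStackSlotIndexFromFPOffset converts a frame-pointer-relative byte offset into the slot index used by the translation array via index = 1 - offset / kSystemPointerSize. A quick numeric check of that mapping, assuming 8-byte pointers:

#include <cassert>

constexpr int kSystemPointerSize = 8;

constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
  return 1 - offset / kSystemPointerSize;
}

int main() {
  // An fp-relative offset of -16 bytes maps to translation slot index 3.
  assert(DeoptStackSlotIndexFromFPOffset(-16) == 3);
  // Offsets closer to the frame pointer get smaller indices.
  assert(DeoptStackSlotIndexFromFPOffset(0) == 1);
}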
class MaglevCodeGeneratorImpl final {
@@ -315,8 +323,12 @@ class MaglevCodeGeneratorImpl final {
}
private:
+ static constexpr int kFunctionLiteralIndex = 0;
+ static constexpr int kOptimizedOutConstantIndex = 1;
+
MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
: safepoint_table_builder_(compilation_unit->zone()),
+ translation_array_builder_(compilation_unit->zone()),
code_gen_state_(compilation_unit, safepoint_table_builder()),
processor_(compilation_unit, &code_gen_state_),
graph_(graph) {}
@@ -328,7 +340,207 @@ class MaglevCodeGeneratorImpl final {
return BuildCodeObject();
}
- void EmitCode() { processor_.ProcessGraph(graph_); }
+ void EmitCode() {
+ processor_.ProcessGraph(graph_);
+ EmitDeferredCode();
+ EmitDeopts();
+ }
+
+ void EmitDeferredCode() {
+ for (DeferredCodeInfo* deferred_code : code_gen_state_.deferred_code()) {
+ __ RecordComment("-- Deferred block");
+ __ bind(&deferred_code->deferred_code_label);
+ deferred_code->Generate(&code_gen_state_, &deferred_code->return_label);
+ __ Trap();
+ }
+ }
+
+ void EmitDeopts() {
+ deopt_exit_start_offset_ = __ pc_offset();
+
+ __ RecordComment("-- Non-lazy deopts");
+ for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
+ EmitEagerDeopt(deopt_info);
+
+ __ bind(&deopt_info->deopt_entry_label);
+ __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, 0,
+ &deopt_info->deopt_entry_label,
+ DeoptimizeKind::kEager, nullptr, nullptr);
+ }
+
+ __ RecordComment("-- Lazy deopts");
+ int last_updated_safepoint = 0;
+ for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
+ EmitLazyDeopt(deopt_info);
+
+ __ bind(&deopt_info->deopt_entry_label);
+ __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0,
+ &deopt_info->deopt_entry_label,
+ DeoptimizeKind::kLazy, nullptr, nullptr);
+
+ last_updated_safepoint =
+ safepoint_table_builder_.UpdateDeoptimizationInfo(
+ deopt_info->deopting_call_return_pc,
+ deopt_info->deopt_entry_label.pos(), last_updated_safepoint,
+ deopt_info->deopt_index);
+ }
+ }
+
+ void EmitEagerDeopt(EagerDeoptInfo* deopt_info) {
+ int frame_count = 1;
+ int jsframe_count = 1;
+ int update_feedback_count = 0;
+ deopt_info->deopt_index = translation_array_builder_.BeginTranslation(
+ frame_count, jsframe_count, update_feedback_count);
+
+ // Returns are used for updating an accumulator or register after a lazy
+ // deopt.
+ const int return_offset = 0;
+ const int return_count = 0;
+ translation_array_builder_.BeginInterpretedFrame(
+ deopt_info->state.bytecode_position, kFunctionLiteralIndex,
+ code_gen_state_.register_count(), return_offset, return_count);
+
+ EmitDeoptFrameValues(
+ *code_gen_state_.compilation_unit(), deopt_info->state.register_frame,
+ deopt_info->input_locations, interpreter::Register::invalid_value());
+ }
+
+ void EmitLazyDeopt(LazyDeoptInfo* deopt_info) {
+ int frame_count = 1;
+ int jsframe_count = 1;
+ int update_feedback_count = 0;
+ deopt_info->deopt_index = translation_array_builder_.BeginTranslation(
+ frame_count, jsframe_count, update_feedback_count);
+
+ // Return offsets are counted from the end of the translation frame, which
+ // is the array [parameters..., locals..., accumulator].
+ int return_offset;
+ if (deopt_info->result_location ==
+ interpreter::Register::virtual_accumulator()) {
+ return_offset = 0;
+ } else if (deopt_info->result_location.is_parameter()) {
+ // This is slightly tricky to reason about because of zero indexing and
+ // fence post errors. As an example, consider a frame with 2 locals and
+ // 2 parameters, where we want argument index 1 -- looking at the array
+ // in reverse order we have:
+ // [acc, r1, r0, a1, a0]
+ // ^
+ // and this calculation gives, correctly:
+ // 2 + 2 - 1 = 3
+ return_offset = code_gen_state_.register_count() +
+ code_gen_state_.parameter_count() -
+ deopt_info->result_location.ToParameterIndex();
+ } else {
+ return_offset = code_gen_state_.register_count() -
+ deopt_info->result_location.index();
+ }
+ // TODO(leszeks): Support lazy deopts with multiple return values.
+ int return_count = 1;
+ translation_array_builder_.BeginInterpretedFrame(
+ deopt_info->state.bytecode_position, kFunctionLiteralIndex,
+ code_gen_state_.register_count(), return_offset, return_count);
+
+ EmitDeoptFrameValues(
+ *code_gen_state_.compilation_unit(), deopt_info->state.register_frame,
+ deopt_info->input_locations, deopt_info->result_location);
+ }
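The return_offset arithmetic above is easy to get wrong, hence the fence-post comment. Below is a small standalone check of the three cases (accumulator, parameter, local), using the 2-parameter/2-local example from the comment; the helper is a sketch, not the Maglev code itself.

#include <cassert>

// Offsets count from the end of the frame array [params..., locals..., acc].
int ReturnOffset(int register_count, int parameter_count,
                 bool is_accumulator, bool is_parameter, int index) {
  if (is_accumulator) return 0;
  if (is_parameter) return register_count + parameter_count - index;
  return register_count - index;
}

int main() {
  // Reversed frame array: [acc, r1, r0, a1, a0]
  assert(ReturnOffset(2, 2, false, true, 1) == 3);   // parameter a1
  assert(ReturnOffset(2, 2, false, false, 0) == 2);  // local r0
  assert(ReturnOffset(2, 2, true, false, 0) == 0);   // accumulator
}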
+
+ void EmitDeoptFrameSingleValue(ValueNode* value,
+ const InputLocation& input_location) {
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input_location.operand());
+ if (operand.IsRegister()) {
+ if (value->properties().is_untagged_value()) {
+ translation_array_builder_.StoreInt32Register(operand.GetRegister());
+ } else {
+ translation_array_builder_.StoreRegister(operand.GetRegister());
+ }
+ } else {
+ if (value->properties().is_untagged_value()) {
+ translation_array_builder_.StoreInt32StackSlot(
+ DeoptStackSlotFromStackSlot(operand));
+ } else {
+ translation_array_builder_.StoreStackSlot(
+ DeoptStackSlotFromStackSlot(operand));
+ }
+ }
+ }
+
+ void EmitDeoptFrameValues(
+ const MaglevCompilationUnit& compilation_unit,
+ const CompactInterpreterFrameState* checkpoint_state,
+ const InputLocation* input_locations,
+ interpreter::Register result_location) {
+ // Closure
+ int closure_index = DeoptStackSlotIndexFromFPOffset(
+ StandardFrameConstants::kFunctionOffset);
+ translation_array_builder_.StoreStackSlot(closure_index);
+
+ // TODO(leszeks): The input locations array happens to be in the same order
+ // as parameters+locals+accumulator are accessed here. We should make this
+ // clearer and guard against this invariant failing.
+ const InputLocation* input_location = input_locations;
+
+ // Parameters
+ {
+ int i = 0;
+ checkpoint_state->ForEachParameter(
+ compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
+ DCHECK_EQ(reg.ToParameterIndex(), i);
+ if (reg != result_location) {
+ EmitDeoptFrameSingleValue(value, *input_location);
+ } else {
+ translation_array_builder_.StoreLiteral(
+ kOptimizedOutConstantIndex);
+ }
+ i++;
+ input_location++;
+ });
+ }
+
+ // Context
+ int context_index =
+ DeoptStackSlotIndexFromFPOffset(StandardFrameConstants::kContextOffset);
+ translation_array_builder_.StoreStackSlot(context_index);
+
+ // Locals
+ {
+ int i = 0;
+ checkpoint_state->ForEachLocal(
+ compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
+ DCHECK_LE(i, reg.index());
+ if (reg == result_location) {
+ input_location++;
+ return;
+ }
+ while (i < reg.index()) {
+ translation_array_builder_.StoreLiteral(
+ kOptimizedOutConstantIndex);
+ i++;
+ }
+ DCHECK_EQ(i, reg.index());
+ EmitDeoptFrameSingleValue(value, *input_location);
+ i++;
+ input_location++;
+ });
+ while (i < code_gen_state_.register_count()) {
+ translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
+ i++;
+ }
+ }
+
+ // Accumulator
+ {
+ if (checkpoint_state->liveness()->AccumulatorIsLive() &&
+ result_location != interpreter::Register::virtual_accumulator()) {
+ ValueNode* value = checkpoint_state->accumulator(compilation_unit);
+ EmitDeoptFrameSingleValue(value, *input_location);
+ } else {
+ translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
+ }
+ }
+ }
void EmitMetadata() {
// Final alignment before starting on the metadata section.
@@ -345,9 +557,84 @@ class MaglevCodeGeneratorImpl final {
kNoHandlerTableOffset);
return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
.set_stack_slots(stack_slot_count_with_fixed_frame())
+ .set_deoptimization_data(GenerateDeoptimizationData())
.TryBuild();
}
+ Handle<DeoptimizationData> GenerateDeoptimizationData() {
+ int eager_deopt_count =
+ static_cast<int>(code_gen_state_.eager_deopts().size());
+ int lazy_deopt_count =
+ static_cast<int>(code_gen_state_.lazy_deopts().size());
+ int deopt_count = lazy_deopt_count + eager_deopt_count;
+ if (deopt_count == 0) {
+ return DeoptimizationData::Empty(isolate());
+ }
+ Handle<DeoptimizationData> data =
+ DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
+
+ Handle<TranslationArray> translation_array =
+ translation_array_builder_.ToTranslationArray(isolate()->factory());
+
+ data->SetTranslationByteArray(*translation_array);
+ data->SetInlinedFunctionCount(Smi::zero());
+ // TODO(leszeks): Support optimization IDs
+ data->SetOptimizationId(Smi::zero());
+
+ DCHECK_NE(deopt_exit_start_offset_, -1);
+ data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
+ data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count));
+ data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count));
+
+ data->SetSharedFunctionInfo(
+ *code_gen_state_.compilation_unit()->shared_function_info().object());
+
+ // TODO(leszeks): Proper literals array.
+ Handle<DeoptimizationLiteralArray> literals =
+ isolate()->factory()->NewDeoptimizationLiteralArray(2);
+ literals->set(
+ kFunctionLiteralIndex,
+ *code_gen_state_.compilation_unit()->shared_function_info().object());
+ literals->set(kOptimizedOutConstantIndex,
+ ReadOnlyRoots(isolate()).optimized_out());
+ data->SetLiteralArray(*literals);
+
+ // TODO(leszeks): Fix once we have inlining.
+ Handle<PodArray<InliningPosition>> inlining_positions =
+ PodArray<InliningPosition>::New(isolate(), 0);
+ data->SetInliningPositions(*inlining_positions);
+
+ // TODO(leszeks): Fix once we have OSR.
+ BytecodeOffset osr_offset = BytecodeOffset::None();
+ data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(-1));
+
+ // Populate deoptimization entries.
+ int i = 0;
+ for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
+ DCHECK_NE(deopt_info->deopt_index, -1);
+ data->SetBytecodeOffset(i, deopt_info->state.bytecode_position);
+ data->SetTranslationIndex(i, Smi::FromInt(deopt_info->deopt_index));
+ data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label.pos()));
+#ifdef DEBUG
+ data->SetNodeId(i, Smi::FromInt(i));
+#endif // DEBUG
+ i++;
+ }
+ for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
+ DCHECK_NE(deopt_info->deopt_index, -1);
+ data->SetBytecodeOffset(i, deopt_info->state.bytecode_position);
+ data->SetTranslationIndex(i, Smi::FromInt(deopt_info->deopt_index));
+ data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label.pos()));
+#ifdef DEBUG
+ data->SetNodeId(i, Smi::FromInt(i));
+#endif // DEBUG
+ i++;
+ }
+
+ return data;
+ }
+
int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
int stack_slot_count_with_fixed_frame() const {
return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
@@ -360,11 +647,17 @@ class MaglevCodeGeneratorImpl final {
SafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
}
+ TranslationArrayBuilder* translation_array_builder() {
+ return &translation_array_builder_;
+ }
SafepointTableBuilder safepoint_table_builder_;
+ TranslationArrayBuilder translation_array_builder_;
MaglevCodeGenState code_gen_state_;
GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
Graph* const graph_;
+
+ int deopt_exit_start_offset_ = -1;
};
// static
diff --git a/deps/v8/src/maglev/maglev-compilation-info.cc b/deps/v8/src/maglev/maglev-compilation-info.cc
index 630d341a66..0b018b1913 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.cc
+++ b/deps/v8/src/maglev/maglev-compilation-info.cc
@@ -69,11 +69,17 @@ MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
zone()->New<compiler::CompilationDependencies>(broker(), zone());
USE(deps); // The deps register themselves in the heap broker.
+ // Heap broker initialization may already use IsPendingAllocation.
+ isolate->heap()->PublishPendingAllocations();
+
broker()->SetTargetNativeContextRef(
handle(function->native_context(), isolate));
broker()->InitializeAndStartSerializing();
broker()->StopSerializing();
+ // Serialization may have allocated.
+ isolate->heap()->PublishPendingAllocations();
+
toplevel_compilation_unit_ =
MaglevCompilationUnit::New(zone(), this, function);
}
diff --git a/deps/v8/src/maglev/maglev-compilation-info.h b/deps/v8/src/maglev/maglev-compilation-info.h
index 70490de218..d8d52402c8 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.h
+++ b/deps/v8/src/maglev/maglev-compilation-info.h
@@ -63,9 +63,6 @@ class MaglevCompilationInfo final {
void set_graph(Graph* graph) { graph_ = graph; }
Graph* graph() const { return graph_; }
- void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
- MaybeHandle<CodeT> codet() const { return codet_; }
-
// Flag accessors (for thread-safe access to global flags).
// TODO(v8:7700): Consider caching these.
#define V(Name) \
@@ -103,9 +100,6 @@ class MaglevCompilationInfo final {
// Produced off-thread during ExecuteJobImpl.
Graph* graph_ = nullptr;
- // Produced during FinalizeJobImpl.
- MaybeHandle<CodeT> codet_;
-
#define V(Name) const bool Name##_;
MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.cc b/deps/v8/src/maglev/maglev-compilation-unit.cc
index f35f418de7..5662cecb41 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.cc
+++ b/deps/v8/src/maglev/maglev-compilation-unit.cc
@@ -6,6 +6,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-graph-labeller.h"
#include "src/objects/js-function-inl.h"
namespace v8 {
@@ -15,13 +16,14 @@ namespace maglev {
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationInfo* info,
Handle<JSFunction> function)
: info_(info),
- bytecode_(
- MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
+ shared_function_info_(MakeRef(broker(), function->shared())),
+ bytecode_(shared_function_info_.GetBytecodeArray()),
feedback_(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis_(bytecode_.object(), zone(), BytecodeOffset::None(),
true),
register_count_(bytecode_.register_count()),
- parameter_count_(bytecode_.parameter_count()) {}
+ parameter_count_(bytecode_.parameter_count()),
+ stack_value_repr_(info->zone()) {}
compiler::JSHeapBroker* MaglevCompilationUnit::broker() const {
return info_->broker();
@@ -40,6 +42,12 @@ MaglevGraphLabeller* MaglevCompilationUnit::graph_labeller() const {
return info_->graph_labeller();
}
+void MaglevCompilationUnit::RegisterNodeInGraphLabeller(const Node* node) {
+ if (has_graph_labeller()) {
+ graph_labeller()->RegisterNode(node);
+ }
+}
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.h b/deps/v8/src/maglev/maglev-compilation-unit.h
index 52e1a775d6..9060aba476 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.h
+++ b/deps/v8/src/maglev/maglev-compilation-unit.h
@@ -13,8 +13,10 @@ namespace v8 {
namespace internal {
namespace maglev {
+enum class ValueRepresentation;
class MaglevCompilationInfo;
class MaglevGraphLabeller;
+class Node;
// Per-unit data, i.e. once per top-level function and once per inlined
// function.
@@ -30,24 +32,43 @@ class MaglevCompilationUnit : public ZoneObject {
MaglevCompilationInfo* info() const { return info_; }
compiler::JSHeapBroker* broker() const;
Isolate* isolate() const;
+ LocalIsolate* local_isolate() const;
Zone* zone() const;
int register_count() const { return register_count_; }
int parameter_count() const { return parameter_count_; }
bool has_graph_labeller() const;
MaglevGraphLabeller* graph_labeller() const;
+ const compiler::SharedFunctionInfoRef& shared_function_info() const {
+ return shared_function_info_;
+ }
const compiler::BytecodeArrayRef& bytecode() const { return bytecode_; }
const compiler::FeedbackVectorRef& feedback() const { return feedback_; }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return bytecode_analysis_;
}
+ void RegisterNodeInGraphLabeller(const Node* node);
+
+ const ZoneVector<ValueRepresentation>& stack_value_repr() const {
+ return stack_value_repr_;
+ }
+
+ void push_stack_value_repr(ValueRepresentation r) {
+ stack_value_repr_.push_back(r);
+ }
+
private:
MaglevCompilationInfo* const info_;
+ const compiler::SharedFunctionInfoRef shared_function_info_;
const compiler::BytecodeArrayRef bytecode_;
const compiler::FeedbackVectorRef feedback_;
const compiler::BytecodeAnalysis bytecode_analysis_;
const int register_count_;
const int parameter_count_;
+
+ // TODO(victorgomes): Compress these values, if only tagged/untagged, we could
+ // use a binary vector? We might also want to deal with safepoints properly.
+ ZoneVector<ValueRepresentation> stack_value_repr_;
};
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-compiler.cc b/deps/v8/src/maglev/maglev-compiler.cc
index f4a23d869e..e1de2cc21b 100644
--- a/deps/v8/src/maglev/maglev-compiler.cc
+++ b/deps/v8/src/maglev/maglev-compiler.cc
@@ -31,6 +31,7 @@
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph-verifier.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
@@ -46,8 +47,6 @@ namespace maglev {
class NumberingProcessor {
public:
- static constexpr bool kNeedsCheckpointStates = false;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
@@ -62,17 +61,21 @@ class NumberingProcessor {
class UseMarkingProcessor {
public:
- static constexpr bool kNeedsCheckpointStates = true;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
- void Process(NodeBase* node, const ProcessingState& state) {
- if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ if constexpr (NodeT::kProperties.can_eager_deopt()) {
+ MarkCheckpointNodes(node, node->eager_deopt_info(), state);
+ }
for (Input& input : *node) {
input.node()->mark_use(node->id(), &input);
}
+ if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ MarkCheckpointNodes(node, node->lazy_deopt_info(), state);
+ }
}
void Process(Phi* node, const ProcessingState& state) {
@@ -105,30 +108,40 @@ class UseMarkingProcessor {
}
private:
- void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
- const InterpreterFrameState* checkpoint_state =
- state.checkpoint_frame_state();
+ void MarkCheckpointNodes(NodeBase* node, const EagerDeoptInfo* deopt_info,
+ const ProcessingState& state) {
+ const CompactInterpreterFrameState* register_frame =
+ deopt_info->state.register_frame;
int use_id = node->id();
+ int index = 0;
- for (int i = 0; i < state.parameter_count(); i++) {
- interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
- ValueNode* node = checkpoint_state->get(reg);
- if (node) node->mark_use(use_id, nullptr);
- }
- for (int i = 0; i < state.register_count(); i++) {
- interpreter::Register reg = interpreter::Register(i);
- ValueNode* node = checkpoint_state->get(reg);
- if (node) node->mark_use(use_id, nullptr);
- }
- if (checkpoint_state->accumulator()) {
- checkpoint_state->accumulator()->mark_use(use_id, nullptr);
- }
+ register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ node->mark_use(use_id, &deopt_info->input_locations[index++]);
+ });
+ }
+ void MarkCheckpointNodes(NodeBase* node, const LazyDeoptInfo* deopt_info,
+ const ProcessingState& state) {
+ const CompactInterpreterFrameState* register_frame =
+ deopt_info->state.register_frame;
+ int use_id = node->id();
+ int index = 0;
+
+ register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ // Skip over the result location.
+ if (reg == deopt_info->result_location) return;
+ node->mark_use(use_id, &deopt_info->input_locations[index++]);
+ });
}
};
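UseMarkingProcessor now dispatches on compile-time node properties with `if constexpr`, so only nodes that can actually deopt pay for checkpoint bookkeeping. A reduced sketch of that pattern; NodeProperties and the node types are invented for the example.

#include <cstdio>

struct NodeProperties {
  bool eager_deopt;
  constexpr bool can_eager_deopt() const { return eager_deopt; }
};

struct AddNode { static constexpr NodeProperties kProperties{true}; };
struct ConstantNode { static constexpr NodeProperties kProperties{false}; };

template <typename NodeT>
void Process(NodeT&) {
  // The branch is resolved at compile time per node type.
  if constexpr (NodeT::kProperties.can_eager_deopt()) {
    std::puts("walk checkpoint frame");   // only instantiated for AddNode
  } else {
    std::puts("no deopt bookkeeping");    // ConstantNode takes this path
  }
}

int main() {
  AddNode a;
  ConstantNode c;
  Process(a);
  Process(c);
}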
// static
-void MaglevCompiler::Compile(MaglevCompilationUnit* toplevel_compilation_unit) {
- MaglevCompiler compiler(toplevel_compilation_unit);
+void MaglevCompiler::Compile(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* toplevel_compilation_unit) {
+ MaglevCompiler compiler(local_isolate, toplevel_compilation_unit);
compiler.Compile();
}
@@ -142,7 +155,13 @@ void MaglevCompiler::Compile() {
new MaglevGraphLabeller());
}
- MaglevGraphBuilder graph_builder(toplevel_compilation_unit_);
+ // TODO(v8:7700): Support exceptions in maglev. We currently bail if exception
+ // handler table is non-empty.
+ if (toplevel_compilation_unit_->bytecode().handler_table_size() > 0) {
+ return;
+ }
+
+ MaglevGraphBuilder graph_builder(local_isolate(), toplevel_compilation_unit_);
graph_builder.Build();
@@ -156,6 +175,13 @@ void MaglevCompiler::Compile() {
PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
}
+#ifdef DEBUG
+ {
+ GraphProcessor<MaglevGraphVerifier> verifier(toplevel_compilation_unit_);
+ verifier.ProcessGraph(graph_builder.graph());
+ }
+#endif
+
{
GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
MaglevVregAllocator>
@@ -184,11 +210,20 @@ void MaglevCompiler::Compile() {
MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
MaglevCompilationUnit* toplevel_compilation_unit) {
Graph* const graph = toplevel_compilation_unit->info()->graph();
- if (graph == nullptr) return {}; // Compilation failed.
+ if (graph == nullptr) {
+ // Compilation failed.
+ toplevel_compilation_unit->shared_function_info()
+ .object()
+ ->set_maglev_compilation_failed(true);
+ return {};
+ }
Handle<Code> code;
if (!MaglevCodeGenerator::Generate(toplevel_compilation_unit, graph)
.ToHandle(&code)) {
+ toplevel_compilation_unit->shared_function_info()
+ .object()
+ ->set_maglev_compilation_failed(true);
return {};
}
@@ -201,6 +236,7 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
}
Isolate* const isolate = toplevel_compilation_unit->isolate();
+ isolate->native_context()->AddOptimizedCode(ToCodeT(*code));
return ToCodeT(code, isolate);
}
diff --git a/deps/v8/src/maglev/maglev-compiler.h b/deps/v8/src/maglev/maglev-compiler.h
index 79b71552d1..37fb5d0340 100644
--- a/deps/v8/src/maglev/maglev-compiler.h
+++ b/deps/v8/src/maglev/maglev-compiler.h
@@ -24,7 +24,8 @@ class Graph;
class MaglevCompiler {
public:
// May be called from any thread.
- static void Compile(MaglevCompilationUnit* toplevel_compilation_unit);
+ static void Compile(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* toplevel_compilation_unit);
// Called on the main thread after Compile has completed.
// TODO(v8:7700): Move this to a different class?
@@ -32,8 +33,10 @@ class MaglevCompiler {
MaglevCompilationUnit* toplevel_compilation_unit);
private:
- explicit MaglevCompiler(MaglevCompilationUnit* toplevel_compilation_unit)
- : toplevel_compilation_unit_(toplevel_compilation_unit) {}
+ explicit MaglevCompiler(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* toplevel_compilation_unit)
+ : local_isolate_(local_isolate),
+ toplevel_compilation_unit_(toplevel_compilation_unit) {}
void Compile();
@@ -41,8 +44,9 @@ class MaglevCompiler {
return toplevel_compilation_unit_->broker();
}
Zone* zone() { return toplevel_compilation_unit_->zone(); }
- Isolate* isolate() { return toplevel_compilation_unit_->isolate(); }
+ LocalIsolate* local_isolate() { return local_isolate_; }
+ LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const toplevel_compilation_unit_;
};
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
index 762de2455a..0c001f4e5f 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
@@ -4,6 +4,7 @@
#include "src/maglev/maglev-concurrent-dispatcher.h"
+#include "src/codegen/compiler.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
@@ -85,10 +86,8 @@ std::unique_ptr<MaglevCompilationJob> MaglevCompilationJob::New(
MaglevCompilationJob::MaglevCompilationJob(
std::unique_ptr<MaglevCompilationInfo>&& info)
- : OptimizedCompilationJob(nullptr, kMaglevCompilerName),
+ : OptimizedCompilationJob(kMaglevCompilerName, State::kReadyToPrepare),
info_(std::move(info)) {
- // TODO(jgruber, v8:7700): Remove the OptimizedCompilationInfo (which should
- // be renamed to TurbofanCompilationInfo) from OptimizedCompilationJob.
DCHECK(FLAG_maglev);
}
@@ -102,18 +101,26 @@ CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
RuntimeCallStats* stats, LocalIsolate* local_isolate) {
LocalIsolateScope scope{info(), local_isolate};
- maglev::MaglevCompiler::Compile(info()->toplevel_compilation_unit());
+ maglev::MaglevCompiler::Compile(local_isolate,
+ info()->toplevel_compilation_unit());
// TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED;
}
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
- info()->set_codet(maglev::MaglevCompiler::GenerateCode(
- info()->toplevel_compilation_unit()));
- // TODO(v8:7700): Actual return codes.
+ Handle<CodeT> codet;
+ if (!maglev::MaglevCompiler::GenerateCode(info()->toplevel_compilation_unit())
+ .ToHandle(&codet)) {
+ return CompilationJob::FAILED;
+ }
+ info()->function()->set_code(*codet);
return CompilationJob::SUCCEEDED;
}
+Handle<JSFunction> MaglevCompilationJob::function() const {
+ return info_->function();
+}
+
// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
// processing the incoming queue on a worker thread.
class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
@@ -134,8 +141,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
CHECK_EQ(status, CompilationJob::SUCCEEDED);
outgoing_queue()->Enqueue(std::move(job));
}
- // TODO(v8:7700):
- // isolate_->stack_guard()->RequestInstallMaglevCode();
+ isolate()->stack_guard()->RequestInstallMaglevCode();
}
size_t GetMaxConcurrency(size_t) const override {
@@ -180,12 +186,16 @@ void MaglevConcurrentDispatcher::EnqueueJob(
}
void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
+ HandleScope handle_scope(isolate_);
while (!outgoing_queue_.IsEmpty()) {
std::unique_ptr<MaglevCompilationJob> job;
outgoing_queue_.Dequeue(&job);
CompilationJob::Status status = job->FinalizeJob(isolate_);
- // TODO(v8:7700): Use the result.
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ // TODO(v8:7700): Use the result and check whether the job succeeded
+ // once all the bytecodes are implemented.
+ if (status == CompilationJob::SUCCEEDED) {
+ Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_);
+ }
}
}
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
index 0b2a086e5a..fa0e40ac09 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
@@ -21,6 +21,13 @@ namespace maglev {
class MaglevCompilationInfo;
+// TODO(v8:7700): While basic infrastructure now exists, there are many TODOs
+// that should still be addressed soon:
+// - Full tracing support through --trace-opt.
+// - Concurrent codegen.
+// - Concurrent Code object creation (optional?).
+// - Test support for concurrency (see %FinalizeOptimization).
+
// Exports needed functionality without exposing implementation details.
class ExportedMaglevCompilationInfo final {
public:
@@ -47,6 +54,8 @@ class MaglevCompilationJob final : public OptimizedCompilationJob {
LocalIsolate* local_isolate) override;
Status FinalizeJobImpl(Isolate* isolate) override;
+ Handle<JSFunction> function() const;
+
private:
explicit MaglevCompilationJob(std::unique_ptr<MaglevCompilationInfo>&& info);
diff --git a/deps/v8/src/maglev/maglev-graph-builder.cc b/deps/v8/src/maglev/maglev-graph-builder.cc
index b38bece1d5..c7026214cc 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.cc
+++ b/deps/v8/src/maglev/maglev-graph-builder.cc
@@ -4,12 +4,16 @@
#include "src/maglev/maglev-graph-builder.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
+#include "src/compiler/processed-feedback.h"
#include "src/handles/maybe-handles-inl.h"
-#include "src/ic/handler-configuration.h"
+#include "src/ic/handler-configuration-inl.h"
+#include "src/maglev/maglev-ir.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/name-inl.h"
+#include "src/objects/property-cell.h"
#include "src/objects/slots-inl.h"
namespace v8 {
@@ -17,8 +21,22 @@ namespace internal {
namespace maglev {
-MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
- : compilation_unit_(compilation_unit),
+namespace {
+
+int LoadSimpleFieldHandler(FieldIndex field_index) {
+ int config = LoadHandler::KindBits::encode(LoadHandler::Kind::kField) |
+ LoadHandler::IsInobjectBits::encode(field_index.is_inobject()) |
+ LoadHandler::IsDoubleBits::encode(field_index.is_double()) |
+ LoadHandler::FieldIndexBits::encode(field_index.index());
+ return config;
+}
+
+} // namespace
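LoadSimpleFieldHandler packs the access kind, the in-object flag, the double flag, and the field index into one integer using bit-field encoders. A self-contained analogue with a tiny hand-rolled BitField helper; the widths chosen here are arbitrary and do not reflect the real LoadHandler layout.

#include <cstdint>
#include <cstdio>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using KindBits = BitField<int, 0, 4>;
using IsInobjectBits = BitField<bool, 4, 1>;
using IsDoubleBits = BitField<bool, 5, 1>;
using FieldIndexBits = BitField<int, 6, 16>;

int main() {
  uint32_t config = KindBits::encode(1) | IsInobjectBits::encode(true) |
                    IsDoubleBits::encode(false) | FieldIndexBits::encode(42);
  std::printf("kind=%d inobject=%d index=%d\n", KindBits::decode(config),
              IsInobjectBits::decode(config), FieldIndexBits::decode(config));
}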
+
+MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* compilation_unit)
+ : local_isolate_(local_isolate),
+ compilation_unit_(compilation_unit),
iterator_(bytecode().object()),
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
// Overallocate merge_states_ by one to allow always looking up the
@@ -69,8 +87,6 @@ MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
- const compiler::BytecodeLivenessState* liveness =
- bytecode_analysis().GetInLivenessFor(0);
int register_index = 0;
// TODO(leszeks): Don't emit if not needed.
ValueNode* undefined_value =
@@ -78,19 +94,16 @@ MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
- StoreRegister(interpreter::Register(register_index), undefined_value,
- liveness);
+ StoreRegister(interpreter::Register(register_index), undefined_value);
}
StoreRegister(
new_target_or_generator_register,
// TODO(leszeks): Expose in Graph.
- AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
- liveness);
+ AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister));
register_index++;
}
for (; register_index < register_count(); register_index++) {
- StoreRegister(interpreter::Register(register_index), undefined_value,
- liveness);
+ StoreRegister(interpreter::Register(register_index), undefined_value);
}
BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
@@ -109,37 +122,48 @@ MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
-template <Operation kOperation, typename... Args>
-ValueNode* MaglevGraphBuilder::AddNewOperationNode(
- std::initializer_list<ValueNode*> inputs, Args&&... args) {
- switch (kOperation) {
-#define CASE(Name) \
- case Operation::k##Name: \
- return AddNewNode<Generic##Name>(inputs, std::forward<Args>(args)...);
- OPERATION_LIST(CASE)
-#undef CASE
- }
-}
+namespace {
+template <Operation kOperation>
+struct NodeForOperationHelper;
+
+#define NODE_FOR_OPERATION_HELPER(Name) \
+ template <> \
+ struct NodeForOperationHelper<Operation::k##Name> { \
+ using generic_type = Generic##Name; \
+ };
+OPERATION_LIST(NODE_FOR_OPERATION_HELPER)
+#undef NODE_FOR_OPERATION_HELPER
+
+template <Operation kOperation>
+using GenericNodeForOperation =
+ typename NodeForOperationHelper<kOperation>::generic_type;
+} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
FeedbackSlot slot_index = GetSlotOperand(0);
- ValueNode* value = GetAccumulator();
- ValueNode* node = AddNewOperationNode<kOperation>(
- {value}, compiler::FeedbackSource{feedback(), slot_index});
- SetAccumulator(node);
- MarkPossibleSideEffect();
+ ValueNode* value = GetAccumulatorTaggedValue();
+ SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
+ {value}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
- ValueNode* left = LoadRegister(0);
+ ValueNode* left = LoadRegisterTaggedValue(0);
+ ValueNode* right = GetAccumulatorTaggedValue();
+ FeedbackSlot slot_index = GetSlotOperand(1);
+ SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
+ {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildGenericBinarySmiOperationNode() {
+ ValueNode* left = GetAccumulatorTaggedValue();
+ Smi constant = Smi::FromInt(iterator_.GetImmediateOperand(0));
+ ValueNode* right = AddNewNode<SmiConstant>({}, constant);
FeedbackSlot slot_index = GetSlotOperand(1);
- ValueNode* right = GetAccumulator();
- ValueNode* node = AddNewOperationNode<kOperation>(
- {left, right}, compiler::FeedbackSource{feedback(), slot_index});
- SetAccumulator(node);
- MarkPossibleSideEffect();
+ SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
+ {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
@@ -150,11 +174,69 @@ void MaglevGraphBuilder::VisitUnaryOperation() {
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
+ FeedbackNexus nexus = FeedbackNexusForOperand(1);
+
+ if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ if (nexus.kind() == FeedbackSlotKind::kBinaryOp) {
+ BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
+
+ if (hint == BinaryOperationHint::kSignedSmall) {
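+        // With Smi feedback, consume both inputs as untagged Smi values; if
+        // the register operand aliases the accumulator, untag it only once.
+        // Only kAdd is specialized so far; everything else falls through to
+        // the generic node below.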
+ ValueNode *left, *right;
+ if (IsRegisterEqualToAccumulator(0)) {
+ left = right = LoadRegisterSmiUntaggedValue(0);
+ } else {
+ left = LoadRegisterSmiUntaggedValue(0);
+ right = GetAccumulatorSmiUntaggedValue();
+ }
+
+ if (kOperation == Operation::kAdd) {
+ SetAccumulator(AddNewNode<Int32AddWithOverflow>({left, right}));
+ return;
+ }
+ }
+ }
+ }
+
// TODO(victorgomes): Use feedback info and create optimized versions.
BuildGenericBinaryOperationNode<kOperation>();
}
-void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
+template <Operation kOperation>
+void MaglevGraphBuilder::VisitBinarySmiOperation() {
+ FeedbackNexus nexus = FeedbackNexusForOperand(1);
+
+ if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ if (nexus.kind() == FeedbackSlotKind::kBinaryOp) {
+ BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
+
+ if (hint == BinaryOperationHint::kSignedSmall) {
+ ValueNode* left = GetAccumulatorSmiUntaggedValue();
+ int32_t constant = iterator_.GetImmediateOperand(0);
+
+ if (kOperation == Operation::kAdd) {
+ if (constant == 0) {
+          // For addition of zero, once the accumulator has passed the Smi
+          // check, it already holds the right value, so we can just return.
+ return;
+ }
+ // TODO(victorgomes): We could create an Int32Add node that receives
+ // a constant and avoid a register move.
+ ValueNode* right = AddNewNode<Int32Constant>({}, constant);
+ SetAccumulator(AddNewNode<Int32AddWithOverflow>({left, right}));
+ return;
+ }
+ }
+ }
+ }
+
+ // TODO(victorgomes): Use feedback info and create optimized versions.
+ BuildGenericBinarySmiOperationNode<kOperation>();
+}
+
+void MaglevGraphBuilder::VisitLdar() {
+ MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
+ interpreter::Register::virtual_accumulator());
+}
void MaglevGraphBuilder::VisitLdaZero() {
SetAccumulator(AddNewNode<SmiConstant>({}, Smi::zero()));
@@ -178,20 +260,42 @@ void MaglevGraphBuilder::VisitLdaTrue() {
void MaglevGraphBuilder::VisitLdaFalse() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kFalseValue));
}
-MAGLEV_UNIMPLEMENTED_BYTECODE(LdaConstant)
+void MaglevGraphBuilder::VisitLdaConstant() {
+ SetAccumulator(GetConstant(GetRefOperand<HeapObject>(0)));
+}
MAGLEV_UNIMPLEMENTED_BYTECODE(LdaContextSlot)
MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableContextSlot)
-MAGLEV_UNIMPLEMENTED_BYTECODE(LdaCurrentContextSlot)
-MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableCurrentContextSlot)
-void MaglevGraphBuilder::VisitStar() {
- StoreRegister(
- iterator_.GetRegisterOperand(0), GetAccumulator(),
- bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+void MaglevGraphBuilder::VisitLdaCurrentContextSlot() {
+ ValueNode* context = GetContext();
+ int slot_index = iterator_.GetIndexOperand(0);
+
+ // TODO(leszeks): Passing a LoadHandler to LoadField here is a bit of
+ // a hack, maybe we should have a LoadRawOffset or similar.
+ SetAccumulator(AddNewNode<LoadField>(
+ {context},
+ LoadSimpleFieldHandler(FieldIndex::ForInObjectOffset(
+ Context::OffsetOfElementAt(slot_index), FieldIndex::kTagged))));
}
+void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
+ // TODO(leszeks): Consider context specialising.
+ VisitLdaCurrentContextSlot();
+}
+void MaglevGraphBuilder::VisitStar() {
+ MoveNodeBetweenRegisters(interpreter::Register::virtual_accumulator(),
+ iterator_.GetRegisterOperand(0));
+}
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void MaglevGraphBuilder::Visit##Name() { \
+ MoveNodeBetweenRegisters( \
+ interpreter::Register::virtual_accumulator(), \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name)); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
void MaglevGraphBuilder::VisitMov() {
- StoreRegister(
- iterator_.GetRegisterOperand(1), LoadRegister(0),
- bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+ MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
+ iterator_.GetRegisterOperand(1));
}
MAGLEV_UNIMPLEMENTED_BYTECODE(PushContext)
MAGLEV_UNIMPLEMENTED_BYTECODE(PopContext)
@@ -200,6 +304,56 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndetectable)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestNull)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndefined)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestTypeOf)
+
+void MaglevGraphBuilder::BuildPropertyCellAccess(
+ const compiler::PropertyCellRef& property_cell) {
+ // TODO(leszeks): A bunch of this is copied from
+ // js-native-context-specialization.cc -- I wonder if we can unify it
+ // somehow.
+ bool was_cached = property_cell.Cache();
+ CHECK(was_cached);
+
+ compiler::ObjectRef property_cell_value = property_cell.value();
+ if (property_cell_value.IsTheHole()) {
+ // The property cell is no longer valid.
+ EmitUnconditionalDeopt();
+ return;
+ }
+
+ PropertyDetails property_details = property_cell.property_details();
+ PropertyCellType property_cell_type = property_details.cell_type();
+ DCHECK_EQ(PropertyKind::kData, property_details.kind());
+
+ if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+ SetAccumulator(GetConstant(property_cell_value));
+ return;
+ }
+
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or the global property is configurable (i.e.
+ // can be deleted or reconfigured to an accessor property).
+ if (property_cell_type != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ broker()->dependencies()->DependOnGlobalProperty(property_cell);
+ }
+
+  // A load from a constant/undefined global property can be constant-folded.
+ if (property_cell_type == PropertyCellType::kConstant ||
+ property_cell_type == PropertyCellType::kUndefined) {
+ SetAccumulator(GetConstant(property_cell_value));
+ return;
+ }
+
+ ValueNode* property_cell_node =
+ AddNewNode<Constant>({}, property_cell.AsHeapObject());
+  // TODO(leszeks): Passing a LoadHandler to LoadField here is a bit of
+ // a hack, maybe we should have a LoadRawOffset or similar.
+ SetAccumulator(AddNewNode<LoadField>(
+ {property_cell_node},
+ LoadSimpleFieldHandler(FieldIndex::ForInObjectOffset(
+ PropertyCell::kValueOffset, FieldIndex::kTagged))));
+}
+
void MaglevGraphBuilder::VisitLdaGlobal() {
// LdaGlobal <name_index> <slot>
@@ -207,13 +361,26 @@ void MaglevGraphBuilder::VisitLdaGlobal() {
static const int kSlotOperandIndex = 1;
compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
- FeedbackSlot slot_index = GetSlotOperand(kSlotOperandIndex);
- ValueNode* context = GetContext();
+ const compiler::ProcessedFeedback& access_feedback =
+ broker()->GetFeedbackForGlobalAccess(compiler::FeedbackSource(
+ feedback(), GetSlotOperand(kSlotOperandIndex)));
+
+ if (access_feedback.IsInsufficient()) {
+ EmitUnconditionalDeopt();
+ return;
+ }
+
+ const compiler::GlobalAccessFeedback& global_access_feedback =
+ access_feedback.AsGlobalAccess();
- USE(slot_index); // TODO(v8:7700): Use the feedback info.
+ if (global_access_feedback.IsPropertyCell()) {
+ BuildPropertyCellAccess(global_access_feedback.property_cell());
+ } else {
+ // TODO(leszeks): Handle the IsScriptContextSlot case.
- SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
- MarkPossibleSideEffect();
+ ValueNode* context = GetContext();
+ SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
+ }
}
MAGLEV_UNIMPLEMENTED_BYTECODE(LdaGlobalInsideTypeof)
MAGLEV_UNIMPLEMENTED_BYTECODE(StaGlobal)
@@ -228,35 +395,51 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupGlobalSlotInsideTypeof)
MAGLEV_UNIMPLEMENTED_BYTECODE(StaLookupSlot)
void MaglevGraphBuilder::VisitGetNamedProperty() {
// GetNamedProperty <object> <name_index> <slot>
- ValueNode* object = LoadRegister(0);
- FeedbackNexus nexus = feedback_nexus(2);
-
- if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
- EnsureCheckpoint();
- AddNewNode<SoftDeopt>({});
- } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
- std::vector<MapAndHandler> maps_and_handlers;
- nexus.ExtractMapsAndHandlers(&maps_and_handlers);
- DCHECK_EQ(maps_and_handlers.size(), 1);
- MapAndHandler& map_and_handler = maps_and_handlers[0];
- if (map_and_handler.second->IsSmi()) {
- int handler = map_and_handler.second->ToSmi().value();
- LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
- if (kind == LoadHandler::Kind::kField &&
- !LoadHandler::IsWasmStructBits::decode(handler)) {
- EnsureCheckpoint();
- AddNewNode<CheckMaps>({object},
- MakeRef(broker(), map_and_handler.first));
- SetAccumulator(AddNewNode<LoadField>({object}, handler));
- return;
+ ValueNode* object = LoadRegisterTaggedValue(0);
+ compiler::NameRef name = GetRefOperand<Name>(1);
+ FeedbackSlot slot = GetSlotOperand(2);
+ compiler::FeedbackSource feedback_source{feedback(), slot};
+
+ const compiler::ProcessedFeedback& processed_feedback =
+ broker()->GetFeedbackForPropertyAccess(feedback_source,
+ compiler::AccessMode::kLoad, name);
+
+ switch (processed_feedback.kind()) {
+ case compiler::ProcessedFeedback::kInsufficient:
+ EmitUnconditionalDeopt();
+ return;
+
+ case compiler::ProcessedFeedback::kNamedAccess: {
+ const compiler::NamedAccessFeedback& named_feedback =
+ processed_feedback.AsNamedAccess();
+ if (named_feedback.maps().size() == 1) {
+ // Monomorphic load, check the handler.
+ // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
+ MaybeObjectHandle handler =
+ FeedbackNexusForSlot(slot).FindHandlerForMap(
+ named_feedback.maps()[0].object());
+ if (!handler.is_null() && handler->IsSmi()) {
+ // Smi handler, emit a map check and LoadField.
+ int smi_handler = handler->ToSmi().value();
+ LoadHandler::Kind kind = LoadHandler::KindBits::decode(smi_handler);
+ if (kind == LoadHandler::Kind::kField &&
+ !LoadHandler::IsWasmStructBits::decode(smi_handler)) {
+ AddNewNode<CheckMaps>({object}, named_feedback.maps()[0]);
+ SetAccumulator(AddNewNode<LoadField>({object}, smi_handler));
+ return;
+ }
+ }
}
- }
+ } break;
+
+ default:
+ break;
}
+ // Create a generic load in the fallthrough.
ValueNode* context = GetContext();
- compiler::NameRef name = GetRefOperand<Name>(1);
- SetAccumulator(AddNewNode<LoadNamedGeneric>({context, object}, name));
- MarkPossibleSideEffect();
+ SetAccumulator(
+ AddNewNode<LoadNamedGeneric>({context, object}, name, feedback_source));
}
MAGLEV_UNIMPLEMENTED_BYTECODE(GetNamedPropertyFromSuper)
@@ -266,29 +449,44 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(StaModuleVariable)
void MaglevGraphBuilder::VisitSetNamedProperty() {
// SetNamedProperty <object> <name_index> <slot>
- ValueNode* object = LoadRegister(0);
- FeedbackNexus nexus = feedback_nexus(2);
-
- if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
- EnsureCheckpoint();
- AddNewNode<SoftDeopt>({});
- } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
- std::vector<MapAndHandler> maps_and_handlers;
- nexus.ExtractMapsAndHandlers(&maps_and_handlers);
- DCHECK_EQ(maps_and_handlers.size(), 1);
- MapAndHandler& map_and_handler = maps_and_handlers[0];
- if (map_and_handler.second->IsSmi()) {
- int handler = map_and_handler.second->ToSmi().value();
- StoreHandler::Kind kind = StoreHandler::KindBits::decode(handler);
- if (kind == StoreHandler::Kind::kField) {
- EnsureCheckpoint();
- AddNewNode<CheckMaps>({object},
- MakeRef(broker(), map_and_handler.first));
- ValueNode* value = GetAccumulator();
- AddNewNode<StoreField>({object, value}, handler);
- return;
+ ValueNode* object = LoadRegisterTaggedValue(0);
+ compiler::NameRef name = GetRefOperand<Name>(1);
+ FeedbackSlot slot = GetSlotOperand(2);
+ compiler::FeedbackSource feedback_source{feedback(), slot};
+
+ const compiler::ProcessedFeedback& processed_feedback =
+ broker()->GetFeedbackForPropertyAccess(
+ feedback_source, compiler::AccessMode::kStore, name);
+
+ switch (processed_feedback.kind()) {
+ case compiler::ProcessedFeedback::kInsufficient:
+ EmitUnconditionalDeopt();
+ return;
+
+ case compiler::ProcessedFeedback::kNamedAccess: {
+ const compiler::NamedAccessFeedback& named_feedback =
+ processed_feedback.AsNamedAccess();
+ if (named_feedback.maps().size() == 1) {
+ // Monomorphic store, check the handler.
+ // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
+ MaybeObjectHandle handler =
+ FeedbackNexusForSlot(slot).FindHandlerForMap(
+ named_feedback.maps()[0].object());
+ if (!handler.is_null() && handler->IsSmi()) {
+ int smi_handler = handler->ToSmi().value();
+ StoreHandler::Kind kind = StoreHandler::KindBits::decode(smi_handler);
+ if (kind == StoreHandler::Kind::kField) {
+ AddNewNode<CheckMaps>({object}, named_feedback.maps()[0]);
+ ValueNode* value = GetAccumulatorTaggedValue();
+ AddNewNode<StoreField>({object, value}, smi_handler);
+ return;
+ }
+ }
}
- }
+ } break;
+
+ default:
+ break;
}
// TODO(victorgomes): Generic store.
@@ -337,18 +535,42 @@ void MaglevGraphBuilder::VisitShiftRightLogical() {
VisitBinaryOperation<Operation::kShiftRightLogical>();
}
-MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(DivSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ModSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ExpSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseOrSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseXorSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi)
+void MaglevGraphBuilder::VisitAddSmi() {
+ VisitBinarySmiOperation<Operation::kAdd>();
+}
+void MaglevGraphBuilder::VisitSubSmi() {
+ VisitBinarySmiOperation<Operation::kSubtract>();
+}
+void MaglevGraphBuilder::VisitMulSmi() {
+ VisitBinarySmiOperation<Operation::kMultiply>();
+}
+void MaglevGraphBuilder::VisitDivSmi() {
+ VisitBinarySmiOperation<Operation::kDivide>();
+}
+void MaglevGraphBuilder::VisitModSmi() {
+ VisitBinarySmiOperation<Operation::kModulus>();
+}
+void MaglevGraphBuilder::VisitExpSmi() {
+ VisitBinarySmiOperation<Operation::kExponentiate>();
+}
+void MaglevGraphBuilder::VisitBitwiseOrSmi() {
+ VisitBinarySmiOperation<Operation::kBitwiseOr>();
+}
+void MaglevGraphBuilder::VisitBitwiseXorSmi() {
+ VisitBinarySmiOperation<Operation::kBitwiseXor>();
+}
+void MaglevGraphBuilder::VisitBitwiseAndSmi() {
+ VisitBinarySmiOperation<Operation::kBitwiseAnd>();
+}
+void MaglevGraphBuilder::VisitShiftLeftSmi() {
+ VisitBinarySmiOperation<Operation::kShiftLeft>();
+}
+void MaglevGraphBuilder::VisitShiftRightSmi() {
+ VisitBinarySmiOperation<Operation::kShiftRight>();
+}
+void MaglevGraphBuilder::VisitShiftRightLogicalSmi() {
+ VisitBinarySmiOperation<Operation::kShiftRightLogical>();
+}
void MaglevGraphBuilder::VisitInc() {
VisitUnaryOperation<Operation::kIncrement>();
@@ -369,59 +591,95 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf)
MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertyStrict)
MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertySloppy)
MAGLEV_UNIMPLEMENTED_BYTECODE(GetSuperConstructor)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallAnyReceiver)
-// TODO(leszeks): For all of these:
-// a) Read feedback and implement inlining
-// b) Wrap in a helper.
-void MaglevGraphBuilder::VisitCallProperty() {
- ValueNode* function = LoadRegister(0);
+// TODO(v8:7700): Read feedback and implement inlining.
+void MaglevGraphBuilder::BuildCallFromRegisterList(
+ ConvertReceiverMode receiver_mode) {
+ ValueNode* function = LoadRegisterTaggedValue(0);
interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
ValueNode* context = GetContext();
- static constexpr int kTheContext = 1;
- CallProperty* call_property = AddNewNode<CallProperty>(
- args.register_count() + kTheContext, function, context);
- // TODO(leszeks): Move this for loop into the CallProperty constructor,
- // pre-size the args array.
+ size_t input_count = args.register_count() + Call::kFixedInputCount;
+
+ RootConstant* undefined_constant;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // The undefined constant node has to be created before the call node.
+ undefined_constant =
+ AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
+ input_count++;
+ }
+
+ Call* call = AddNewNode<Call>(input_count, receiver_mode, function, context);
+ int arg_index = 0;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ call->set_arg(arg_index++, undefined_constant);
+ }
for (int i = 0; i < args.register_count(); ++i) {
- call_property->set_arg(i, current_interpreter_frame_.get(args[i]));
+ call->set_arg(arg_index++, current_interpreter_frame_.get(args[i]));
}
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+
+ SetAccumulator(call);
}
-void MaglevGraphBuilder::VisitCallProperty0() {
- ValueNode* function = LoadRegister(0);
+
+void MaglevGraphBuilder::BuildCallFromRegisters(
+ int argc_count, ConvertReceiverMode receiver_mode) {
+ DCHECK_LE(argc_count, 2);
+ ValueNode* function = LoadRegisterTaggedValue(0);
ValueNode* context = GetContext();
- CallProperty* call_property =
- AddNewNode<CallProperty>({function, context, LoadRegister(1)});
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+ int argc_count_with_recv = argc_count + 1;
+ size_t input_count = argc_count_with_recv + Call::kFixedInputCount;
+
+ // The undefined constant node has to be created before the call node.
+ RootConstant* undefined_constant;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ undefined_constant =
+ AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
+ }
+
+ Call* call = AddNewNode<Call>(input_count, receiver_mode, function, context);
+ int arg_index = 0;
+ int reg_count = argc_count_with_recv;
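+  // With an undefined receiver, the bytecode does not pass a receiver
+  // register; the synthesized undefined constant takes its place, so one
+  // fewer register is read below.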
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ reg_count = argc_count;
+ call->set_arg(arg_index++, undefined_constant);
+ }
+ for (int i = 0; i < reg_count; i++) {
+ call->set_arg(arg_index++, LoadRegisterTaggedValue(i + 1));
+ }
+
+ SetAccumulator(call);
}
-void MaglevGraphBuilder::VisitCallProperty1() {
- ValueNode* function = LoadRegister(0);
- ValueNode* context = GetContext();
- CallProperty* call_property = AddNewNode<CallProperty>(
- {function, context, LoadRegister(1), LoadRegister(2)});
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+void MaglevGraphBuilder::VisitCallAnyReceiver() {
+ BuildCallFromRegisterList(ConvertReceiverMode::kAny);
+}
+void MaglevGraphBuilder::VisitCallProperty() {
+ BuildCallFromRegisterList(ConvertReceiverMode::kNotNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallProperty0() {
+ BuildCallFromRegisters(0, ConvertReceiverMode::kNotNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallProperty1() {
+ BuildCallFromRegisters(1, ConvertReceiverMode::kNotNullOrUndefined);
}
void MaglevGraphBuilder::VisitCallProperty2() {
- ValueNode* function = LoadRegister(0);
- ValueNode* context = GetContext();
-
- CallProperty* call_property = AddNewNode<CallProperty>(
- {function, context, LoadRegister(1), LoadRegister(2), LoadRegister(3)});
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+ BuildCallFromRegisters(2, ConvertReceiverMode::kNotNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver() {
+ BuildCallFromRegisterList(ConvertReceiverMode::kNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver0() {
+ BuildCallFromRegisters(0, ConvertReceiverMode::kNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver1() {
+ BuildCallFromRegisters(1, ConvertReceiverMode::kNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver2() {
+ BuildCallFromRegisters(2, ConvertReceiverMode::kNullOrUndefined);
}
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver0)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver1)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver2)
+
MAGLEV_UNIMPLEMENTED_BYTECODE(CallWithSpread)
MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntime)
MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntimeForPair)
@@ -429,9 +687,13 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(CallJSRuntime)
MAGLEV_UNIMPLEMENTED_BYTECODE(InvokeIntrinsic)
MAGLEV_UNIMPLEMENTED_BYTECODE(Construct)
MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
-MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual)
-MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict)
+void MaglevGraphBuilder::VisitTestEqual() {
+ VisitBinaryOperation<Operation::kEqual>();
+}
+void MaglevGraphBuilder::VisitTestEqualStrict() {
+ VisitBinaryOperation<Operation::kStrictEqual>();
+}
void MaglevGraphBuilder::VisitTestLessThan() {
VisitBinaryOperation<Operation::kLessThan>();
}
@@ -531,8 +793,6 @@ void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
int false_target) {
- // TODO(verwaest): Materialize true/false in the respective environments.
- if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
BasicBlock* block = FinishBlock<BranchIfTrue>(next_offset(), {node},
&jump_targets_[true_target],
&jump_targets_[false_target]);
@@ -541,27 +801,25 @@ void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
int true_target,
int false_target) {
- // TODO(verwaest): Materialize true/false in the respective environments.
- if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
BasicBlock* block = FinishBlock<BranchIfToBooleanTrue>(
next_offset(), {node}, &jump_targets_[true_target],
&jump_targets_[false_target]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
- BuildBranchIfToBooleanTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
- next_offset());
+ BuildBranchIfToBooleanTrue(GetAccumulatorTaggedValue(),
+ iterator_.GetJumpTargetOffset(), next_offset());
}
void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
- BuildBranchIfToBooleanTrue(GetAccumulator(), next_offset(),
+ BuildBranchIfToBooleanTrue(GetAccumulatorTaggedValue(), next_offset(),
iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfTrue() {
- BuildBranchIfTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
- next_offset());
+ BuildBranchIfTrue(GetAccumulatorTaggedValue(),
+ iterator_.GetJumpTargetOffset(), next_offset());
}
void MaglevGraphBuilder::VisitJumpIfFalse() {
- BuildBranchIfTrue(GetAccumulator(), next_offset(),
+ BuildBranchIfTrue(GetAccumulatorTaggedValue(), next_offset(),
iterator_.GetJumpTargetOffset());
}
MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNull)
@@ -580,7 +838,7 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(SetPendingMessage)
MAGLEV_UNIMPLEMENTED_BYTECODE(Throw)
MAGLEV_UNIMPLEMENTED_BYTECODE(ReThrow)
void MaglevGraphBuilder::VisitReturn() {
- FinishBlock<Return>(next_offset(), {GetAccumulator()});
+ FinishBlock<Return>(next_offset(), {GetAccumulatorTaggedValue()});
}
MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowReferenceErrorIfHole)
MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowSuperNotCalledIfHole)
@@ -593,15 +851,6 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(GetIterator)
MAGLEV_UNIMPLEMENTED_BYTECODE(Debugger)
MAGLEV_UNIMPLEMENTED_BYTECODE(IncBlockCounter)
MAGLEV_UNIMPLEMENTED_BYTECODE(Abort)
-#define SHORT_STAR_VISITOR(Name, ...) \
- void MaglevGraphBuilder::Visit##Name() { \
- StoreRegister( \
- interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
- GetAccumulator(), \
- bytecode_analysis().GetOutLivenessFor(iterator_.current_offset())); \
- }
-SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
-#undef SHORT_STAR_VISITOR
void MaglevGraphBuilder::VisitWide() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitExtraWide() { UNREACHABLE(); }
diff --git a/deps/v8/src/maglev/maglev-graph-builder.h b/deps/v8/src/maglev/maglev-graph-builder.h
index da86b80841..80fe3df504 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.h
+++ b/deps/v8/src/maglev/maglev-graph-builder.h
@@ -7,10 +7,12 @@
#include <type_traits>
+#include "src/base/optional.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph.h"
@@ -23,7 +25,8 @@ namespace maglev {
class MaglevGraphBuilder {
public:
- explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit);
+ explicit MaglevGraphBuilder(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* compilation_unit);
void Build() {
for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
@@ -31,6 +34,14 @@ class MaglevGraphBuilder {
// TODO(v8:7700): Clean up after all bytecodes are supported.
if (found_unsupported_bytecode()) break;
}
+
+    // When merging InterpreterFrameStates at merge points, we might emit
+    // CheckedSmiTags and add them unsafely to the basic blocks. This addition
+    // might break a list invariant (namely `tail_` might not point to the
+    // last element). We revalidate this invariant here in all basic blocks.
+ for (BasicBlock* block : *graph_) {
+ block->nodes().RevalidateTail();
+ }
}
Graph* graph() const { return graph_; }
@@ -61,6 +72,11 @@ class MaglevGraphBuilder {
BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
while (old_jump_targets != nullptr) {
BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
+ if (predecessor == nullptr) {
+ // We can have null predecessors if the predecessor is dead.
+ predecessor_index--;
+ continue;
+ }
ControlNode* control = predecessor->control_node();
if (control->Is<ConditionalControlNode>()) {
// CreateEmptyBlock automatically registers itself with the offset.
@@ -94,11 +110,60 @@ class MaglevGraphBuilder {
}
}
+ // Return true if the given offset is a merge point, i.e. there are jumps
+  // targeting it.
+ bool IsOffsetAMergePoint(int offset) {
+ return merge_states_[offset] != nullptr;
+ }
+
+ // Called when a block is killed by an unconditional eager deopt.
+ void EmitUnconditionalDeopt() {
+    // Create a block rather than calling finish, since we don't yet know the
+    // next block's offset until the loop below has skipped the rest of the
+    // bytecodes.
+ BasicBlock* block = CreateBlock<Deopt>({});
+ ResolveJumpsToBlockAtOffset(block, block_offset_);
+
+ // Skip any bytecodes remaining in the block, up to the next merge point.
+ while (!IsOffsetAMergePoint(iterator_.next_offset())) {
+ iterator_.Advance();
+ if (iterator_.done()) break;
+ }
+
+ // If there is control flow out of this block, we need to kill the merges
+ // into the control flow targets.
+ interpreter::Bytecode bytecode = iterator_.current_bytecode();
+ if (interpreter::Bytecodes::IsForwardJump(bytecode)) {
+ // Jumps merge into their target, and conditional jumps also merge into
+ // the fallthrough.
+ merge_states_[iterator_.GetJumpTargetOffset()]->MergeDead();
+ if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ merge_states_[iterator_.next_offset()]->MergeDead();
+ }
+ } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
+ // JumpLoop merges into its loop header, which has to be treated specially
+      // by the merge.
+ merge_states_[iterator_.GetJumpTargetOffset()]->MergeDeadLoop();
+ } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
+ // Switches merge into their targets, and into the fallthrough.
+ for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
+ merge_states_[offset.target_offset]->MergeDead();
+ }
+ merge_states_[iterator_.next_offset()]->MergeDead();
+ } else if (!interpreter::Bytecodes::Returns(bytecode) &&
+ !interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
+ // Any other bytecode that doesn't return or throw will merge into the
+ // fallthrough.
+ merge_states_[iterator_.next_offset()]->MergeDead();
+ }
+ }
+
void VisitSingleBytecode() {
int offset = iterator_.current_offset();
if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
if (current_block_ != nullptr) {
- DCHECK(!current_block_->nodes().is_empty());
+ // TODO(leszeks): Re-evaluate this DCHECK, we might hit it if the only
+        // bytecodes in this basic block were register juggling.
+ // DCHECK(!current_block_->nodes().is_empty());
FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);
merge_states_[offset]->Merge(*compilation_unit_,
@@ -109,6 +174,10 @@ class MaglevGraphBuilder {
StartNewBlock(offset);
}
DCHECK_NOT_NULL(current_block_);
+#ifdef DEBUG
+ // Clear new nodes for the next VisitFoo
+ new_nodes_.clear();
+#endif
switch (iterator_.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
@@ -125,37 +194,41 @@ class MaglevGraphBuilder {
template <typename NodeT>
NodeT* AddNode(NodeT* node) {
+ if (node->properties().is_required_when_unused()) {
+ MarkPossibleSideEffect();
+ }
current_block_->nodes().Add(node);
- return node;
- }
-
- template <typename NodeT, typename... Args>
- NodeT* NewNode(size_t input_count, Args&&... args) {
- NodeT* node =
- Node::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+#ifdef DEBUG
+ new_nodes_.insert(node);
+#endif
return node;
}
- template <Operation kOperation, typename... Args>
- ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
- Args&&... args);
-
template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) {
- return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
+ return AddNode(
+ CreateNewNode<NodeT>(input_count, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
- NodeT* NewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
- NodeT* node = Node::New<NodeT>(zone(), inputs, std::forward<Args>(args)...);
- if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
- return node;
+ NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ return AddNode(CreateNewNode<NodeT>(inputs, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
- NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
- return AddNode(NewNode<NodeT>(inputs, std::forward<Args>(args)...));
+ NodeT* CreateNewNode(Args&&... args) {
+ if constexpr (NodeT::kProperties.can_eager_deopt()) {
+ return NodeBase::New<NodeT>(zone(), *compilation_unit_,
+ GetLatestCheckpointedState(),
+ std::forward<Args>(args)...);
+ } else if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ return NodeBase::New<NodeT>(zone(), *compilation_unit_,
+ GetCheckpointedStateForLazyDeopt(),
+ std::forward<Args>(args)...);
+ } else {
+ return NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
+ }
}
ValueNode* GetContext() const {
@@ -170,48 +243,130 @@ class MaglevGraphBuilder {
template <class T, typename = std::enable_if_t<
std::is_convertible<T*, Object*>::value>>
typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
- return MakeRef(broker(),
- Handle<T>::cast(iterator_.GetConstantForIndexOperand(
- operand_index, isolate())));
+    // The BytecodeArray itself was fetched by using a barrier, so all reads
+ // from the constant pool are safe.
+ return MakeRefAssumeMemoryFence(
+ broker(), broker()->CanonicalPersistentHandle(
+ Handle<T>::cast(iterator_.GetConstantForIndexOperand(
+ operand_index, local_isolate()))));
+ }
+
+ ValueNode* GetConstant(const compiler::ObjectRef& ref) {
+ if (ref.IsSmi()) {
+ return AddNewNode<SmiConstant>({}, Smi::FromInt(ref.AsSmi()));
+ }
+ // TODO(leszeks): Detect roots and use RootConstant.
+ return AddNewNode<Constant>({}, ref.AsHeapObject());
+ }
+
+ // Move an existing ValueNode between two registers. You can pass
+ // virtual_accumulator as the src or dst to move in or out of the accumulator.
+ void MoveNodeBetweenRegisters(interpreter::Register src,
+ interpreter::Register dst) {
+ // We shouldn't be moving newly created nodes between registers.
+ DCHECK_EQ(0, new_nodes_.count(current_interpreter_frame_.get(src)));
+ DCHECK_NOT_NULL(current_interpreter_frame_.get(src));
+
+ current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));
+ }
+
+ ValueNode* GetTaggedValue(interpreter::Register reg) {
+ // TODO(victorgomes): Add the representation (Tagged/Untagged) in the
+    // InterpreterFrameState, so that we don't need to dereference a node.
+ ValueNode* value = current_interpreter_frame_.get(reg);
+ if (!value->is_untagged_value()) return value;
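+    // The value is untagged: either reuse the tagged input of an untag node,
+    // or (checked-)tag the Int32 result and cache it back into the frame.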
+ if (value->Is<CheckedSmiUntag>()) {
+ return value->input(0).node();
+ }
+ DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
+ ValueNode* tagged = AddNewNode<CheckedSmiTag>({value});
+ current_interpreter_frame_.set(reg, tagged);
+ return tagged;
}
- void SetAccumulator(ValueNode* node) {
- current_interpreter_frame_.set_accumulator(node);
+ ValueNode* GetSmiUntaggedValue(interpreter::Register reg) {
+ // TODO(victorgomes): Add the representation (Tagged/Untagged) in the
+    // InterpreterFrameState, so that we don't need to dereference a node.
+ ValueNode* value = current_interpreter_frame_.get(reg);
+ if (value->is_untagged_value()) return value;
+ if (value->Is<CheckedSmiTag>()) return value->input(0).node();
+ // Untag any other value.
+ ValueNode* untagged = AddNewNode<CheckedSmiUntag>({value});
+ current_interpreter_frame_.set(reg, untagged);
+ return untagged;
}
- ValueNode* GetAccumulator() const {
- return current_interpreter_frame_.accumulator();
+ ValueNode* GetAccumulatorTaggedValue() {
+ return GetTaggedValue(interpreter::Register::virtual_accumulator());
}
- ValueNode* LoadRegister(int operand_index) {
+ ValueNode* GetAccumulatorSmiUntaggedValue() {
+ return GetSmiUntaggedValue(interpreter::Register::virtual_accumulator());
+ }
+
+ bool IsRegisterEqualToAccumulator(int operand_index) {
interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
- return current_interpreter_frame_.get(source);
+ return current_interpreter_frame_.get(source) ==
+ current_interpreter_frame_.accumulator();
}
- void StoreRegister(interpreter::Register target, ValueNode* value,
- const compiler::BytecodeLivenessState* liveness) {
- if (target.index() >= 0 && !liveness->RegisterIsLive(target.index())) {
- return;
- }
+ ValueNode* LoadRegisterTaggedValue(int operand_index) {
+ return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
+ }
+
+ ValueNode* LoadRegisterSmiUntaggedValue(int operand_index) {
+ return GetSmiUntaggedValue(iterator_.GetRegisterOperand(operand_index));
+ }
+
+ template <typename NodeT>
+ void SetAccumulator(NodeT* node) {
+ // Accumulator stores are equivalent to stores to the virtual accumulator
+ // register.
+ StoreRegister(interpreter::Register::virtual_accumulator(), node);
+ }
+
+ template <typename NodeT>
+ void StoreRegister(interpreter::Register target, NodeT* value) {
+ // We should only set register values to nodes that were newly created in
+ // this Visit. Existing nodes should be moved between registers with
+ // MoveNodeBetweenRegisters.
+ DCHECK_NE(0, new_nodes_.count(value));
+ MarkAsLazyDeoptResult(value, target);
current_interpreter_frame_.set(target, value);
- AddNewNode<StoreToFrame>({}, value, target);
}
- void AddCheckpoint() {
- // TODO(v8:7700): Verify this calls the initializer list overload.
- AddNewNode<Checkpoint>({}, iterator_.current_offset(),
- GetInLiveness()->AccumulatorIsLive(),
- GetAccumulator());
- has_valid_checkpoint_ = true;
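+  // Returns the frame state snapshot used for eager deopts. It is built
+  // lazily and reused until a possible side effect invalidates it (see
+  // MarkPossibleSideEffect).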
+ CheckpointedInterpreterState GetLatestCheckpointedState() {
+ if (!latest_checkpointed_state_) {
+ latest_checkpointed_state_.emplace(
+ BytecodeOffset(iterator_.current_offset()),
+ zone()->New<CompactInterpreterFrameState>(
+ *compilation_unit_, GetInLiveness(), current_interpreter_frame_));
+ }
+ return *latest_checkpointed_state_;
}
- void EnsureCheckpoint() {
- if (!has_valid_checkpoint_) AddCheckpoint();
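+  // Lazy deopts resume after the current bytecode, so they snapshot the frame
+  // state with the out-liveness instead of reusing the cached eager-deopt
+  // state.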
+ CheckpointedInterpreterState GetCheckpointedStateForLazyDeopt() {
+ return CheckpointedInterpreterState(
+ BytecodeOffset(iterator_.current_offset()),
+ zone()->New<CompactInterpreterFrameState>(
+ *compilation_unit_, GetOutLiveness(), current_interpreter_frame_));
+ }
+
+ template <typename NodeT>
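+  // For nodes that can lazy-deopt, record the interpreter register that will
+  // receive their result, so the deopt knows where to write the value back.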
+ void MarkAsLazyDeoptResult(NodeT* value,
+ interpreter::Register result_location) {
+ DCHECK_EQ(NodeT::kProperties.can_lazy_deopt(),
+ value->properties().can_lazy_deopt());
+ if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ DCHECK(result_location.is_valid());
+ DCHECK(!value->lazy_deopt_info()->result_location.is_valid());
+ value->lazy_deopt_info()->result_location = result_location;
+ }
}
void MarkPossibleSideEffect() {
// If there was a potential side effect, invalidate the previous checkpoint.
- has_valid_checkpoint_ = false;
+ latest_checkpointed_state_.reset();
}
int next_offset() const {
@@ -233,8 +388,8 @@ class MaglevGraphBuilder {
template <typename ControlNodeT, typename... Args>
BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
- current_block_->set_control_node(NodeBase::New<ControlNodeT>(
- zone(), control_inputs, std::forward<Args>(args)...));
+ current_block_->set_control_node(CreateNewNode<ControlNodeT>(
+ control_inputs, std::forward<Args>(args)...));
BasicBlock* block = current_block_;
current_block_ = nullptr;
@@ -246,51 +401,65 @@ class MaglevGraphBuilder {
return block;
}
+  // Update all jumps which were targeting the not-yet-created block at the
+ // given `block_offset`, to now point to the given `block`.
+ void ResolveJumpsToBlockAtOffset(BasicBlock* block, int block_offset) const {
+ BasicBlockRef* jump_target_refs_head =
+ jump_targets_[block_offset].SetToBlockAndReturnNext(block);
+ while (jump_target_refs_head != nullptr) {
+ jump_target_refs_head =
+ jump_target_refs_head->SetToBlockAndReturnNext(block);
+ }
+ DCHECK_EQ(jump_targets_[block_offset].block_ptr(), block);
+ }
+
template <typename ControlNodeT, typename... Args>
BasicBlock* FinishBlock(int next_block_offset,
std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
BasicBlock* block =
CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
-
- // Resolve pointers to this basic block.
- BasicBlockRef* jump_target_refs_head =
- jump_targets_[block_offset_].SetToBlockAndReturnNext(block);
- while (jump_target_refs_head != nullptr) {
- jump_target_refs_head =
- jump_target_refs_head->SetToBlockAndReturnNext(block);
- }
- DCHECK_EQ(jump_targets_[block_offset_].block_ptr(), block);
+ ResolveJumpsToBlockAtOffset(block, block_offset_);
// If the next block has merge states, then it's not a simple fallthrough,
// and we should reset the checkpoint validity.
if (merge_states_[next_block_offset] != nullptr) {
- has_valid_checkpoint_ = false;
+ latest_checkpointed_state_.reset();
}
// Start a new block for the fallthrough path, unless it's a merge point, in
// which case we merge our state into it. That merge-point could also be a
// loop header, in which case the merge state might not exist yet (if the
// only predecessors are this path and the JumpLoop).
+ DCHECK_NULL(current_block_);
if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
if (NumPredecessors(next_block_offset) == 1) {
StartNewBlock(next_block_offset);
} else {
- DCHECK_NULL(current_block_);
MergeIntoFrameState(block, next_block_offset);
}
}
return block;
}
+ void BuildCallFromRegisterList(ConvertReceiverMode receiver_mode);
+ void BuildCallFromRegisters(int argc_count,
+ ConvertReceiverMode receiver_mode);
+
+ void BuildPropertyCellAccess(const compiler::PropertyCellRef& property_cell);
+
template <Operation kOperation>
void BuildGenericUnaryOperationNode();
template <Operation kOperation>
void BuildGenericBinaryOperationNode();
+ template <Operation kOperation>
+ void BuildGenericBinarySmiOperationNode();
template <Operation kOperation>
void VisitUnaryOperation();
template <Operation kOperation>
void VisitBinaryOperation();
+ template <Operation kOperation>
+ void VisitBinarySmiOperation();
void MergeIntoFrameState(BasicBlock* block, int target);
void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
@@ -332,10 +501,14 @@ class MaglevGraphBuilder {
const compiler::FeedbackVectorRef& feedback() const {
return compilation_unit_->feedback();
}
- const FeedbackNexus feedback_nexus(int slot_operand_index) const {
- // TODO(leszeks): Use JSHeapBroker here.
+ const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const {
return FeedbackNexus(feedback().object(),
- GetSlotOperand(slot_operand_index));
+ GetSlotOperand(slot_operand_index),
+ broker()->feedback_nexus_config());
+ }
+ const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const {
+ return FeedbackNexus(feedback().object(), slot,
+ broker()->feedback_nexus_config());
}
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode();
@@ -343,7 +516,7 @@ class MaglevGraphBuilder {
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis();
}
- Isolate* isolate() const { return compilation_unit_->isolate(); }
+ LocalIsolate* local_isolate() const { return local_isolate_; }
Zone* zone() const { return compilation_unit_->zone(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); }
@@ -354,6 +527,7 @@ class MaglevGraphBuilder {
return compilation_unit_->graph_labeller();
}
+ LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const compilation_unit_;
interpreter::BytecodeArrayIterator iterator_;
uint32_t* predecessors_;
@@ -361,7 +535,7 @@ class MaglevGraphBuilder {
// Current block information.
BasicBlock* current_block_ = nullptr;
int block_offset_ = 0;
- bool has_valid_checkpoint_ = false;
+ base::Optional<CheckpointedInterpreterState> latest_checkpointed_state_;
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;
@@ -374,6 +548,10 @@ class MaglevGraphBuilder {
// TODO(v8:7700): Clean up after all bytecodes are supported.
bool found_unsupported_bytecode_ = false;
bool this_field_will_be_unused_once_all_bytecodes_are_supported_;
+
+#ifdef DEBUG
+ std::unordered_set<Node*> new_nodes_;
+#endif
};
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-graph-printer.cc b/deps/v8/src/maglev/maglev-graph-printer.cc
index ccd7bfbad8..61bdc8e417 100644
--- a/deps/v8/src/maglev/maglev-graph-printer.cc
+++ b/deps/v8/src/maglev/maglev-graph-printer.cc
@@ -211,8 +211,8 @@ int MaglevPrintingVisitorOstream::overflow(int c) {
MaglevPrintingVisitor::MaglevPrintingVisitor(std::ostream& os)
: os_(os),
- os_for_additional_info_(new MaglevPrintingVisitorOstream(os_, &targets)) {
-}
+ os_for_additional_info_(
+ new MaglevPrintingVisitorOstream(os_, &targets_)) {}
void MaglevPrintingVisitor::PreProcessGraph(
MaglevCompilationUnit* compilation_unit, Graph* graph) {
@@ -221,7 +221,7 @@ void MaglevPrintingVisitor::PreProcessGraph(
for (BasicBlock* block : *graph) {
if (block->control_node()->Is<JumpLoop>()) {
- loop_headers.insert(block->control_node()->Cast<JumpLoop>()->target());
+ loop_headers_.insert(block->control_node()->Cast<JumpLoop>()->target());
}
}
@@ -229,31 +229,31 @@ void MaglevPrintingVisitor::PreProcessGraph(
for (BlockConstIterator block_it = graph->begin(); block_it != graph->end();
++block_it) {
BasicBlock* block = *block_it;
- std::replace(targets.begin(), targets.end(), block,
+ std::replace(targets_.begin(), targets_.end(), block,
static_cast<BasicBlock*>(nullptr));
- if (loop_headers.find(block) != loop_headers.end()) {
- AddTarget(targets, block);
+ if (loop_headers_.find(block) != loop_headers_.end()) {
+ AddTarget(targets_, block);
}
ControlNode* node = block->control_node();
if (node->Is<JumpLoop>()) {
BasicBlock* target = node->Cast<JumpLoop>()->target();
- std::replace(targets.begin(), targets.end(), target,
+ std::replace(targets_.begin(), targets_.end(), target,
static_cast<BasicBlock*>(nullptr));
} else if (node->Is<UnconditionalControlNode>()) {
- AddTargetIfNotNext(targets,
+ AddTargetIfNotNext(targets_,
node->Cast<UnconditionalControlNode>()->target(),
*(block_it + 1));
} else if (node->Is<ConditionalControlNode>()) {
- AddTargetIfNotNext(targets,
+ AddTargetIfNotNext(targets_,
node->Cast<ConditionalControlNode>()->if_true(),
*(block_it + 1));
- AddTargetIfNotNext(targets,
+ AddTargetIfNotNext(targets_,
node->Cast<ConditionalControlNode>()->if_false(),
*(block_it + 1));
}
}
- DCHECK(std::all_of(targets.begin(), targets.end(),
+ DCHECK(std::all_of(targets_.begin(), targets_.end(),
[](BasicBlock* block) { return block == nullptr; }));
}
@@ -262,19 +262,19 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
MaglevGraphLabeller* graph_labeller = compilation_unit->graph_labeller();
size_t loop_position = static_cast<size_t>(-1);
- if (loop_headers.erase(block) > 0) {
- loop_position = AddTarget(targets, block);
+ if (loop_headers_.erase(block) > 0) {
+ loop_position = AddTarget(targets_, block);
}
{
bool saw_start = false;
- for (size_t i = 0; i < targets.size(); ++i) {
+ for (size_t i = 0; i < targets_.size(); ++i) {
Connection c;
if (saw_start) {
c.AddHorizontal();
}
// If this is one of the arrows pointing to this block, terminate the
// line by connecting it rightwards.
- if (targets[i] == block) {
+ if (targets_[i] == block) {
c.Connect(kRight);
// If this is the loop header, go down instead of up and don't clear
// the target.
@@ -282,10 +282,10 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
c.Connect(kBottom);
} else {
c.Connect(kTop);
- targets[i] = nullptr;
+ targets_[i] = nullptr;
}
saw_start = true;
- } else if (c.connected == 0 && targets[i] != nullptr) {
+ } else if (c.connected == 0 && targets_[i] != nullptr) {
// If this is another arrow, connect it, but only if that doesn't
// clobber any existing drawing.
c.AddVertical();
@@ -301,10 +301,99 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)->set_padding(1);
}
+namespace {
+
+template <typename NodeT>
+void PrintEagerDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeT* node, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ PrintVerticalArrows(os, targets);
+ PrintPadding(os, graph_labeller, 0);
+
+ EagerDeoptInfo* deopt_info = node->eager_deopt_info();
+ os << " ↱ eager @" << deopt_info->state.bytecode_position << " : {";
+ bool first = true;
+ int index = 0;
+ deopt_info->state.register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << reg.ToString() << ":" << PrintNodeLabel(graph_labeller, node)
+ << ":" << deopt_info->input_locations[index].operand();
+ index++;
+ });
+ os << "}\n";
+}
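+
+// Prints eager-deopt info only for opcodes whose static properties allow
+// eager deopts; all other opcodes fall through the switch with no output.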
+void MaybePrintEagerDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+#define CASE(Name) \
+ case Opcode::k##Name: \
+ if constexpr (Name::kProperties.can_eager_deopt()) { \
+ PrintEagerDeopt<Name>(os, targets, node->Cast<Name>(), state); \
+ } \
+ break;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+}
+
+template <typename NodeT>
+void PrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeT* node, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ PrintVerticalArrows(os, targets);
+ PrintPadding(os, graph_labeller, 0);
+
+ LazyDeoptInfo* deopt_info = node->lazy_deopt_info();
+ os << " ↳ lazy @" << deopt_info->state.bytecode_position << " : {";
+ bool first = true;
+ int index = 0;
+ deopt_info->state.register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << reg.ToString() << ":";
+ if (reg == deopt_info->result_location) {
+ os << "<result>";
+ } else {
+ os << PrintNodeLabel(graph_labeller, node) << ":"
+ << deopt_info->input_locations[index].operand();
+ }
+ index++;
+ });
+ os << "}\n";
+}
+void MaybePrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+#define CASE(Name) \
+ case Opcode::k##Name: \
+ if constexpr (Name::kProperties.can_lazy_deopt()) { \
+ PrintLazyDeopt<Name>(os, targets, node->Cast<Name>(), state); \
+ } \
+ break;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+}
+
+} // namespace
+
void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, phi);
os_ << "Phi (";
// Manually walk Phi inputs to print just the node labels, without
@@ -312,7 +401,11 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
// moves).
for (int i = 0; i < phi->input_count(); ++i) {
if (i > 0) os_ << ", ";
- os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+ if (state.block()->predecessor_at(i) == nullptr) {
+ os_ << "<dead>";
+ } else {
+ os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+ }
}
os_ << ") → " << phi->result().operand() << "\n";
@@ -322,27 +415,34 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
- PrintVerticalArrows(os_, targets);
+
+ MaybePrintEagerDeopt(os_, targets_, node, state);
+
+ PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, node);
os_ << PrintNode(graph_labeller, node) << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(graph_labeller->max_node_id_width() + 4);
+
+ MaybePrintLazyDeopt(os_, targets_, node, state);
}
void MaglevPrintingVisitor::Process(ControlNode* control_node,
const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+ MaybePrintEagerDeopt(os_, targets_, control_node, state);
+
bool has_fallthrough = false;
if (control_node->Is<JumpLoop>()) {
BasicBlock* target = control_node->Cast<JumpLoop>()->target();
- PrintVerticalArrows(os_, targets, {}, {target}, true);
+ PrintVerticalArrows(os_, targets_, {}, {target}, true);
os_ << "◄─";
PrintPaddedId(os_, graph_labeller, control_node, "─", -2);
- std::replace(targets.begin(), targets.end(), target,
+ std::replace(targets_.begin(), targets_.end(), target,
static_cast<BasicBlock*>(nullptr));
} else if (control_node->Is<UnconditionalControlNode>()) {
@@ -350,9 +450,9 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
control_node->Cast<UnconditionalControlNode>()->target();
std::set<size_t> arrows_starting_here;
- has_fallthrough |= !AddTargetIfNotNext(targets, target, state.next_block(),
+ has_fallthrough |= !AddTargetIfNotNext(targets_, target, state.next_block(),
&arrows_starting_here);
- PrintVerticalArrows(os_, targets, arrows_starting_here);
+ PrintVerticalArrows(os_, targets_, arrows_starting_here);
PrintPaddedId(os_, graph_labeller, control_node,
has_fallthrough ? " " : "─");
@@ -364,14 +464,14 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
std::set<size_t> arrows_starting_here;
has_fallthrough |= !AddTargetIfNotNext(
- targets, false_target, state.next_block(), &arrows_starting_here);
+ targets_, false_target, state.next_block(), &arrows_starting_here);
has_fallthrough |= !AddTargetIfNotNext(
- targets, true_target, state.next_block(), &arrows_starting_here);
- PrintVerticalArrows(os_, targets, arrows_starting_here);
+ targets_, true_target, state.next_block(), &arrows_starting_here);
+ PrintVerticalArrows(os_, targets_, arrows_starting_here);
PrintPaddedId(os_, graph_labeller, control_node, "─");
} else {
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, control_node);
}
@@ -383,13 +483,13 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
control_node->Cast<UnconditionalControlNode>()->target();
if (target->has_phi()) {
printed_phis = true;
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPadding(os_, graph_labeller, -1);
os_ << (has_fallthrough ? "│" : " ");
os_ << " with gap moves:\n";
int pid = state.block()->predecessor_id();
for (Phi* phi : *target->phis()) {
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPadding(os_, graph_labeller, -1);
os_ << (has_fallthrough ? "│" : " ");
os_ << " - ";
@@ -400,7 +500,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
}
}
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
if (has_fallthrough) {
PrintPadding(os_, graph_labeller, -1);
if (printed_phis) {
diff --git a/deps/v8/src/maglev/maglev-graph-printer.h b/deps/v8/src/maglev/maglev-graph-printer.h
index d416293d08..6250727460 100644
--- a/deps/v8/src/maglev/maglev-graph-printer.h
+++ b/deps/v8/src/maglev/maglev-graph-printer.h
@@ -26,9 +26,6 @@ class ProcessingState;
class MaglevPrintingVisitor {
public:
- // Could be interesting to print checkpoints too.
- static constexpr bool kNeedsCheckpointStates = false;
-
explicit MaglevPrintingVisitor(std::ostream& os);
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
@@ -43,8 +40,8 @@ class MaglevPrintingVisitor {
private:
std::ostream& os_;
std::unique_ptr<std::ostream> os_for_additional_info_;
- std::set<BasicBlock*> loop_headers;
- std::vector<BasicBlock*> targets;
+ std::set<BasicBlock*> loop_headers_;
+ std::vector<BasicBlock*> targets_;
};
void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
diff --git a/deps/v8/src/maglev/maglev-graph-processor.h b/deps/v8/src/maglev/maglev-graph-processor.h
index 892fe6071b..557d969b47 100644
--- a/deps/v8/src/maglev/maglev-graph-processor.h
+++ b/deps/v8/src/maglev/maglev-graph-processor.h
@@ -24,10 +24,6 @@ namespace maglev {
//
// It expects a NodeProcessor class with:
//
-// // True if the GraphProcessor should snapshot Checkpoint states for
-// // deopting nodes.
-// static constexpr bool kNeedsCheckpointStates;
-//
// // A function that processes the graph before the nodes are walked.
// void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
//
@@ -50,15 +46,8 @@ class GraphProcessor;
class ProcessingState {
public:
explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
- BlockConstIterator block_it,
- const InterpreterFrameState* interpreter_frame_state,
- const Checkpoint* checkpoint,
- const InterpreterFrameState* checkpoint_frame_state)
- : compilation_unit_(compilation_unit),
- block_it_(block_it),
- interpreter_frame_state_(interpreter_frame_state),
- checkpoint_(checkpoint),
- checkpoint_frame_state_(checkpoint_frame_state) {}
+ BlockConstIterator block_it)
+ : compilation_unit_(compilation_unit), block_it_(block_it) {}
// Disallow copies, since the underlying frame states stay mutable.
ProcessingState(const ProcessingState&) = delete;
@@ -67,20 +56,7 @@ class ProcessingState {
BasicBlock* block() const { return *block_it_; }
BasicBlock* next_block() const { return *(block_it_ + 1); }
- const InterpreterFrameState* interpreter_frame_state() const {
- DCHECK_NOT_NULL(interpreter_frame_state_);
- return interpreter_frame_state_;
- }
-
- const Checkpoint* checkpoint() const {
- DCHECK_NOT_NULL(checkpoint_);
- return checkpoint_;
- }
-
- const InterpreterFrameState* checkpoint_frame_state() const {
- DCHECK_NOT_NULL(checkpoint_frame_state_);
- return checkpoint_frame_state_;
- }
+ MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
int register_count() const { return compilation_unit_->register_count(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
@@ -92,27 +68,16 @@ class ProcessingState {
private:
MaglevCompilationUnit* compilation_unit_;
BlockConstIterator block_it_;
- const InterpreterFrameState* interpreter_frame_state_;
- const Checkpoint* checkpoint_;
- const InterpreterFrameState* checkpoint_frame_state_;
};
template <typename NodeProcessor>
class GraphProcessor {
public:
- static constexpr bool kNeedsCheckpointStates =
- NodeProcessor::kNeedsCheckpointStates;
-
template <typename... Args>
explicit GraphProcessor(MaglevCompilationUnit* compilation_unit,
Args&&... args)
: compilation_unit_(compilation_unit),
- node_processor_(std::forward<Args>(args)...),
- current_frame_state_(*compilation_unit_) {
- if (kNeedsCheckpointStates) {
- checkpoint_state_.emplace(*compilation_unit_);
- }
- }
+ node_processor_(std::forward<Args>(args)...) {}
void ProcessGraph(Graph* graph) {
graph_ = graph;
@@ -124,14 +89,6 @@ class GraphProcessor {
node_processor_.PreProcessBasicBlock(compilation_unit_, block);
- if (block->has_state()) {
- current_frame_state_.CopyFrom(*compilation_unit_, *block->state());
- if (kNeedsCheckpointStates) {
- checkpoint_state_->last_checkpoint_block_it = block_it_;
- checkpoint_state_->last_checkpoint_node_it = NodeConstIterator();
- }
- }
-
if (block->has_phi()) {
for (Phi* phi : *block->phis()) {
node_processor_.Process(phi, GetCurrentState());
@@ -155,11 +112,7 @@ class GraphProcessor {
private:
ProcessingState GetCurrentState() {
- return ProcessingState(
- compilation_unit_, block_it_, &current_frame_state_,
- kNeedsCheckpointStates ? checkpoint_state_->latest_checkpoint : nullptr,
- kNeedsCheckpointStates ? &checkpoint_state_->checkpoint_frame_state
- : nullptr);
+ return ProcessingState(compilation_unit_, block_it_);
}
void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
@@ -176,170 +129,6 @@ class GraphProcessor {
void PreProcess(NodeBase* node, const ProcessingState& state) {}
- void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
- current_frame_state_.set_accumulator(checkpoint->accumulator());
- if (kNeedsCheckpointStates) {
- checkpoint_state_->latest_checkpoint = checkpoint;
- if (checkpoint->is_used()) {
- checkpoint_state_->checkpoint_frame_state.CopyFrom(
- *compilation_unit_, current_frame_state_);
- checkpoint_state_->last_checkpoint_block_it = block_it_;
- checkpoint_state_->last_checkpoint_node_it = node_it_;
- ClearDeadCheckpointNodes();
- }
- }
- }
-
- void PreProcess(StoreToFrame* store_to_frame, const ProcessingState& state) {
- current_frame_state_.set(store_to_frame->target(), store_to_frame->value());
- }
-
- void PreProcess(SoftDeopt* node, const ProcessingState& state) {
- PreProcessDeoptingNode();
- }
-
- void PreProcess(CheckMaps* node, const ProcessingState& state) {
- PreProcessDeoptingNode();
- }
-
- void PreProcessDeoptingNode() {
- if (!kNeedsCheckpointStates) return;
-
- Checkpoint* checkpoint = checkpoint_state_->latest_checkpoint;
- if (checkpoint->is_used()) {
- DCHECK(!checkpoint_state_->last_checkpoint_node_it.is_null());
- DCHECK_EQ(checkpoint, *checkpoint_state_->last_checkpoint_node_it);
- return;
- }
- DCHECK_IMPLIES(!checkpoint_state_->last_checkpoint_node_it.is_null(),
- checkpoint != *checkpoint_state_->last_checkpoint_node_it);
-
- // TODO(leszeks): The following code is _ugly_, should figure out how to
- // clean it up.
-
- // Go to the previous state checkpoint (either on the Checkpoint that
- // provided the current checkpoint snapshot, or on a BasicBlock).
- BlockConstIterator block_it = checkpoint_state_->last_checkpoint_block_it;
- NodeConstIterator node_it = checkpoint_state_->last_checkpoint_node_it;
- if (node_it.is_null()) {
- // There was no recent enough Checkpoint node, and the block iterator
- // points at a basic block with a state snapshot. Copy that snapshot and
- // start iterating from there.
- BasicBlock* block = *block_it;
- DCHECK(block->has_state());
- checkpoint_state_->checkpoint_frame_state.CopyFrom(*compilation_unit_,
- *block->state());
-
- // Start iterating from the first node in the block.
- node_it = block->nodes().begin();
- } else {
- // The node iterator should point at the previous Checkpoint node. We
- // don't need that Checkpoint state snapshot anymore, we're making a new
- // one, so we can just reuse the snapshot as-is without copying it.
- DCHECK_NE(*node_it, checkpoint);
- DCHECK((*node_it)->Is<Checkpoint>());
- DCHECK((*node_it)->Cast<Checkpoint>()->is_used());
-
- // Advance it by one since we don't need to check this node anymore.
- ++node_it;
- }
-
- // Now walk forward to the checkpoint, and apply any StoreToFrame operations
- // along the way into the snapshotted checkpoint state.
- BasicBlock* block = *block_it;
- while (true) {
- // Check if we've run out of nodes in this block, and advance to the
- // next block if so.
- while (node_it == block->nodes().end()) {
- DCHECK_NE(block_it, graph_->end());
-
- // We should only end up visiting blocks with fallthrough to the next
- // block -- otherwise, the block should have had a frame state snapshot,
- // as either a merge block or a non-fallthrough jump target.
- if ((*block_it)->control_node()->Is<Jump>()) {
- DCHECK_EQ((*block_it)->control_node()->Cast<Jump>()->target(),
- *(block_it + 1));
- } else {
- DCHECK_IMPLIES((*block_it)
- ->control_node()
- ->Cast<ConditionalControlNode>()
- ->if_true() != *(block_it + 1),
- (*block_it)
- ->control_node()
- ->Cast<ConditionalControlNode>()
- ->if_false() != *(block_it + 1));
- }
-
- // Advance to the next block (which the above DCHECKs confirm is the
- // unconditional fallthrough from the previous block), and update the
- // cached block pointer.
- block_it++;
- block = *block_it;
-
- // We should never visit a block with state (aside from the very first
- // block we visit), since then that should have been our start point
- // to start with.
- DCHECK(!(*block_it)->has_state());
- node_it = (*block_it)->nodes().begin();
- }
-
- // We should never reach the current node, the "until" checkpoint node
- // should be before it.
- DCHECK_NE(node_it, node_it_);
-
- Node* node = *node_it;
-
- // Break once we hit the given Checkpoint node. This could be right at
- // the start of the iteration, if the BasicBlock held the snapshot and the
- // Checkpoint was the first node in it.
- if (node == checkpoint) break;
-
- // Update the state from the current node, if it's a state update.
- if (node->Is<StoreToFrame>()) {
- StoreToFrame* store_to_frame = node->Cast<StoreToFrame>();
- checkpoint_state_->checkpoint_frame_state.set(store_to_frame->target(),
- store_to_frame->value());
- } else {
- // Any checkpoints we meet along the way should be unused, otherwise
- // they should have provided the most recent state snapshot.
- DCHECK_IMPLIES(node->Is<Checkpoint>(),
- !node->Cast<Checkpoint>()->is_used());
- }
-
- // Continue to the next node.
- ++node_it;
- }
-
- checkpoint_state_->last_checkpoint_block_it = block_it;
- checkpoint_state_->last_checkpoint_node_it = node_it;
- checkpoint_state_->checkpoint_frame_state.set_accumulator(
- checkpoint->accumulator());
- ClearDeadCheckpointNodes();
- checkpoint->SetUsed();
- }
-
- // Walk the checkpointed state, and null out any values that are dead at this
- // checkpoint.
- // TODO(leszeks): Consider doing this on checkpoint copy, not as a
- // post-process step.
- void ClearDeadCheckpointNodes() {
- const compiler::BytecodeLivenessState* liveness =
- bytecode_analysis().GetInLivenessFor(
- checkpoint_state_->latest_checkpoint->bytecode_position());
- for (int i = 0; i < register_count(); ++i) {
- if (!liveness->RegisterIsLive(i)) {
- checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
- nullptr);
- }
- }
-
- // The accumulator is on the checkpoint node itself, and should have already
- // been nulled out during graph building if it's dead.
- DCHECK_EQ(
- !liveness->AccumulatorIsLive(),
- checkpoint_state_->checkpoint_frame_state.accumulator() == nullptr);
- }
-
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis();
@@ -350,19 +139,6 @@ class GraphProcessor {
Graph* graph_;
BlockConstIterator block_it_;
NodeConstIterator node_it_;
- InterpreterFrameState current_frame_state_;
-
- // The CheckpointState field only exists if the node processor needs
- // checkpoint states.
- struct CheckpointState {
- explicit CheckpointState(const MaglevCompilationUnit& compilation_unit)
- : checkpoint_frame_state(compilation_unit) {}
- Checkpoint* latest_checkpoint = nullptr;
- BlockConstIterator last_checkpoint_block_it;
- NodeConstIterator last_checkpoint_node_it;
- InterpreterFrameState checkpoint_frame_state;
- };
- base::Optional<CheckpointState> checkpoint_state_;
};
// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
@@ -373,8 +149,6 @@ class NodeMultiProcessor;
template <>
class NodeMultiProcessor<> {
public:
- static constexpr bool kNeedsCheckpointStates = false;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
@@ -387,9 +161,6 @@ class NodeMultiProcessor<Processor, Processors...>
using Base = NodeMultiProcessor<Processors...>;
public:
- static constexpr bool kNeedsCheckpointStates =
- Processor::kNeedsCheckpointStates || Base::kNeedsCheckpointStates;
-
template <typename Node>
void Process(Node* node, const ProcessingState& state) {
processor_.Process(node, state);
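
Illustrative sketch (not part of this patch): the NodeMultiProcessor above and the
MaglevGraphVerifier added in the next file both implement the NodeProcessor shape that
GraphProcessor documents at the top of maglev-graph-processor.h: PreProcessGraph, then
PreProcessBasicBlock per block, then Process per node (Phis first within a block), then
PostProcessGraph. A minimal processor with that shape, assuming the usual maglev headers
(NodeCountingProcessor is an illustrative name, not something in the tree), might look like:

// Sketch only: a trivial NodeProcessor that counts visited nodes, mirroring the
// member-function signatures GraphProcessor calls in the hunks above.
class NodeCountingProcessor {
 public:
  void PreProcessGraph(MaglevCompilationUnit*, Graph*) {}
  void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
  void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock*) {}
  void Process(NodeBase* node, const ProcessingState& state) { count_++; }

 private:
  int count_ = 0;  // Number of nodes the walk has visited so far.
};
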
diff --git a/deps/v8/src/maglev/maglev-graph-verifier.h b/deps/v8/src/maglev/maglev-graph-verifier.h
new file mode 100644
index 0000000000..55bd4e89a5
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-verifier.h
@@ -0,0 +1,143 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_VERIFIER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_VERIFIER_H_
+
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+std::ostream& operator<<(std::ostream& os, const ValueRepresentation& repr) {
+ switch (repr) {
+ case ValueRepresentation::kTagged:
+ os << "TaggedValue";
+ break;
+ case ValueRepresentation::kUntagged:
+ os << "UntaggedValue";
+ break;
+ }
+ return os;
+}
+
+class Graph;
+
+// TODO(victorgomes): Currently this only verifies that the inputs of all
+// ValueNodes have the expected tagged/untagged representation. Add more
+// verification later.
+class MaglevGraphVerifier {
+ public:
+ void PreProcessGraph(MaglevCompilationUnit* compilation_unit, Graph* graph) {
+ if (compilation_unit->has_graph_labeller()) {
+ graph_labeller_ = compilation_unit->graph_labeller();
+ }
+ }
+
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+ void CheckValueInputIs(NodeBase* node, int i, ValueRepresentation repr) {
+ ValueNode* input = node->input(i).node();
+ if (input->value_representation() != repr) {
+ std::ostringstream str;
+ str << "Type representation error: node ";
+ if (graph_labeller_) {
+ str << "#" << graph_labeller_->NodeId(node) << " : ";
+ }
+ str << node->opcode() << " (input @" << i << " = " << input->opcode()
+ << ") type " << input->value_representation() << " is not " << repr;
+ FATAL("%s", str.str().c_str());
+ }
+ }
+
+ void Process(NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+ case Opcode::kConstant:
+ case Opcode::kSmiConstant:
+ case Opcode::kInt32Constant:
+ case Opcode::kRootConstant:
+ case Opcode::kInitialValue:
+ case Opcode::kRegisterInput:
+ case Opcode::kGapMove:
+ case Opcode::kDeopt:
+ case Opcode::kJump:
+ case Opcode::kJumpLoop:
+ // No input.
+ DCHECK_EQ(node->input_count(), 0);
+ break;
+ case Opcode::kGenericNegate:
+ case Opcode::kGenericIncrement:
+ case Opcode::kGenericDecrement:
+ case Opcode::kCheckedSmiUntag:
+ case Opcode::kLoadField:
+ case Opcode::kLoadGlobal:
+ // TODO(victorgomes): Can we check that the input is actually a map?
+ case Opcode::kCheckMaps:
+ // TODO(victorgomes): Can we check that the input is Boolean?
+ case Opcode::kBranchIfTrue:
+ case Opcode::kBranchIfToBooleanTrue:
+ case Opcode::kReturn:
+ // Generic tagged unary operations.
+ DCHECK_EQ(node->input_count(), 1);
+ CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+ break;
+ case Opcode::kCheckedSmiTag:
+ // Untagged unary operations.
+ CheckValueInputIs(node, 0, ValueRepresentation::kUntagged);
+ break;
+ case Opcode::kGenericAdd:
+ case Opcode::kGenericSubtract:
+ case Opcode::kGenericMultiply:
+ case Opcode::kGenericDivide:
+ case Opcode::kGenericModulus:
+ case Opcode::kGenericExponentiate:
+ case Opcode::kGenericBitwiseAnd:
+ case Opcode::kGenericBitwiseOr:
+ case Opcode::kGenericBitwiseXor:
+ case Opcode::kGenericShiftLeft:
+ case Opcode::kGenericShiftRight:
+ case Opcode::kGenericShiftRightLogical:
+ case Opcode::kGenericBitwiseNot:
+ // TODO(victorgomes): Can we use the fact that these nodes return a
+ // Boolean?
+ case Opcode::kGenericEqual:
+ case Opcode::kGenericStrictEqual:
+ case Opcode::kGenericLessThan:
+ case Opcode::kGenericLessThanOrEqual:
+ case Opcode::kGenericGreaterThan:
+ case Opcode::kGenericGreaterThanOrEqual:
+ // TODO(victorgomes): Can we check that first input is an Object?
+ case Opcode::kStoreField:
+ case Opcode::kLoadNamedGeneric:
+ // Generic tagged binary operations.
+ DCHECK_EQ(node->input_count(), 2);
+ CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+ CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
+ break;
+ case Opcode::kInt32AddWithOverflow:
+ // Untagged binary operations.
+ CheckValueInputIs(node, 0, ValueRepresentation::kUntagged);
+ CheckValueInputIs(node, 1, ValueRepresentation::kUntagged);
+ break;
+ case Opcode::kCall:
+ case Opcode::kPhi:
+ // All inputs should be tagged.
+ for (int i = 0; i < node->input_count(); i++) {
+ CheckValueInputIs(node, i, ValueRepresentation::kTagged);
+ }
+ break;
+ }
+ }
+
+ private:
+ MaglevGraphLabeller* graph_labeller_ = nullptr;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_VERIFIER_H_
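
Illustrative sketch (not part of this patch): since the verifier is a NodeProcessor, it
would be run through the GraphProcessor from maglev-graph-processor.h. The exact call
site is not shown in this diff, so the surrounding names (compilation_unit, graph) and
the DEBUG guard are assumptions:

#ifdef DEBUG
  // Sketch: walk the whole graph once; CheckValueInputIs FATALs on any input
  // whose ValueRepresentation does not match what the consuming node expects.
  GraphProcessor<MaglevGraphVerifier> verifier(compilation_unit);
  verifier.ProcessGraph(graph);
#endif  // DEBUG
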
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.h b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
index 5a907607f9..a64b1b88c5 100644
--- a/deps/v8/src/maglev/maglev-interpreter-frame-state.h
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
@@ -10,6 +10,7 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#include "src/maglev/maglev-register-frame-array.h"
@@ -29,26 +30,30 @@ class InterpreterFrameState {
InterpreterFrameState(const MaglevCompilationUnit& info,
const InterpreterFrameState& state)
- : accumulator_(state.accumulator_), frame_(info) {
+ : frame_(info) {
frame_.CopyFrom(info, state.frame_, nullptr);
}
void CopyFrom(const MaglevCompilationUnit& info,
const InterpreterFrameState& state) {
- accumulator_ = state.accumulator_;
frame_.CopyFrom(info, state.frame_, nullptr);
}
inline void CopyFrom(const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
- void set_accumulator(ValueNode* value) { accumulator_ = value; }
- ValueNode* accumulator() const { return accumulator_; }
+ void set_accumulator(ValueNode* value) {
+ frame_[interpreter::Register::virtual_accumulator()] = value;
+ }
+ ValueNode* accumulator() const {
+ return frame_[interpreter::Register::virtual_accumulator()];
+ }
void set(interpreter::Register reg, ValueNode* value) {
DCHECK_IMPLIES(reg.is_parameter(),
reg == interpreter::Register::current_context() ||
reg == interpreter::Register::function_closure() ||
+ reg == interpreter::Register::virtual_accumulator() ||
reg.ToParameterIndex() >= 0);
frame_[reg] = value;
}
@@ -56,6 +61,7 @@ class InterpreterFrameState {
DCHECK_IMPLIES(reg.is_parameter(),
reg == interpreter::Register::current_context() ||
reg == interpreter::Register::function_closure() ||
+ reg == interpreter::Register::virtual_accumulator() ||
reg.ToParameterIndex() >= 0);
return frame_[reg];
}
@@ -63,10 +69,120 @@ class InterpreterFrameState {
const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
private:
- ValueNode* accumulator_ = nullptr;
RegisterFrameArray<ValueNode*> frame_;
};
+class CompactInterpreterFrameState {
+ public:
+ CompactInterpreterFrameState(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness)
+ : live_registers_and_accumulator_(
+ info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
+ liveness_(liveness) {}
+
+ CompactInterpreterFrameState(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness,
+ const InterpreterFrameState& state)
+ : CompactInterpreterFrameState(info, liveness) {
+ ForEachValue(info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = state.get(reg);
+ });
+ }
+
+ CompactInterpreterFrameState(const CompactInterpreterFrameState&) = delete;
+ CompactInterpreterFrameState(CompactInterpreterFrameState&&) = delete;
+ CompactInterpreterFrameState& operator=(const CompactInterpreterFrameState&) =
+ delete;
+ CompactInterpreterFrameState& operator=(CompactInterpreterFrameState&&) =
+ delete;
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(live_registers_and_accumulator_[i], reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(live_registers_and_accumulator_[i], reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(const MaglevCompilationUnit& info, Function&& f) const {
+ int live_reg = 0;
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(live_registers_and_accumulator_[info.parameter_count() + live_reg++],
+ reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(const MaglevCompilationUnit& info, Function&& f) {
+ int live_reg = 0;
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(live_registers_and_accumulator_[info.parameter_count() + live_reg++],
+ reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
+ ForEachParameter(info, f);
+ ForEachLocal(info, f);
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
+ ForEachParameter(info, f);
+ ForEachLocal(info, f);
+ }
+
+ template <typename Function>
+ void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
+ ForEachRegister(info, f);
+ if (liveness_->AccumulatorIsLive()) {
+ f(accumulator(info), interpreter::Register::virtual_accumulator());
+ }
+ }
+
+ template <typename Function>
+ void ForEachValue(const MaglevCompilationUnit& info, Function&& f) const {
+ ForEachRegister(info, f);
+ if (liveness_->AccumulatorIsLive()) {
+ f(accumulator(info), interpreter::Register::virtual_accumulator());
+ }
+ }
+
+ const compiler::BytecodeLivenessState* liveness() const { return liveness_; }
+
+ ValueNode*& accumulator(const MaglevCompilationUnit& info) {
+ return live_registers_and_accumulator_[size(info) - 1];
+ }
+ ValueNode* accumulator(const MaglevCompilationUnit& info) const {
+ return live_registers_and_accumulator_[size(info) - 1];
+ }
+
+ size_t size(const MaglevCompilationUnit& info) const {
+ return SizeFor(info, liveness_);
+ }
+
+ private:
+ static size_t SizeFor(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness) {
+ return info.parameter_count() + liveness->live_value_count();
+ }
+
+ ValueNode** const live_registers_and_accumulator_;
+ const compiler::BytecodeLivenessState* const liveness_;
+};
+
class MergePointRegisterState {
public:
class Iterator {
@@ -108,6 +224,8 @@ class MergePointRegisterState {
class MergePointInterpreterFrameState {
public:
+ static constexpr BasicBlock* kDeadPredecessor = nullptr;
+
void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
int merge_offset, interpreter::Register reg,
ValueNode* value) {
@@ -132,17 +250,8 @@ class MergePointInterpreterFrameState {
const compiler::BytecodeLivenessState* liveness)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
- live_registers_and_accumulator_(
- info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
- liveness_(liveness),
- predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
- int live_index = 0;
- ForEachRegister(info, [&](interpreter::Register reg) {
- live_registers_and_accumulator_[live_index++] = state.get(reg);
- });
- if (liveness_->AccumulatorIsLive()) {
- live_registers_and_accumulator_[live_index++] = state.accumulator();
- }
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
+ frame_state_(info, liveness, state) {
predecessors_[0] = predecessor;
}
@@ -152,27 +261,24 @@ class MergePointInterpreterFrameState {
const compiler::LoopInfo* loop_info)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
- live_registers_and_accumulator_(
- info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
- liveness_(liveness),
- predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
- int live_index = 0;
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
+ frame_state_(info, liveness) {
auto& assignments = loop_info->assignments();
- ForEachParameter(info, [&](interpreter::Register reg) {
- ValueNode* value = nullptr;
- if (assignments.ContainsParameter(reg.ToParameterIndex())) {
- value = NewLoopPhi(info.zone(), reg, merge_offset, value);
- }
- live_registers_and_accumulator_[live_index++] = value;
- });
- ForEachLocal([&](interpreter::Register reg) {
- ValueNode* value = nullptr;
- if (assignments.ContainsLocal(reg.index())) {
- value = NewLoopPhi(info.zone(), reg, merge_offset, value);
- }
- live_registers_and_accumulator_[live_index++] = value;
- });
- DCHECK(!liveness_->AccumulatorIsLive());
+ frame_state_.ForEachParameter(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = nullptr;
+ if (assignments.ContainsParameter(reg.ToParameterIndex())) {
+ entry = NewLoopPhi(info.zone(), reg, merge_offset);
+ }
+ });
+ frame_state_.ForEachLocal(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = nullptr;
+ if (assignments.ContainsLocal(reg.index())) {
+ entry = NewLoopPhi(info.zone(), reg, merge_offset);
+ }
+ });
+ DCHECK(!frame_state_.liveness()->AccumulatorIsLive());
#ifdef DEBUG
predecessors_[0] = nullptr;
@@ -181,26 +287,24 @@ class MergePointInterpreterFrameState {
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
- void Merge(const MaglevCompilationUnit& compilation_unit,
+ void Merge(MaglevCompilationUnit& compilation_unit,
const InterpreterFrameState& unmerged, BasicBlock* predecessor,
int merge_offset) {
DCHECK_GT(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessors_[predecessors_so_far_] = predecessor;
- ForEachValue(
- compilation_unit, [&](interpreter::Register reg, ValueNode*& value) {
+ frame_state_.ForEachValue(
+ compilation_unit, [&](ValueNode*& value, interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
- value = MergeValue(compilation_unit.zone(), reg, value,
- unmerged.get(reg), merge_offset);
+ value = MergeValue(compilation_unit, reg, value, unmerged.get(reg),
+ merge_offset);
});
predecessors_so_far_++;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
}
- MergePointRegisterState& register_state() { return register_state_; }
-
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void MergeLoop(const MaglevCompilationUnit& compilation_unit,
@@ -210,16 +314,38 @@ class MergePointInterpreterFrameState {
DCHECK_NULL(predecessors_[0]);
predecessors_[0] = loop_end_block;
- ForEachValue(
- compilation_unit, [&](interpreter::Register reg, ValueNode* value) {
+ frame_state_.ForEachValue(
+ compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
MergeLoopValue(compilation_unit.zone(), reg, value,
loop_end_state.get(reg), merge_offset);
});
- DCHECK(!liveness_->AccumulatorIsLive());
}
+ // Merges a dead framestate (e.g. one which has been early terminated with a
+ // deopt).
+ void MergeDead() {
+ DCHECK_GT(predecessor_count_, 1);
+ DCHECK_LT(predecessors_so_far_, predecessor_count_);
+ predecessors_[predecessors_so_far_] = kDeadPredecessor;
+ predecessors_so_far_++;
+ DCHECK_LE(predecessors_so_far_, predecessor_count_);
+ }
+
+ // Merges a dead loop framestate (e.g. one where the block containing the
+ // JumpLoop has been early terminated with a deopt).
+ void MergeDeadLoop() {
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+ DCHECK_NULL(predecessors_[0]);
+ predecessors_[0] = kDeadPredecessor;
+ }
+
+ const CompactInterpreterFrameState& frame_state() const {
+ return frame_state_;
+ }
+ MergePointRegisterState& register_state() { return register_state_; }
+
bool has_phi() const { return !phis_.is_empty(); }
Phi::List* phis() { return &phis_; }
@@ -242,9 +368,40 @@ class MergePointInterpreterFrameState {
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
- ValueNode* MergeValue(Zone* zone, interpreter::Register owner,
- ValueNode* merged, ValueNode* unmerged,
- int merge_offset) {
+ ValueNode* TagValue(MaglevCompilationUnit& compilation_unit,
+ ValueNode* value) {
+ DCHECK(value->is_untagged_value());
+ if (value->Is<CheckedSmiUntag>()) {
+ return value->input(0).node();
+ }
+ DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
+ // Check if the next Node in the block after value is its CheckedSmiTag
+ // version and reuse it.
+ if (value->NextNode()) {
+ CheckedSmiTag* tagged = value->NextNode()->TryCast<CheckedSmiTag>();
+ if (tagged != nullptr && value == tagged->input().node()) {
+ return tagged;
+ }
+ }
+ // Otherwise create a tagged version.
+ ValueNode* tagged =
+ Node::New<CheckedSmiTag, std::initializer_list<ValueNode*>>(
+ compilation_unit.zone(), compilation_unit,
+ value->eager_deopt_info()->state, {value});
+ value->AddNodeAfter(tagged);
+ compilation_unit.RegisterNodeInGraphLabeller(tagged);
+ return tagged;
+ }
+
+ ValueNode* EnsureTagged(MaglevCompilationUnit& compilation_unit,
+ ValueNode* value) {
+ if (value->is_untagged_value()) return TagValue(compilation_unit, value);
+ return value;
+ }
+
+ ValueNode* MergeValue(MaglevCompilationUnit& compilation_unit,
+ interpreter::Register owner, ValueNode* merged,
+ ValueNode* unmerged, int merge_offset) {
// If the merged node is null, this is a pre-created loop header merge
    // frame with null values for anything that isn't a loop Phi.
if (merged == nullptr) {
@@ -258,12 +415,22 @@ class MergePointInterpreterFrameState {
// It's possible that merged == unmerged at this point since loop-phis are
// not dropped if they are only assigned to themselves in the loop.
DCHECK_EQ(result->owner(), owner);
+ unmerged = EnsureTagged(compilation_unit, unmerged);
result->set_input(predecessors_so_far_, unmerged);
return result;
}
if (merged == unmerged) return merged;
+ // We guarantee that the values are tagged.
+ // TODO(victorgomes): Support Phi nodes of untagged values.
+ merged = EnsureTagged(compilation_unit, merged);
+ unmerged = EnsureTagged(compilation_unit, unmerged);
+
+ // Tagged versions could point to the same value, avoid Phi nodes in this
+ // case.
+ if (merged == unmerged) return merged;
+
// Up to this point all predecessors had the same value for this interpreter
// frame slot. Now that we find a distinct value, insert a copy of the first
// value for each predecessor seen so far, in addition to the new value.
@@ -272,7 +439,8 @@ class MergePointInterpreterFrameState {
// the frame slot. In that case we only need the inputs for representation
// selection, and hence could remove duplicate inputs. We'd likely need to
// attach the interpreter register to the phi in that case?
- result = Node::New<Phi>(zone, predecessor_count_, owner, merge_offset);
+ result = Node::New<Phi>(compilation_unit.zone(), predecessor_count_, owner,
+ merge_offset);
for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
result->set_input(predecessors_so_far_, unmerged);
@@ -297,8 +465,8 @@ class MergePointInterpreterFrameState {
result->set_input(0, unmerged);
}
- ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg, int merge_offset,
- ValueNode* initial_value) {
+ ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg,
+ int merge_offset) {
DCHECK_EQ(predecessors_so_far_, 1);
// Create a new loop phi, which for now is empty.
Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
@@ -308,89 +476,23 @@ class MergePointInterpreterFrameState {
phis_.Add(result);
return result;
}
- static int SizeFor(const MaglevCompilationUnit& info,
- const compiler::BytecodeLivenessState* liveness) {
- return info.parameter_count() + liveness->live_value_count();
- }
-
- template <typename Function>
- void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
- for (int i = 0; i < info.parameter_count(); i++) {
- interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
- for (int i = 0; i < info.parameter_count(); i++) {
- interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachLocal(Function&& f) const {
- for (int register_index : *liveness_) {
- interpreter::Register reg = interpreter::Register(register_index);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachLocal(Function&& f) {
- for (int register_index : *liveness_) {
- interpreter::Register reg = interpreter::Register(register_index);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
- ForEachParameter(info, f);
- ForEachLocal(f);
- }
-
- template <typename Function>
- void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
- ForEachParameter(info, f);
- ForEachLocal(f);
- }
-
- template <typename Function>
- void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
- int live_index = 0;
- ForEachRegister(info, [&](interpreter::Register reg) {
- f(reg, live_registers_and_accumulator_[live_index++]);
- });
- if (liveness_->AccumulatorIsLive()) {
- f(interpreter::Register::virtual_accumulator(),
- live_registers_and_accumulator_[live_index++]);
- live_index++;
- }
- DCHECK_EQ(live_index, SizeFor(info, liveness_));
- }
int predecessor_count_;
int predecessors_so_far_;
Phi::List phis_;
- ValueNode** live_registers_and_accumulator_;
- const compiler::BytecodeLivenessState* liveness_ = nullptr;
BasicBlock** predecessors_;
+ CompactInterpreterFrameState frame_state_;
MergePointRegisterState register_state_;
};
void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state) {
- int live_index = 0;
- state.ForEachRegister(info, [&](interpreter::Register reg) {
- frame_[reg] = state.live_registers_and_accumulator_[live_index++];
- });
- if (state.liveness_->AccumulatorIsLive()) {
- accumulator_ = state.live_registers_and_accumulator_[live_index++];
- }
+ state.frame_state().ForEachValue(
+ info, [&](ValueNode* value, interpreter::Register reg) {
+ frame_[reg] = value;
+ });
}
} // namespace maglev
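
Illustrative sketch (not part of this patch): CompactInterpreterFrameState lays its single
backing array out as [ parameters | live locals in liveness order | accumulator-if-live ],
which is why SizeFor() is parameter_count() plus live_value_count() and accumulator()
reads the last slot. The same indexing, restated with plain stand-in types rather than the
maglev classes (CompactFrameLayoutSketch and its members are illustrative names only):

#include <cstddef>
#include <vector>

// Stand-in for the compact frame layout above; not a V8 type.
struct CompactFrameLayoutSketch {
  std::size_t parameter_count = 0;
  std::vector<int> live_locals;      // live bytecode register indices, in order
  bool accumulator_is_live = false;

  // Mirrors SizeFor(): the liveness count covers locals plus the accumulator.
  std::size_t size() const {
    return parameter_count + live_locals.size() + (accumulator_is_live ? 1 : 0);
  }
  std::size_t parameter_slot(std::size_t i) const { return i; }
  std::size_t local_slot(std::size_t nth_live) const {
    return parameter_count + nth_live;
  }
  // Valid only when accumulator_is_live: the accumulator is the last slot.
  std::size_t accumulator_slot() const { return size() - 1; }
};
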
diff --git a/deps/v8/src/maglev/maglev-ir.cc b/deps/v8/src/maglev/maglev-ir.cc
index 929a748330..c648ee581c 100644
--- a/deps/v8/src/maglev/maglev-ir.cc
+++ b/deps/v8/src/maglev/maglev-ir.cc
@@ -12,9 +12,11 @@
#include "src/compiler/backend/instruction.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-vreg-allocator.h"
namespace v8 {
@@ -32,11 +34,12 @@ const char* ToString(Opcode opcode) {
// TODO(v8:7700): Clean up after all code paths are supported.
static bool g_this_field_will_be_unused_once_all_code_paths_are_supported;
-#define UNSUPPORTED() \
- do { \
- std::cerr << "Maglev: Can't compile, unsuppored codegen path.\n"; \
- code_gen_state->set_found_unsupported_code_paths(true); \
- g_this_field_will_be_unused_once_all_code_paths_are_supported = true; \
+#define UNSUPPORTED(REASON) \
+ do { \
+    std::cerr << "Maglev: Can't compile, unsupported codegen path (" REASON \
+ ")\n"; \
+ code_gen_state->set_found_unsupported_code_paths(true); \
+ g_this_field_will_be_unused_once_all_code_paths_are_supported = true; \
} while (false)
namespace {
@@ -63,10 +66,7 @@ void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
vreg_state->AllocateVirtualRegister());
}
-// TODO(victorgomes): Use this for smi binary operation and remove attribute
-// [[maybe_unused]].
-[[maybe_unused]] void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
- Node* node) {
+void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}
@@ -147,6 +147,10 @@ struct CopyForDeferredHelper<MaglevCompilationUnit*>
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
+// Bytecode offsets are copied by value.
+template <>
+struct CopyForDeferredHelper<BytecodeOffset>
+ : public CopyForDeferredByValue<BytecodeOffset> {};
// InterpreterFrameState is cloned.
template <>
@@ -158,6 +162,10 @@ struct CopyForDeferredHelper<const InterpreterFrameState*> {
*compilation_unit, *frame_state);
}
};
+// EagerDeoptInfo pointers are copied by value.
+template <>
+struct CopyForDeferredHelper<EagerDeoptInfo*>
+ : public CopyForDeferredByValue<EagerDeoptInfo*> {};
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
@@ -196,7 +204,7 @@ struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
};
template <typename Function>
-class DeferredCodeInfoImpl final : public MaglevCodeGenState::DeferredCodeInfo {
+class DeferredCodeInfoImpl final : public DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
@@ -252,64 +260,25 @@ void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
// Deopt
// ---
-void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
- int deopt_bytecode_position,
- const InterpreterFrameState* checkpoint_state) {
- DCHECK(node->properties().can_deopt());
- // TODO(leszeks): Extract to separate call, or at the very least defer.
-
- // TODO(leszeks): Stack check.
- MaglevCompilationUnit* compilation_unit = code_gen_state->compilation_unit();
- int maglev_frame_size = code_gen_state->vreg_slots();
-
- ASM_CODE_COMMENT_STRING(code_gen_state->masm(), "Deoptimize");
- __ RecordComment("Push registers and load accumulator");
- int num_saved_slots = 0;
- // TODO(verwaest): We probably shouldn't be spilling all values that go
- // through deopt :)
- for (int i = 0; i < compilation_unit->register_count(); ++i) {
- ValueNode* node = checkpoint_state->get(interpreter::Register(i));
- if (node == nullptr) continue;
- __ Push(ToMemOperand(node->spill_slot()));
- num_saved_slots++;
- }
- ValueNode* accumulator = checkpoint_state->accumulator();
- if (accumulator) {
- __ movq(kInterpreterAccumulatorRegister,
- ToMemOperand(accumulator->spill_slot()));
- }
-
- __ RecordComment("Load registers from extra pushed slots");
- int slot = 0;
- for (int i = 0; i < compilation_unit->register_count(); ++i) {
- ValueNode* node = checkpoint_state->get(interpreter::Register(i));
- if (node == nullptr) continue;
- __ movq(kScratchRegister, MemOperand(rsp, (num_saved_slots - slot++ - 1) *
- kSystemPointerSize));
- __ movq(MemOperand(rbp, InterpreterFrameConstants::kRegisterFileFromFp -
- i * kSystemPointerSize),
- kScratchRegister);
+void RegisterEagerDeopt(MaglevCodeGenState* code_gen_state,
+ EagerDeoptInfo* deopt_info) {
+ if (deopt_info->deopt_entry_label.is_unused()) {
+ code_gen_state->PushEagerDeopt(deopt_info);
}
- DCHECK_EQ(slot, num_saved_slots);
-
- __ RecordComment("Materialize bytecode array and offset");
- __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
- compilation_unit->bytecode().object());
- __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
- Smi::FromInt(deopt_bytecode_position +
- (BytecodeArray::kHeaderSize - kHeapObjectTag)));
+}
- // Reset rsp to bytecode sized frame.
- __ addq(rsp, Immediate((maglev_frame_size + num_saved_slots -
- (2 + compilation_unit->register_count())) *
- kSystemPointerSize));
- __ TailCallBuiltin(Builtin::kBaselineOrInterpreterEnterAtBytecode);
+void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
+ EagerDeoptInfo* deopt_info) {
+ RegisterEagerDeopt(code_gen_state, deopt_info);
+ __ RecordComment("-- Jump to eager deopt");
+ __ j(cond, &deopt_info->deopt_entry_label);
}
-void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
- const ProcessingState& state) {
- EmitDeopt(code_gen_state, node, state.checkpoint()->bytecode_position(),
- state.checkpoint_frame_state());
+template <typename NodeT>
+void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
+ NodeT* node) {
+ STATIC_ASSERT(NodeT::kProperties.can_eager_deopt());
+ EmitEagerDeoptIf(cond, code_gen_state, node->eager_deopt_info());
}
// ---
@@ -378,6 +347,20 @@ void NodeBase::Print(std::ostream& os,
UNREACHABLE();
}
+DeoptInfo::DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState state)
+ : state(state),
+ input_locations(zone->NewArray<InputLocation>(
+ state.register_frame->size(compilation_unit))) {
+ // Default initialise if we're printing the graph, to avoid printing junk
+ // values.
+ if (FLAG_print_maglev_graph) {
+ for (size_t i = 0; i < state.register_frame->size(compilation_unit); ++i) {
+ new (&input_locations[i]) InputLocation();
+ }
+ }
+}
+
// ---
// Nodes
// ---
@@ -394,29 +377,13 @@ void SmiConstant::PrintParams(std::ostream& os,
os << "(" << value() << ")";
}
-void Checkpoint::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {}
-void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {}
-void Checkpoint::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << PrintNodeLabel(graph_labeller, accumulator()) << ")";
-}
-
-void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {}
-void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
- EmitDeopt(code_gen_state, this, state);
-}
-
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
}
void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
- UNREACHABLE();
+ __ Move(ToRegister(result()), object_.object());
}
void Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
@@ -516,21 +483,20 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
__ Cmp(map_tmp, map().object());
// TODO(leszeks): Encode as a bit on CheckMaps.
- if (map().object()->is_migration_target()) {
+ if (map().is_migration_target()) {
JumpToDeferredIf(
not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label,
- Register object, CheckMaps* node, int checkpoint_position,
- const InterpreterFrameState* checkpoint_state_snapshot,
+ Register object, CheckMaps* node, EagerDeoptInfo* deopt_info,
Register map_tmp) {
- Label deopt;
+ RegisterEagerDeopt(code_gen_state, deopt_info);
// If the map is not deprecated, deopt straight away.
__ movl(kScratchRegister,
FieldOperand(map_tmp, Map::kBitField3Offset));
__ testl(kScratchRegister,
Immediate(Map::Bits3::IsDeprecatedBit::kMask));
- __ j(zero, &deopt);
+ __ j(zero, &deopt_info->deopt_entry_label);
// Otherwise, try migrating the object. If the migration returns Smi
// zero, then it failed and we should deopt.
@@ -540,25 +506,18 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
// TODO(verwaest): We're calling so we need to spill around it.
__ CallRuntime(Runtime::kTryMigrateInstance);
__ cmpl(kReturnRegister0, Immediate(0));
- __ j(equal, &deopt);
+ __ j(equal, &deopt_info->deopt_entry_label);
// The migrated object is returned on success, retry the map check.
__ Move(object, kReturnRegister0);
__ LoadMap(map_tmp, object);
__ Cmp(map_tmp, node->map().object());
__ j(equal, return_label);
-
- __ bind(&deopt);
- EmitDeopt(code_gen_state, node, checkpoint_position,
- checkpoint_state_snapshot);
+ __ jmp(&deopt_info->deopt_entry_label);
},
- object, this, state.checkpoint()->bytecode_position(),
- state.checkpoint_frame_state(), map_tmp);
+ object, this, eager_deopt_info(), map_tmp);
} else {
- Label is_ok;
- __ j(equal, &is_ok);
- EmitDeopt(code_gen_state, this, state);
- __ bind(&is_ok);
+ EmitEagerDeoptIf(not_equal, code_gen_state, this);
}
}
void CheckMaps::PrintParams(std::ostream& os,
@@ -580,19 +539,28 @@ void LoadField::GenerateCode(MaglevCodeGenState* code_gen_state,
// LoadHandler::FieldIndexBits::decode(raw_handler);
Register object = ToRegister(object_input());
+ Register res = ToRegister(result());
int handler = this->handler();
if (LoadHandler::IsInobjectBits::decode(handler)) {
Operand input_field_operand = FieldOperand(
object, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
- __ DecompressAnyTagged(ToRegister(result()), input_field_operand);
- if (LoadHandler::IsDoubleBits::decode(handler)) {
- // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
- UNSUPPORTED();
- }
+ __ DecompressAnyTagged(res, input_field_operand);
} else {
- // TODO(leszeks): Handle out-of-object properties.
- UNSUPPORTED();
+ Operand property_array_operand =
+ FieldOperand(object, JSReceiver::kPropertiesOrHashOffset);
+ __ DecompressAnyTagged(res, property_array_operand);
+
+ __ AssertNotSmi(res);
+
+ Operand input_field_operand = FieldOperand(
+ res, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
+ __ DecompressAnyTagged(res, input_field_operand);
+ }
+
+ if (LoadHandler::IsDoubleBits::decode(handler)) {
+ // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
+ UNSUPPORTED("LoadField double property");
}
}
void LoadField::PrintParams(std::ostream& os,
@@ -617,7 +585,7 @@ void StoreField::GenerateCode(MaglevCodeGenState* code_gen_state,
__ StoreTaggedField(operand, value);
} else {
// TODO(victorgomes): Out-of-object properties.
- UNSUPPORTED();
+ UNSUPPORTED("StoreField out-of-object property");
}
}
@@ -628,37 +596,27 @@ void StoreField::PrintParams(std::ostream& os,
void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
- using D = LoadNoFeedbackDescriptor;
+ using D = LoadWithVectorDescriptor;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void LoadNamedGeneric::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
- using D = LoadNoFeedbackDescriptor;
- const int ic_kind = static_cast<int>(FeedbackSlotKind::kLoadProperty);
+ using D = LoadWithVectorDescriptor;
DCHECK_EQ(ToRegister(context()), kContextRegister);
DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
__ Move(D::GetRegisterParameter(D::kName), name().object());
- __ Move(D::GetRegisterParameter(D::kICKind),
- Immediate(Smi::FromInt(ic_kind)));
- __ CallBuiltin(Builtin::kLoadIC_NoFeedback);
+ __ Move(D::GetRegisterParameter(D::kSlot),
+ Smi::FromInt(feedback().slot.ToInt()));
+ __ Move(D::GetRegisterParameter(D::kVector), feedback().vector);
+ __ CallBuiltin(Builtin::kLoadIC);
}
void LoadNamedGeneric::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << name_ << ")";
}
-void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {}
-void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {}
-void StoreToFrame::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << target().ToString() << " ← "
- << PrintNodeLabel(graph_labeller, value()) << ")";
-}
-
void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UNREACHABLE();
@@ -753,6 +711,64 @@ void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
#undef DEF_OPERATION
+void CheckedSmiUntag::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(input());
+ DefineSameAsFirst(vreg_state, this);
+}
+
+void CheckedSmiUntag::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
+ // TODO(leszeks): Consider optimizing away this test and using the carry bit
+ // of the `sarl` for cases where the deopt uses the value from a different
+ // register.
+ __ testb(value, Immediate(1));
+ EmitEagerDeoptIf(not_zero, code_gen_state, this);
+ __ sarl(value, Immediate(1));
+}
+
+void CheckedSmiTag::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(input());
+ DefineSameAsFirst(vreg_state, this);
+}
+
+void CheckedSmiTag::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
+ __ addl(reg, reg);
+ EmitEagerDeoptIf(overflow, code_gen_state, this);
+}
+
+void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void Int32Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ __ Move(ToRegister(result()), Immediate(value()));
+}
+void Int32Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << value() << ")";
+}
+
+void Int32AddWithOverflow::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(vreg_state, this);
+}
+
+void Int32AddWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ addl(left, right);
+ EmitEagerDeoptIf(overflow, code_gen_state, this);
+}
+
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
// Phi inputs are processed in the post-process, once loop phis' inputs'
@@ -768,16 +784,14 @@ void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
}
}
void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
- DCHECK_EQ(state.interpreter_frame_state()->get(owner()), this);
-}
+ const ProcessingState& state) {}
void Phi::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << owner().ToString() << ")";
}
-void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {
+void Call::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
UseFixed(function(), CallTrampolineDescriptor::GetRegisterParameter(
CallTrampolineDescriptor::kFunction));
UseFixed(context(), kContextRegister);
@@ -786,8 +800,8 @@ void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
}
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
-void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
+void Call::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
// TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
DCHECK_EQ(ToRegister(function()),
@@ -806,16 +820,25 @@ void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
// TODO(leszeks): This doesn't collect feedback yet, either pass in the
// feedback vector by Handle.
- __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
-}
+ switch (receiver_mode_) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ __ CallBuiltin(Builtin::kCall_ReceiverIsNullOrUndefined);
+ break;
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
+ break;
+ case ConvertReceiverMode::kAny:
+ __ CallBuiltin(Builtin::kCall_ReceiverIsAny);
+ break;
+ }
-void CallUndefinedReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {
- UNREACHABLE();
-}
-void CallUndefinedReceiver::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
- UNREACHABLE();
+ lazy_deopt_info()->deopting_call_return_pc = __ pc_offset_for_safepoint();
+ code_gen_state->PushLazyDeopt(lazy_deopt_info());
+
+ SafepointTableBuilder::Safepoint safepoint =
+ code_gen_state->safepoint_table_builder()->DefineSafepoint(
+ code_gen_state->masm());
+ code_gen_state->DefineSafepointStackSlots(safepoint);
}
// ---
@@ -829,9 +852,42 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
+ // We're not going to continue execution, so we can use an arbitrary register
+ // here instead of relying on temporaries from the register allocator.
+ Register actual_params_size = r8;
+
+ // Compute the size of the actual parameters + receiver (in bytes).
+ // TODO(leszeks): Consider making this an input into Return to re-use the
+ // incoming argc's register (if it's still valid).
+ __ movq(actual_params_size,
+ MemOperand(rbp, StandardFrameConstants::kArgCOffset));
+
+ // Leave the frame.
+ // TODO(leszeks): Add a new frame maker for Maglev.
__ LeaveFrame(StackFrame::BASELINE);
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label drop_dynamic_arg_size;
+ __ cmpq(actual_params_size, Immediate(code_gen_state->parameter_count()));
+ __ j(greater, &drop_dynamic_arg_size);
+
+ // Drop receiver + arguments according to static formal arguments size.
__ Ret(code_gen_state->parameter_count() * kSystemPointerSize,
kScratchRegister);
+
+ __ bind(&drop_dynamic_arg_size);
+ // Drop receiver + arguments according to dynamic arguments size.
+ __ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
+ __ Ret();
+}
+
+void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ EmitEagerDeoptIf(always, code_gen_state, this);
}
void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
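
Illustrative sketch (not part of this patch): EmitEagerDeoptIf replaces the old inline
EmitDeopt sequence with a register-once / jump-to-label scheme: a deopt point is pushed
onto the code-gen state the first time it is seen, emitted code only jumps to
deopt_info->deopt_entry_label, and the shared deopt exits are presumably bound and
emitted later by the code generator (not shown in this section). A stand-alone
restatement of that bookkeeping with stand-in types (DeoptPointSketch and
CodeGenStateSketch are illustrative names, not V8 types):

#include <vector>

// Stand-in for EagerDeoptInfo: tracks whether this deopt point was queued.
struct DeoptPointSketch {
  bool registered = false;  // plays the role of deopt_entry_label.is_unused()
};

// Stand-in for MaglevCodeGenState's eager-deopt list.
struct CodeGenStateSketch {
  std::vector<DeoptPointSketch*> eager_deopts;

  // Mirrors RegisterEagerDeopt(): each deopt point is queued at most once,
  // no matter how many conditional jumps target its label.
  void RegisterEagerDeopt(DeoptPointSketch* info) {
    if (!info->registered) {
      info->registered = true;
      eager_deopts.push_back(info);
    }
  }
};
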
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
index 398f9254d9..1f7c5471de 100644
--- a/deps/v8/src/maglev/maglev-ir.h
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -9,14 +9,17 @@
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
+#include "src/codegen/label.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
#include "src/common/operation.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/heap-refs.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-compilation-unit.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
+#include "src/utils/utils.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -26,8 +29,10 @@ namespace maglev {
class BasicBlock;
class ProcessingState;
class MaglevCodeGenState;
+class MaglevCompilationUnit;
class MaglevGraphLabeller;
class MaglevVregAllocationState;
+class CompactInterpreterFrameState;
// Nodes are either
// 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
@@ -61,8 +66,7 @@ class MaglevVregAllocationState;
V(GenericGreaterThanOrEqual)
#define VALUE_NODE_LIST(V) \
- V(CallProperty) \
- V(CallUndefinedReceiver) \
+ V(Call) \
V(Constant) \
V(InitialValue) \
V(LoadField) \
@@ -72,15 +76,16 @@ class MaglevVregAllocationState;
V(RegisterInput) \
V(RootConstant) \
V(SmiConstant) \
+ V(CheckedSmiTag) \
+ V(CheckedSmiUntag) \
+ V(Int32AddWithOverflow) \
+ V(Int32Constant) \
GENERIC_OPERATIONS_NODE_LIST(V)
#define NODE_LIST(V) \
- V(Checkpoint) \
V(CheckMaps) \
V(GapMove) \
- V(SoftDeopt) \
V(StoreField) \
- V(StoreToFrame) \
VALUE_NODE_LIST(V)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
@@ -93,6 +98,7 @@ class MaglevVregAllocationState;
#define CONTROL_NODE_LIST(V) \
V(Return) \
+ V(Deopt) \
CONDITIONAL_CONTROL_NODE_LIST(V) \
UNCONDITIONAL_CONTROL_NODE_LIST(V)
@@ -159,6 +165,11 @@ class ConditionalControlNode;
class UnconditionalControlNode;
class ValueNode;
+enum class ValueRepresentation {
+ kTagged,
+ kUntagged,
+};
+
#define DEF_FORWARD_DECLARATION(type, ...) class type;
NODE_BASE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@@ -168,16 +179,26 @@ static constexpr uint32_t kInvalidNodeId = 0;
class OpProperties {
public:
- bool is_call() const { return kIsCallBit::decode(bitfield_); }
- bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); }
- bool can_read() const { return kCanReadBit::decode(bitfield_); }
- bool can_write() const { return kCanWriteBit::decode(bitfield_); }
- bool non_memory_side_effects() const {
+ constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); }
+ constexpr bool can_eager_deopt() const {
+ return kCanEagerDeoptBit::decode(bitfield_);
+ }
+ constexpr bool can_lazy_deopt() const {
+ return kCanLazyDeoptBit::decode(bitfield_);
+ }
+ constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); }
+ constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); }
+ constexpr bool non_memory_side_effects() const {
return kNonMemorySideEffectsBit::decode(bitfield_);
}
+ constexpr bool is_untagged_value() const {
+ return kUntaggedValueBit::decode(bitfield_);
+ }
- bool is_pure() const { return (bitfield_ | kPureMask) == kPureValue; }
- bool is_required_when_unused() const {
+ constexpr bool is_pure() const {
+ return (bitfield_ | kPureMask) == kPureValue;
+ }
+ constexpr bool is_required_when_unused() const {
return can_write() || non_memory_side_effects();
}
@@ -189,8 +210,11 @@ class OpProperties {
static constexpr OpProperties Call() {
return OpProperties(kIsCallBit::encode(true));
}
- static constexpr OpProperties Deopt() {
- return OpProperties(kCanDeoptBit::encode(true));
+ static constexpr OpProperties EagerDeopt() {
+ return OpProperties(kCanEagerDeoptBit::encode(true));
+ }
+ static constexpr OpProperties LazyDeopt() {
+ return OpProperties(kCanLazyDeoptBit::encode(true));
}
static constexpr OpProperties Reading() {
return OpProperties(kCanReadBit::encode(true));
@@ -201,16 +225,27 @@ class OpProperties {
static constexpr OpProperties NonMemorySideEffects() {
return OpProperties(kNonMemorySideEffectsBit::encode(true));
}
+ static constexpr OpProperties UntaggedValue() {
+ return OpProperties(kUntaggedValueBit::encode(true));
+ }
+ static constexpr OpProperties JSCall() {
+ return Call() | NonMemorySideEffects() | LazyDeopt();
+ }
static constexpr OpProperties AnySideEffects() {
return Reading() | Writing() | NonMemorySideEffects();
}
+ constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
+ operator uint32_t() const { return bitfield_; }
+
private:
using kIsCallBit = base::BitField<bool, 0, 1>;
- using kCanDeoptBit = kIsCallBit::Next<bool, 1>;
- using kCanReadBit = kCanDeoptBit::Next<bool, 1>;
+ using kCanEagerDeoptBit = kIsCallBit::Next<bool, 1>;
+ using kCanLazyDeoptBit = kCanEagerDeoptBit::Next<bool, 1>;
+ using kCanReadBit = kCanLazyDeoptBit::Next<bool, 1>;
using kCanWriteBit = kCanReadBit::Next<bool, 1>;
using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
+ using kUntaggedValueBit = kNonMemorySideEffectsBit::Next<bool, 1>;
static const uint32_t kPureMask = kCanReadBit::kMask | kCanWriteBit::kMask |
kNonMemorySideEffectsBit::kMask;
@@ -218,9 +253,10 @@ class OpProperties {
kCanWriteBit::encode(false) |
kNonMemorySideEffectsBit::encode(false);
- constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
+ const uint32_t bitfield_;
- uint32_t bitfield_;
+ public:
+ static const size_t kSize = kUntaggedValueBit::kLastUsedBit + 1;
};
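Every accessor and factory on OpProperties is now constexpr, and kSize records the width of the bitfield (seven bits: call, eager deopt, lazy deopt, read, write, non-memory side effects, untagged value), so the properties can be packed into NodeBase's bit_field_ alongside the 6-bit opcode and 16-bit input count (29 bits, leaving three for subclasses via NextBitField) and queried at compile time, as the if-constexpr and static_assert sites further down rely on. A few illustrative compile-time checks, not part of the patch, using only the members shown here:

static_assert(OpProperties::Call().is_call());
static_assert(OpProperties::EagerDeopt().can_eager_deopt());
static_assert(!OpProperties::EagerDeopt().can_lazy_deopt());
static_assert(OpProperties::UntaggedValue().is_untagged_value());
static_assert(OpProperties::kSize == 7);  // One bit per property listed above.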
class ValueLocation {
@@ -263,22 +299,66 @@ class ValueLocation {
compiler::InstructionOperand operand_;
};
-class Input : public ValueLocation {
+class InputLocation : public ValueLocation {
public:
- explicit Input(ValueNode* node) : node_(node) {}
-
- ValueNode* node() const { return node_; }
-
NodeIdT next_use_id() const { return next_use_id_; }
-
// Used in ValueNode::mark_use
NodeIdT* get_next_use_id_address() { return &next_use_id_; }
private:
- ValueNode* node_;
NodeIdT next_use_id_ = kInvalidNodeId;
};
+class Input : public InputLocation {
+ public:
+ explicit Input(ValueNode* node) : node_(node) {}
+ ValueNode* node() const { return node_; }
+
+ private:
+ ValueNode* node_;
+};
+
+class CheckpointedInterpreterState {
+ public:
+ CheckpointedInterpreterState() = default;
+ CheckpointedInterpreterState(BytecodeOffset bytecode_position,
+ const CompactInterpreterFrameState* state)
+ : bytecode_position(bytecode_position), register_frame(state) {}
+
+ BytecodeOffset bytecode_position = BytecodeOffset::None();
+ const CompactInterpreterFrameState* register_frame = nullptr;
+};
+
+class DeoptInfo {
+ protected:
+ DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint);
+
+ public:
+ CheckpointedInterpreterState state;
+ InputLocation* input_locations = nullptr;
+ Label deopt_entry_label;
+ int deopt_index = -1;
+};
+
+class EagerDeoptInfo : public DeoptInfo {
+ public:
+ EagerDeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint)
+ : DeoptInfo(zone, compilation_unit, checkpoint) {}
+};
+
+class LazyDeoptInfo : public DeoptInfo {
+ public:
+ LazyDeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint)
+ : DeoptInfo(zone, compilation_unit, checkpoint) {}
+
+ int deopting_call_return_pc = -1;
+ interpreter::Register result_location =
+ interpreter::Register::invalid_value();
+};
+
// Dummy type for the initial raw allocation.
struct NodeWithInlineInputs {};
@@ -296,10 +376,23 @@ struct opcode_of_helper;
};
NODE_BASE_LIST(DEF_OPCODE_OF)
#undef DEF_OPCODE_OF
+
} // namespace detail
class NodeBase : public ZoneObject {
+ private:
+ // Bitfield specification.
+ using OpcodeField = base::BitField<Opcode, 0, 6>;
+ STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
+ using OpPropertiesField =
+ OpcodeField::Next<OpProperties, OpProperties::kSize>;
+ using InputCountField = OpPropertiesField::Next<uint16_t, 16>;
+
protected:
+ // Subclasses may use the remaining bitfield bits.
+ template <class T, int size>
+ using NextBitField = InputCountField::Next<T, size>;
+
template <class T>
static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;
@@ -319,6 +412,21 @@ class NodeBase : public ZoneObject {
return node;
}
+ template <class Derived, typename... Args>
+ static Derived* New(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint, Args&&... args) {
+ Derived* node = New<Derived>(zone, std::forward<Args>(args)...);
+ if constexpr (Derived::kProperties.can_eager_deopt()) {
+ new (node->eager_deopt_info_address())
+ EagerDeoptInfo(zone, compilation_unit, checkpoint);
+ } else {
+ STATIC_ASSERT(Derived::kProperties.can_lazy_deopt());
+ new (node->lazy_deopt_info_address())
+ LazyDeoptInfo(zone, compilation_unit, checkpoint);
+ }
+ return node;
+ }
+
// Inputs must be initialized manually.
template <class Derived, typename... Args>
static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
@@ -329,9 +437,11 @@ class NodeBase : public ZoneObject {
// Overwritten by subclasses.
static constexpr OpProperties kProperties = OpProperties::Pure();
- inline const OpProperties& properties() const;
constexpr Opcode opcode() const { return OpcodeField::decode(bit_field_); }
+ OpProperties properties() const {
+ return OpPropertiesField::decode(bit_field_);
+ }
template <class T>
constexpr bool Is() const;
@@ -407,15 +517,45 @@ class NodeBase : public ZoneObject {
void Print(std::ostream& os, MaglevGraphLabeller*) const;
+ EagerDeoptInfo* eager_deopt_info() {
+ DCHECK(properties().can_eager_deopt());
+ DCHECK(!properties().can_lazy_deopt());
+ return (
+ reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
+ 1);
+ }
+
+ const EagerDeoptInfo* eager_deopt_info() const {
+ DCHECK(properties().can_eager_deopt());
+ DCHECK(!properties().can_lazy_deopt());
+ return (reinterpret_cast<const EagerDeoptInfo*>(
+ input_address(input_count() - 1)) -
+ 1);
+ }
+
+ LazyDeoptInfo* lazy_deopt_info() {
+ DCHECK(properties().can_lazy_deopt());
+ DCHECK(!properties().can_eager_deopt());
+ return (reinterpret_cast<LazyDeoptInfo*>(input_address(input_count() - 1)) -
+ 1);
+ }
+
+ const LazyDeoptInfo* lazy_deopt_info() const {
+ DCHECK(properties().can_lazy_deopt());
+ DCHECK(!properties().can_eager_deopt());
+ return (reinterpret_cast<const LazyDeoptInfo*>(
+ input_address(input_count() - 1)) -
+ 1);
+ }
+
protected:
- NodeBase(Opcode opcode, size_t input_count)
- : bit_field_(OpcodeField::encode(opcode) |
- InputCountField::encode(input_count)) {}
+ explicit NodeBase(uint32_t bitfield) : bit_field_(bitfield) {}
Input* input_address(int index) {
DCHECK_LT(index, input_count());
return reinterpret_cast<Input*>(this) - (index + 1);
}
+
const Input* input_address(int index) const {
DCHECK_LT(index, input_count());
return reinterpret_cast<const Input*>(this) - (index + 1);
@@ -425,36 +565,54 @@ class NodeBase : public ZoneObject {
new (input_address(index)) Input(input);
}
+ void set_temporaries_needed(int value) {
+#ifdef DEBUG
+ DCHECK_EQ(kTemporariesState, kUnset);
+ kTemporariesState = kNeedsTemporaries;
+#endif // DEBUG
+ num_temporaries_needed_ = value;
+ }
+
+ EagerDeoptInfo* eager_deopt_info_address() {
+ DCHECK(properties().can_eager_deopt());
+ DCHECK(!properties().can_lazy_deopt());
+ return reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
+ 1;
+ }
+
+ LazyDeoptInfo* lazy_deopt_info_address() {
+ DCHECK(!properties().can_eager_deopt());
+ DCHECK(properties().can_lazy_deopt());
+ return reinterpret_cast<LazyDeoptInfo*>(input_address(input_count() - 1)) -
+ 1;
+ }
+
private:
template <class Derived, typename... Args>
static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
- const size_t size = sizeof(Derived) + input_count * sizeof(Input);
+ static_assert(
+ !Derived::kProperties.can_eager_deopt() ||
+ !Derived::kProperties.can_lazy_deopt(),
+ "The current deopt info representation, at the end of inputs, requires "
+ "that we cannot have both lazy and eager deopts on a node. If we ever "
+ "need this, we have to update accessors to check node->properties() "
+ "for which deopts are active.");
+ const size_t size_before_node =
+ input_count * sizeof(Input) +
+ (Derived::kProperties.can_eager_deopt() ? sizeof(EagerDeoptInfo) : 0) +
+ (Derived::kProperties.can_lazy_deopt() ? sizeof(LazyDeoptInfo) : 0);
+ const size_t size = size_before_node + sizeof(Derived);
intptr_t raw_buffer =
reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size));
- void* node_buffer =
- reinterpret_cast<void*>(raw_buffer + input_count * sizeof(Input));
+ void* node_buffer = reinterpret_cast<void*>(raw_buffer + size_before_node);
+ uint32_t bitfield = OpcodeField::encode(opcode_of<Derived>) |
+ OpPropertiesField::encode(Derived::kProperties) |
+ InputCountField::encode(input_count);
Derived* node =
- new (node_buffer) Derived(input_count, std::forward<Args>(args)...);
+ new (node_buffer) Derived(bitfield, std::forward<Args>(args)...);
return node;
}
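Read together with the negative-offset accessors above, Allocate lays the zone buffer out back to front: the optional deopt info sits lowest, then the inputs in decreasing index order, then the Derived object itself, which is why input_address(i) is this minus (i + 1) and the deopt info is found one record past the highest-numbered input. A sketch of the arithmetic for a hypothetical eager-deopting node with three inputs (illustrative only, not part of the patch):

// Zone buffer, low addresses first, for input_count == 3 with eager deopt:
//
//   [EagerDeoptInfo][Input 2][Input 1][Input 0][Derived node object]
//
// With node pointing at the Derived object:
//   node->input_address(0) == reinterpret_cast<Input*>(node) - 1
//   node->input_address(2) == reinterpret_cast<Input*>(node) - 3
//   node->eager_deopt_info() ==
//       reinterpret_cast<EagerDeoptInfo*>(node->input_address(2)) - 1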
- protected:
- // Bitfield specification.
- using OpcodeField = base::BitField<Opcode, 0, 6>;
- STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
- using InputCountField = OpcodeField::Next<uint16_t, 16>;
- // Subclasses may use the remaining bits.
- template <class T, int size>
- using NextBitField = InputCountField::Next<T, size>;
-
- void set_temporaries_needed(int value) {
-#ifdef DEBUG
- DCHECK_EQ(kTemporariesState, kUnset);
- kTemporariesState = kNeedsTemporaries;
-#endif // DEBUG
- num_temporaries_needed_ = value;
- }
-
uint32_t bit_field_;
private:
@@ -505,13 +663,24 @@ class Node : public NodeBase {
inline ValueLocation& result();
+ // This might break ThreadedList invariants.
+ // Run ThreadedList::RevalidateTail afterwards.
+ void AddNodeAfter(Node* node) {
+ DCHECK_NOT_NULL(node);
+ DCHECK_NULL(node->next_);
+ node->next_ = next_;
+ next_ = node;
+ }
+
+ Node* NextNode() const { return next_; }
+
protected:
- explicit Node(Opcode opcode, size_t input_count)
- : NodeBase(opcode, input_count) {}
+ using NodeBase::NodeBase;
private:
Node** next() { return &next_; }
Node* next_ = nullptr;
+
friend List;
friend base::ThreadedListTraits<Node>;
};
@@ -559,16 +728,14 @@ class ValueNode : public Node {
return compiler::AllocatedOperand::cast(spill_or_hint_);
}
- void mark_use(NodeIdT id, Input* use) {
+ void mark_use(NodeIdT id, InputLocation* input_location) {
DCHECK_EQ(state_, kLastUse);
DCHECK_NE(id, kInvalidNodeId);
DCHECK_LT(start_id(), id);
DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_);
end_id_ = id;
*last_uses_next_use_id_ = id;
- if (use) {
- last_uses_next_use_id_ = use->get_next_use_id_address();
- }
+ last_uses_next_use_id_ = input_location->get_next_use_id_address();
}
struct LiveRange {
@@ -606,9 +773,16 @@ class ValueNode : public Node {
return compiler::AllocatedOperand::cast(spill_or_hint_);
}
+ bool is_untagged_value() const { return properties().is_untagged_value(); }
+
+ ValueRepresentation value_representation() const {
+ return is_untagged_value() ? ValueRepresentation::kUntagged
+ : ValueRepresentation::kTagged;
+ }
+
protected:
- explicit ValueNode(Opcode opcode, size_t input_count)
- : Node(opcode, input_count),
+ explicit ValueNode(uint32_t bitfield)
+ : Node(bitfield),
last_uses_next_use_id_(&next_use_)
#ifdef DEBUG
,
@@ -647,11 +821,13 @@ class NodeT : public Node {
STATIC_ASSERT(!IsValueNode(opcode_of<Derived>));
public:
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr Opcode opcode() const { return opcode_of<Derived>; }
const OpProperties& properties() const { return Derived::kProperties; }
protected:
- explicit NodeT(size_t input_count) : Node(opcode_of<Derived>, input_count) {}
+ explicit NodeT(uint32_t bitfield) : Node(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ }
};
template <size_t InputCount, class Derived>
@@ -667,9 +843,8 @@ class FixedInputNodeT : public NodeT<Derived> {
}
protected:
- explicit FixedInputNodeT(size_t input_count) : NodeT<Derived>(kInputCount) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ explicit FixedInputNodeT(uint32_t bitfield) : NodeT<Derived>(bitfield) {
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
@@ -678,12 +853,13 @@ class ValueNodeT : public ValueNode {
STATIC_ASSERT(IsValueNode(opcode_of<Derived>));
public:
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr Opcode opcode() const { return opcode_of<Derived>; }
const OpProperties& properties() const { return Derived::kProperties; }
protected:
- explicit ValueNodeT(size_t input_count)
- : ValueNode(opcode_of<Derived>, input_count) {}
+ explicit ValueNodeT(uint32_t bitfield) : ValueNode(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ }
};
template <size_t InputCount, class Derived>
@@ -699,10 +875,9 @@ class FixedInputValueNodeT : public ValueNodeT<Derived> {
}
protected:
- explicit FixedInputValueNodeT(size_t input_count)
- : ValueNodeT<Derived>(InputCount) {
- DCHECK_EQ(input_count, InputCount);
- USE(input_count);
+ explicit FixedInputValueNodeT(uint32_t bitfield)
+ : ValueNodeT<Derived>(bitfield) {
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
@@ -712,16 +887,16 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
public:
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
static constexpr int kOperandIndex = 0;
Input& operand_input() { return Node::input(kOperandIndex); }
compiler::FeedbackSource feedback() const { return feedback_; }
protected:
- explicit UnaryWithFeedbackNode(size_t input_count,
+ explicit UnaryWithFeedbackNode(uint32_t bitfield,
const compiler::FeedbackSource& feedback)
- : Base(input_count), feedback_(feedback) {}
+ : Base(bitfield), feedback_(feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -736,7 +911,7 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
public:
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@@ -745,9 +920,9 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
compiler::FeedbackSource feedback() const { return feedback_; }
protected:
- BinaryWithFeedbackNode(size_t input_count,
+ BinaryWithFeedbackNode(uint32_t bitfield,
const compiler::FeedbackSource& feedback)
- : Base(input_count), feedback_(feedback) {}
+ : Base(bitfield), feedback_(feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -761,8 +936,8 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
- Name(size_t input_count, const compiler::FeedbackSource& feedback) \
- : Base(input_count, feedback) {} \
+ Name(uint32_t bitfield, const compiler::FeedbackSource& feedback) \
+ : Base(bitfield, feedback) {} \
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
@@ -778,12 +953,82 @@ COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
+class CheckedSmiTag : public FixedInputValueNodeT<1, CheckedSmiTag> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiTag>;
+
+ public:
+ explicit CheckedSmiTag(uint32_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+
+ Input& input() { return Node::input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedSmiUntag : public FixedInputValueNodeT<1, CheckedSmiUntag> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiUntag>;
+
+ public:
+ explicit CheckedSmiUntag(uint32_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::UntaggedValue();
+
+ Input& input() { return Node::input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
+ using Base = FixedInputValueNodeT<0, Int32Constant>;
+
+ public:
+ explicit Int32Constant(uint32_t bitfield, int32_t value)
+ : Base(bitfield), value_(value) {}
+
+ static constexpr OpProperties kProperties = OpProperties::UntaggedValue();
+
+ int32_t value() const { return value_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int32_t value_;
+};
+
+class Int32AddWithOverflow
+ : public FixedInputValueNodeT<2, Int32AddWithOverflow> {
+ using Base = FixedInputValueNodeT<2, Int32AddWithOverflow>;
+
+ public:
+ explicit Int32AddWithOverflow(uint32_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::UntaggedValue();
+
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return Node::input(kLeftIndex); }
+ Input& right_input() { return Node::input(kRightIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
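The new untagged nodes suggest a lowering of the form CheckedSmiUntag -> Int32AddWithOverflow -> CheckedSmiTag for Smi additions, with eager deopts guarding both the untag and the overflow. A minimal stand-alone sketch of the check that Int32AddWithOverflow's eager deopt covers (the helper name is hypothetical, not part of the patch):

#include <cstdint>
#include <optional>

// Either produce an untagged int32 sum, or bail out when the addition
// overflows; Maglev would take the eager deopt in the overflow case.
std::optional<int32_t> Int32AddOrDeopt(int32_t left, int32_t right) {
  int32_t result;
  if (__builtin_add_overflow(left, right, &result)) {
    return std::nullopt;
  }
  return result;
}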
class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
using Base = FixedInputValueNodeT<0, InitialValue>;
public:
- explicit InitialValue(size_t input_count, interpreter::Register source)
- : Base(input_count), source_(source) {}
+ explicit InitialValue(uint32_t bitfield, interpreter::Register source)
+ : Base(bitfield), source_(source) {}
interpreter::Register source() const { return source_; }
@@ -799,8 +1044,8 @@ class RegisterInput : public FixedInputValueNodeT<0, RegisterInput> {
using Base = FixedInputValueNodeT<0, RegisterInput>;
public:
- explicit RegisterInput(size_t input_count, Register input)
- : Base(input_count), input_(input) {}
+ explicit RegisterInput(uint32_t bitfield, Register input)
+ : Base(bitfield), input_(input) {}
Register input() const { return input_; }
@@ -816,8 +1061,8 @@ class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
using Base = FixedInputValueNodeT<0, SmiConstant>;
public:
- explicit SmiConstant(size_t input_count, Smi value)
- : Base(input_count), value_(value) {}
+ explicit SmiConstant(uint32_t bitfield, Smi value)
+ : Base(bitfield), value_(value) {}
Smi value() const { return value_; }
@@ -833,8 +1078,8 @@ class Constant : public FixedInputValueNodeT<0, Constant> {
using Base = FixedInputValueNodeT<0, Constant>;
public:
- explicit Constant(size_t input_count, const compiler::HeapObjectRef& object)
- : Base(input_count), object_(object) {}
+ explicit Constant(uint32_t bitfield, const compiler::HeapObjectRef& object)
+ : Base(bitfield), object_(object) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -848,8 +1093,8 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
using Base = FixedInputValueNodeT<0, RootConstant>;
public:
- explicit RootConstant(size_t input_count, RootIndex index)
- : Base(input_count), index_(index) {}
+ explicit RootConstant(uint32_t bitfield, RootIndex index)
+ : Base(bitfield), index_(index) {}
RootIndex index() const { return index_; }
@@ -861,57 +1106,18 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
const RootIndex index_;
};
-class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
- using Base = FixedInputNodeT<0, Checkpoint>;
-
- public:
- explicit Checkpoint(size_t input_count, int bytecode_position,
- bool accumulator_is_live, ValueNode* accumulator)
- : Base(input_count),
- bytecode_position_(bytecode_position),
- accumulator_(accumulator_is_live ? accumulator : nullptr) {}
-
- int bytecode_position() const { return bytecode_position_; }
- bool is_used() const { return IsUsedBit::decode(bit_field_); }
- void SetUsed() { bit_field_ = IsUsedBit::update(bit_field_, true); }
- ValueNode* accumulator() const { return accumulator_; }
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
-
- private:
- using IsUsedBit = NextBitField<bool, 1>;
-
- const int bytecode_position_;
- ValueNode* const accumulator_;
-};
-
-class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
- using Base = FixedInputNodeT<0, SoftDeopt>;
-
- public:
- explicit SoftDeopt(size_t input_count) : Base(input_count) {}
-
- static constexpr OpProperties kProperties = OpProperties::Deopt();
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-};
-
class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
using Base = FixedInputNodeT<1, CheckMaps>;
public:
- explicit CheckMaps(size_t input_count, const compiler::MapRef& map)
- : Base(input_count), map_(map) {}
+ explicit CheckMaps(uint32_t bitfield, const compiler::MapRef& map)
+ : Base(bitfield), map_(map) {}
// TODO(verwaest): This just calls in deferred code, so probably we'll need to
// mark that to generate stack maps. Mark as call so we at least clear the
// registers since we currently don't properly spill either.
static constexpr OpProperties kProperties =
- OpProperties::Deopt() | OpProperties::Call();
+ OpProperties::EagerDeopt() | OpProperties::Call();
compiler::MapRef map() const { return map_; }
@@ -930,11 +1136,10 @@ class LoadField : public FixedInputValueNodeT<1, LoadField> {
using Base = FixedInputValueNodeT<1, LoadField>;
public:
- explicit LoadField(size_t input_count, int handler)
- : Base(input_count), handler_(handler) {}
+ explicit LoadField(uint32_t bitfield, int handler)
+ : Base(bitfield), handler_(handler) {}
- // The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::Reading();
int handler() const { return handler_; }
@@ -953,8 +1158,10 @@ class StoreField : public FixedInputNodeT<2, StoreField> {
using Base = FixedInputNodeT<2, StoreField>;
public:
- explicit StoreField(size_t input_count, int handler)
- : Base(input_count), handler_(handler) {}
+ explicit StoreField(uint32_t bitfield, int handler)
+ : Base(bitfield), handler_(handler) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Writing();
int handler() const { return handler_; }
@@ -975,11 +1182,11 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
using Base = FixedInputValueNodeT<1, LoadGlobal>;
public:
- explicit LoadGlobal(size_t input_count, const compiler::NameRef& name)
- : Base(input_count), name_(name) {}
+ explicit LoadGlobal(uint32_t bitfield, const compiler::NameRef& name)
+ : Base(bitfield), name_(name) {}
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
Input& context() { return input(0); }
const compiler::NameRef& name() const { return name_; }
@@ -996,13 +1203,15 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
using Base = FixedInputValueNodeT<2, LoadNamedGeneric>;
public:
- explicit LoadNamedGeneric(size_t input_count, const compiler::NameRef& name)
- : Base(input_count), name_(name) {}
+ explicit LoadNamedGeneric(uint32_t bitfield, const compiler::NameRef& name,
+ const compiler::FeedbackSource& feedback)
+ : Base(bitfield), name_(name), feedback_(feedback) {}
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
compiler::NameRef name() const { return name_; }
+ compiler::FeedbackSource feedback() const { return feedback_; }
static constexpr int kContextIndex = 0;
static constexpr int kObjectIndex = 1;
@@ -1015,35 +1224,16 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
private:
const compiler::NameRef name_;
-};
-
-class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
- using Base = FixedInputNodeT<0, StoreToFrame>;
-
- public:
- StoreToFrame(size_t input_count, ValueNode* value,
- interpreter::Register target)
- : Base(input_count), value_(value), target_(target) {}
-
- interpreter::Register target() const { return target_; }
- ValueNode* value() const { return value_; }
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
-
- private:
- ValueNode* const value_;
- const interpreter::Register target_;
+ const compiler::FeedbackSource feedback_;
};
class GapMove : public FixedInputNodeT<0, GapMove> {
using Base = FixedInputNodeT<0, GapMove>;
public:
- GapMove(size_t input_count, compiler::AllocatedOperand source,
+ GapMove(uint32_t bitfield, compiler::AllocatedOperand source,
compiler::AllocatedOperand target)
- : Base(input_count), source_(source), target_(target) {}
+ : Base(bitfield), source_(source), target_(target) {}
compiler::AllocatedOperand source() const { return source_; }
compiler::AllocatedOperand target() const { return target_; }
@@ -1067,8 +1257,8 @@ class Phi : public ValueNodeT<Phi> {
using List = base::ThreadedList<Phi>;
// TODO(jgruber): More intuitive constructors, if possible.
- Phi(size_t input_count, interpreter::Register owner, int merge_offset)
- : Base(input_count), owner_(owner), merge_offset_(merge_offset) {}
+ Phi(uint32_t bitfield, interpreter::Register owner, int merge_offset)
+ : Base(bitfield), owner_(owner), merge_offset_(merge_offset) {}
interpreter::Register owner() const { return owner_; }
int merge_offset() const { return merge_offset_; }
@@ -1090,54 +1280,42 @@ class Phi : public ValueNodeT<Phi> {
friend base::ThreadedListTraits<Phi>;
};
-class CallProperty : public ValueNodeT<CallProperty> {
- using Base = ValueNodeT<CallProperty>;
+class Call : public ValueNodeT<Call> {
+ using Base = ValueNodeT<Call>;
public:
- explicit CallProperty(size_t input_count) : Base(input_count) {}
+ // We assume the function and context to be fixed inputs.
+ static constexpr int kFunctionIndex = 0;
+ static constexpr int kContextIndex = 1;
+ static constexpr int kFixedInputCount = 2;
// This ctor is used for variable input counts.
// Inputs must be initialized manually.
- CallProperty(size_t input_count, ValueNode* function, ValueNode* context)
- : Base(input_count) {
- set_input(0, function);
- set_input(1, context);
+ Call(uint32_t bitfield, ConvertReceiverMode mode, ValueNode* function,
+ ValueNode* context)
+ : Base(bitfield), receiver_mode_(mode) {
+ set_input(kFunctionIndex, function);
+ set_input(kContextIndex, context);
}
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
- Input& function() { return input(0); }
- const Input& function() const { return input(0); }
- Input& context() { return input(1); }
- const Input& context() const { return input(1); }
- int num_args() const { return input_count() - 2; }
- Input& arg(int i) { return input(i + 2); }
- void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
+ Input& function() { return input(kFunctionIndex); }
+ const Input& function() const { return input(kFunctionIndex); }
+ Input& context() { return input(kContextIndex); }
+ const Input& context() const { return input(kContextIndex); }
+ int num_args() const { return input_count() - kFixedInputCount; }
+ Input& arg(int i) { return input(i + kFixedInputCount); }
+ void set_arg(int i, ValueNode* node) {
+ set_input(i + kFixedInputCount, node);
+ }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-};
-
-class CallUndefinedReceiver : public ValueNodeT<CallUndefinedReceiver> {
- using Base = ValueNodeT<CallUndefinedReceiver>;
-
- public:
- explicit CallUndefinedReceiver(size_t input_count) : Base(input_count) {}
- static constexpr OpProperties kProperties = OpProperties::Call();
-
- Input& function() { return input(0); }
- const Input& function() const { return input(0); }
- Input& context() { return input(1); }
- const Input& context() const { return input(1); }
- int num_args() const { return input_count() - 2; }
- Input& arg(int i) { return input(i + 2); }
- void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+ private:
+ ConvertReceiverMode receiver_mode_;
};
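Because Call carries the JSCall properties, and with them a lazy deopt, it would go through the checkpoint-taking NodeBase::New overload added earlier, with the argument inputs filled in afterwards. A sketch of builder-side usage, assuming zone, compilation_unit, checkpoint and the function/context/argument ValueNodes are already in hand (all names here are illustrative, not from the patch):

// Hypothetical sketch of creating a Call with two arguments.
size_t arg_count = 2;
Call* call = NodeBase::New<Call>(
    zone, compilation_unit, checkpoint,
    /* input_count */ Call::kFixedInputCount + arg_count,
    ConvertReceiverMode::kAny, function, context);
call->set_arg(0, receiver);
call->set_arg(1, argument);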
// Represents either a direct BasicBlock pointer, or an entry in a list of
@@ -1246,13 +1424,13 @@ class ControlNode : public NodeBase {
}
void set_next_post_dominating_hole(ControlNode* node) {
DCHECK_IMPLIES(node != nullptr, node->Is<Jump>() || node->Is<Return>() ||
+ node->Is<Deopt>() ||
node->Is<JumpLoop>());
next_post_dominating_hole_ = node;
}
protected:
- explicit ControlNode(Opcode opcode, size_t input_count)
- : NodeBase(opcode, input_count) {}
+ using NodeBase::NodeBase;
private:
ControlNode* next_post_dominating_hole_ = nullptr;
@@ -1265,12 +1443,11 @@ class UnconditionalControlNode : public ControlNode {
void set_predecessor_id(int id) { predecessor_id_ = id; }
protected:
- explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
+ explicit UnconditionalControlNode(uint32_t bitfield,
BasicBlockRef* target_refs)
- : ControlNode(opcode, input_count), target_(target_refs) {}
- explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
- BasicBlock* target)
- : ControlNode(opcode, input_count), target_(target) {}
+ : ControlNode(bitfield), target_(target_refs) {}
+ explicit UnconditionalControlNode(uint32_t bitfield, BasicBlock* target)
+ : ControlNode(bitfield), target_(target) {}
private:
const BasicBlockRef target_;
@@ -1292,25 +1469,24 @@ class UnconditionalControlNodeT : public UnconditionalControlNode {
}
protected:
- explicit UnconditionalControlNodeT(size_t input_count,
+ explicit UnconditionalControlNodeT(uint32_t bitfield,
BasicBlockRef* target_refs)
- : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target_refs) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ : UnconditionalControlNode(bitfield, target_refs) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
- explicit UnconditionalControlNodeT(size_t input_count, BasicBlock* target)
- : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ explicit UnconditionalControlNodeT(uint32_t bitfield, BasicBlock* target)
+ : UnconditionalControlNode(bitfield, target) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
class ConditionalControlNode : public ControlNode {
public:
- ConditionalControlNode(Opcode opcode, size_t input_count,
- BasicBlockRef* if_true_refs,
+ ConditionalControlNode(uint32_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : ControlNode(opcode, input_count),
+ : ControlNode(bitfield),
if_true_(if_true_refs),
if_false_(if_false_refs) {}
@@ -1337,13 +1513,12 @@ class ConditionalControlNodeT : public ConditionalControlNode {
}
protected:
- explicit ConditionalControlNodeT(size_t input_count,
+ explicit ConditionalControlNodeT(uint32_t bitfield,
BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : ConditionalControlNode(opcode_of<Derived>, kInputCount, if_true_refs,
- if_false_refs) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ : ConditionalControlNode(bitfield, if_true_refs, if_false_refs) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
@@ -1351,8 +1526,8 @@ class Jump : public UnconditionalControlNodeT<Jump> {
using Base = UnconditionalControlNodeT<Jump>;
public:
- explicit Jump(size_t input_count, BasicBlockRef* target_refs)
- : Base(input_count, target_refs) {}
+ explicit Jump(uint32_t bitfield, BasicBlockRef* target_refs)
+ : Base(bitfield, target_refs) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -1363,11 +1538,11 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
using Base = UnconditionalControlNodeT<JumpLoop>;
public:
- explicit JumpLoop(size_t input_count, BasicBlock* target)
- : Base(input_count, target) {}
+ explicit JumpLoop(uint32_t bitfield, BasicBlock* target)
+ : Base(bitfield, target) {}
- explicit JumpLoop(size_t input_count, BasicBlockRef* ref)
- : Base(input_count, ref) {}
+ explicit JumpLoop(uint32_t bitfield, BasicBlockRef* ref)
+ : Base(bitfield, ref) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -1376,8 +1551,9 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
class Return : public ControlNode {
public:
- explicit Return(size_t input_count)
- : ControlNode(opcode_of<Return>, input_count) {}
+ explicit Return(uint32_t bitfield) : ControlNode(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Return>);
+ }
Input& value_input() { return input(0); }
@@ -1386,13 +1562,26 @@ class Return : public ControlNode {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
+class Deopt : public ControlNode {
+ public:
+ explicit Deopt(uint32_t bitfield) : ControlNode(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Deopt>);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class BranchIfTrue : public ConditionalControlNodeT<1, BranchIfTrue> {
using Base = ConditionalControlNodeT<1, BranchIfTrue>;
public:
- explicit BranchIfTrue(size_t input_count, BasicBlockRef* if_true_refs,
+ explicit BranchIfTrue(uint32_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : Base(input_count, if_true_refs, if_false_refs) {}
+ : Base(bitfield, if_true_refs, if_false_refs) {}
Input& condition_input() { return input(0); }
@@ -1406,10 +1595,9 @@ class BranchIfToBooleanTrue
using Base = ConditionalControlNodeT<1, BranchIfToBooleanTrue>;
public:
- explicit BranchIfToBooleanTrue(size_t input_count,
- BasicBlockRef* if_true_refs,
+ explicit BranchIfToBooleanTrue(uint32_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : Base(input_count, if_true_refs, if_false_refs) {}
+ : Base(bitfield, if_true_refs, if_false_refs) {}
static constexpr OpProperties kProperties = OpProperties::Call();
@@ -1430,10 +1618,10 @@ class BranchIfCompare
Input& left_input() { return NodeBase::input(kLeftIndex); }
Input& right_input() { return NodeBase::input(kRightIndex); }
- explicit BranchIfCompare(size_t input_count, Operation operation,
+ explicit BranchIfCompare(uint32_t bitfield, Operation operation,
BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : Base(input_count, if_true_refs, if_false_refs), operation_(operation) {}
+ : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -1443,17 +1631,6 @@ class BranchIfCompare
Operation operation_;
};
-const OpProperties& NodeBase::properties() const {
- switch (opcode()) {
-#define V(Name) \
- case Opcode::k##Name: \
- return Name::kProperties;
- NODE_BASE_LIST(V)
-#undef V
- }
- UNREACHABLE();
-}
-
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-regalloc.cc b/deps/v8/src/maglev/maglev-regalloc.cc
index 897f2a2d0e..a18fe1547c 100644
--- a/deps/v8/src/maglev/maglev-regalloc.cc
+++ b/deps/v8/src/maglev/maglev-regalloc.cc
@@ -60,9 +60,7 @@ ControlNode* NearestPostDominatingHole(ControlNode* node) {
bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
DCHECK_NOT_NULL(node);
-
- // TODO(leszeks): We shouldn't have any dead nodes passed into here.
- if (node->is_dead()) return false;
+ DCHECK(!node->is_dead());
// If we're looping, a value can only be live if it was live before the loop.
if (target->control_node()->id() <= source->id()) {
@@ -177,7 +175,8 @@ void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
// If the first branch returns or jumps back, we've found highest
// reachable control-node of the longest branch (the second control
// node).
- if (first->Is<Return>() || first->Is<JumpLoop>()) {
+ if (first->Is<Return>() || first->Is<Deopt>() ||
+ first->Is<JumpLoop>()) {
control->set_next_post_dominating_hole(second);
break;
}
@@ -242,6 +241,9 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
} else if (control->Is<Return>()) {
printing_visitor_->os() << " " << control->id() << ".";
break;
+ } else if (control->Is<Deopt>()) {
+ printing_visitor_->os() << " " << control->id() << "✖️";
+ break;
} else if (control->Is<JumpLoop>()) {
printing_visitor_->os() << " " << control->id() << "↰";
break;
@@ -270,8 +272,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
compiler::AllocatedOperand::cast(allocation));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
- phi, ProcessingState(compilation_unit_, block_it_, nullptr,
- nullptr, nullptr));
+ phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (new reg) " << phi->result().operand() << std::endl;
}
@@ -285,8 +286,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
phi->result().SetAllocated(phi->spill_slot());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
- phi, ProcessingState(compilation_unit_, block_it_, nullptr,
- nullptr, nullptr));
+ phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (stack) " << phi->result().operand() << std::endl;
}
@@ -307,45 +307,68 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
}
}
-void StraightForwardRegisterAllocator::UpdateInputUse(uint32_t use,
- const Input& input) {
- ValueNode* node = input.node();
-
- // The value was already cleared through a previous input.
- if (node->is_dead()) return;
+void StraightForwardRegisterAllocator::UpdateUse(
+ ValueNode* node, InputLocation* input_location) {
+ DCHECK(!node->is_dead());
// Update the next use.
- node->set_next_use(input.next_use_id());
+ node->set_next_use(input_location->next_use_id());
+
+ if (!node->is_dead()) return;
// If a value is dead, make sure it's cleared.
- if (node->is_dead()) {
- FreeRegisters(node);
+ FreeRegisters(node);
+}
- // If the stack slot is a local slot, free it so it can be reused.
- if (node->is_spilled()) {
- compiler::AllocatedOperand slot = node->spill_slot();
- if (slot.index() > 0) free_slots_.push_back(slot.index());
- }
- return;
- }
+void StraightForwardRegisterAllocator::UpdateUse(
+ const EagerDeoptInfo& deopt_info) {
+ const CompactInterpreterFrameState* checkpoint_state =
+ deopt_info.state.register_frame;
+ int index = 0;
+ checkpoint_state->ForEachValue(
+ *compilation_unit_, [&](ValueNode* node, interpreter::Register reg) {
+ InputLocation* input = &deopt_info.input_locations[index++];
+ input->InjectAllocated(node->allocation());
+ UpdateUse(node, input);
+ });
+}
+
+void StraightForwardRegisterAllocator::UpdateUse(
+ const LazyDeoptInfo& deopt_info) {
+ const CompactInterpreterFrameState* checkpoint_state =
+ deopt_info.state.register_frame;
+ int index = 0;
+ checkpoint_state->ForEachValue(
+ *compilation_unit_, [&](ValueNode* node, interpreter::Register reg) {
+ // Skip over the result location.
+ if (reg == deopt_info.result_location) return;
+ InputLocation* input = &deopt_info.input_locations[index++];
+ input->InjectAllocated(node->allocation());
+ UpdateUse(node, input);
+ });
}
void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
for (Input& input : *node) AssignInput(input);
AssignTemporaries(node);
- for (Input& input : *node) UpdateInputUse(node->id(), input);
+ if (node->properties().can_eager_deopt()) {
+ UpdateUse(*node->eager_deopt_info());
+ }
+ for (Input& input : *node) UpdateUse(&input);
if (node->properties().is_call()) SpillAndClearRegisters();
- // TODO(verwaest): This isn't a good idea :)
- if (node->properties().can_deopt()) SpillRegisters();
// Allocate node output.
if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
+ // Lazy deopts are semantically after the node, so update them last.
+ if (node->properties().can_lazy_deopt()) {
+ UpdateUse(*node->lazy_deopt_info());
+ }
+
if (FLAG_trace_maglev_regalloc) {
- printing_visitor_->Process(
- node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
- nullptr));
+ printing_visitor_->Process(node,
+ ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << "\n";
@@ -477,7 +500,10 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
BasicBlock* block) {
for (Input& input : *node) AssignInput(input);
AssignTemporaries(node);
- for (Input& input : *node) UpdateInputUse(node->id(), input);
+ if (node->properties().can_eager_deopt()) {
+ UpdateUse(*node->eager_deopt_info());
+ }
+ for (Input& input : *node) UpdateUse(&input);
if (node->properties().is_call()) SpillAndClearRegisters();
@@ -490,14 +516,12 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
Input& input = phi->input(block->predecessor_id());
input.InjectAllocated(input.node()->allocation());
}
- for (Phi* phi : *phis) {
- UpdateInputUse(phi->id(), phi->input(block->predecessor_id()));
- }
+ for (Phi* phi : *phis) UpdateUse(&phi->input(block->predecessor_id()));
}
}
// TODO(verwaest): This isn't a good idea :)
- if (node->properties().can_deopt()) SpillRegisters();
+ if (node->properties().can_eager_deopt()) SpillRegisters();
// Merge register values. Values only flowing into phis and not being
// independently live will be killed as part of the merge.
@@ -513,9 +537,8 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
}
if (FLAG_trace_maglev_regalloc) {
- printing_visitor_->Process(
- node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
- nullptr));
+ printing_visitor_->Process(node,
+ ProcessingState(compilation_unit_, block_it_));
}
}
@@ -528,8 +551,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
phi->result().SetAllocated(ForceAllocate(reg, phi));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
- phi, ProcessingState(compilation_unit_, block_it_, nullptr,
- nullptr, nullptr));
+ phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (reuse) " << input.operand() << std::endl;
}
@@ -629,13 +651,8 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
DCHECK(!node->is_spilled());
- uint32_t free_slot;
- if (free_slots_.empty()) {
- free_slot = top_of_stack_++;
- } else {
- free_slot = free_slots_.back();
- free_slots_.pop_back();
- }
+ uint32_t free_slot = top_of_stack_++;
+ compilation_unit_->push_stack_value_repr(node->value_representation());
node->Spill(compiler::AllocatedOperand(compiler::AllocatedOperand::STACK_SLOT,
MachineRepresentation::kTagged,
free_slot));
@@ -659,6 +676,7 @@ void StraightForwardRegisterAllocator::FreeSomeRegister() {
}
}
DCHECK(best.is_valid());
+ DropRegisterValue(best);
FreeRegister(best);
}
diff --git a/deps/v8/src/maglev/maglev-regalloc.h b/deps/v8/src/maglev/maglev-regalloc.h
index c198d2f8fc..5bc435f24e 100644
--- a/deps/v8/src/maglev/maglev-regalloc.h
+++ b/deps/v8/src/maglev/maglev-regalloc.h
@@ -33,7 +33,6 @@ class StraightForwardRegisterAllocator {
int top_of_stack_ = 0;
RegList free_registers_ = kAllocatableGeneralRegisters;
- std::vector<uint32_t> free_slots_;
RegList used_registers() const {
// Only allocatable registers should be free.
@@ -46,7 +45,10 @@ class StraightForwardRegisterAllocator {
void PrintLiveRegs() const;
- void UpdateInputUse(uint32_t use, const Input& input);
+ void UpdateUse(Input* input) { return UpdateUse(input->node(), input); }
+ void UpdateUse(ValueNode* node, InputLocation* input_location);
+ void UpdateUse(const EagerDeoptInfo& deopt_info);
+ void UpdateUse(const LazyDeoptInfo& deopt_info);
void AllocateControlNode(ControlNode* node, BasicBlock* block);
void AllocateNode(Node* node);
diff --git a/deps/v8/src/maglev/maglev-vreg-allocator.h b/deps/v8/src/maglev/maglev-vreg-allocator.h
index 19d5517f70..269f897a11 100644
--- a/deps/v8/src/maglev/maglev-vreg-allocator.h
+++ b/deps/v8/src/maglev/maglev-vreg-allocator.h
@@ -26,8 +26,6 @@ class MaglevVregAllocationState {
class MaglevVregAllocator {
public:
- static constexpr bool kNeedsCheckpointStates = true;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
for (BasicBlock* block : *graph) {
diff --git a/deps/v8/src/maglev/maglev.cc b/deps/v8/src/maglev/maglev.cc
index 6397d02e60..0240fb3261 100644
--- a/deps/v8/src/maglev/maglev.cc
+++ b/deps/v8/src/maglev/maglev.cc
@@ -16,7 +16,7 @@ MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
DCHECK(FLAG_maglev);
auto info = maglev::MaglevCompilationInfo::New(isolate, function);
maglev::MaglevCompilationUnit* const unit = info->toplevel_compilation_unit();
- maglev::MaglevCompiler::Compile(unit);
+ maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(), unit);
return maglev::MaglevCompiler::GenerateCode(unit);
}
diff --git a/deps/v8/src/objects/call-site-info.cc b/deps/v8/src/objects/call-site-info.cc
index decd46dda1..e03ee1f8c6 100644
--- a/deps/v8/src/objects/call-site-info.cc
+++ b/deps/v8/src/objects/call-site-info.cc
@@ -22,6 +22,12 @@ bool CallSiteInfo::IsPromiseAll() const {
return fun == fun.native_context().promise_all();
}
+bool CallSiteInfo::IsPromiseAllSettled() const {
+ if (!IsAsync()) return false;
+ JSFunction fun = JSFunction::cast(function());
+ return fun == fun.native_context().promise_all_settled();
+}
+
bool CallSiteInfo::IsPromiseAny() const {
if (!IsAsync()) return false;
JSFunction fun = JSFunction::cast(function());
@@ -507,6 +513,7 @@ int CallSiteInfo::GetSourcePosition(Handle<CallSiteInfo> info) {
return info->code_offset_or_source_position();
}
DCHECK(!info->IsPromiseAll());
+ DCHECK(!info->IsPromiseAllSettled());
DCHECK(!info->IsPromiseAny());
int source_position =
ComputeSourcePosition(info, info->code_offset_or_source_position());
@@ -666,6 +673,14 @@ void AppendMethodCall(Isolate* isolate, Handle<CallSiteInfo> frame,
Handle<Object> method_name = CallSiteInfo::GetMethodName(frame);
Handle<Object> function_name = CallSiteInfo::GetFunctionName(frame);
+ Handle<Object> receiver(frame->receiver_or_instance(), isolate);
+ if (receiver->IsJSClassConstructor()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(receiver);
+ Handle<String> class_name = JSFunction::GetDebugName(function);
+ if (class_name->length() != 0) {
+ type_name = class_name;
+ }
+ }
if (IsNonEmptyString(function_name)) {
Handle<String> function_string = Handle<String>::cast(function_name);
if (IsNonEmptyString(type_name)) {
@@ -704,7 +719,8 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<CallSiteInfo> frame,
Handle<Object> function_name = CallSiteInfo::GetFunctionName(frame);
if (frame->IsAsync()) {
builder->AppendCStringLiteral("async ");
- if (frame->IsPromiseAll() || frame->IsPromiseAny()) {
+ if (frame->IsPromiseAll() || frame->IsPromiseAny() ||
+ frame->IsPromiseAllSettled()) {
builder->AppendCStringLiteral("Promise.");
builder->AppendString(Handle<String>::cast(function_name));
builder->AppendCStringLiteral(" (index ");
diff --git a/deps/v8/src/objects/call-site-info.h b/deps/v8/src/objects/call-site-info.h
index 42d8788351..7e143626c8 100644
--- a/deps/v8/src/objects/call-site-info.h
+++ b/deps/v8/src/objects/call-site-info.h
@@ -40,6 +40,7 @@ class CallSiteInfo : public TorqueGeneratedCallSiteInfo<CallSiteInfo, Struct> {
bool IsMethodCall() const;
bool IsToplevel() const;
bool IsPromiseAll() const;
+ bool IsPromiseAllSettled() const;
bool IsPromiseAny() const;
bool IsNative() const;
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 0286bde239..78b98e653d 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -383,8 +383,9 @@ int Code::raw_body_size() const {
}
int Code::InstructionSize() const {
- return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionSize()
- : raw_instruction_size();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapInstructionSize(*this, builtin_id())
+ : raw_instruction_size();
}
Address Code::raw_instruction_start() const {
@@ -392,8 +393,9 @@ Address Code::raw_instruction_start() const {
}
Address Code::InstructionStart() const {
- return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionStart()
- : raw_instruction_start();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? i::OffHeapInstructionStart(*this, builtin_id())
+ : raw_instruction_start();
}
Address Code::raw_instruction_end() const {
@@ -401,8 +403,9 @@ Address Code::raw_instruction_end() const {
}
Address Code::InstructionEnd() const {
- return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionEnd()
- : raw_instruction_end();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? i::OffHeapInstructionEnd(*this, builtin_id())
+ : raw_instruction_end();
}
Address Code::raw_metadata_start() const {
@@ -428,24 +431,14 @@ int Code::GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const {
return static_cast<int>(offset);
}
-Address Code::MetadataStart() const {
- STATIC_ASSERT(kOnHeapBodyIsContiguous);
- return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataStart()
- : raw_metadata_start();
-}
-
Address Code::raw_metadata_end() const {
return raw_metadata_start() + raw_metadata_size();
}
-Address Code::MetadataEnd() const {
- return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataEnd()
- : raw_metadata_end();
-}
-
int Code::MetadataSize() const {
- return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataSize()
- : raw_metadata_size();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapMetadataSize(*this, builtin_id())
+ : raw_metadata_size();
}
int Code::SizeIncludingMetadata() const {
@@ -457,6 +450,48 @@ int Code::SizeIncludingMetadata() const {
return size;
}
+Address Code::SafepointTableAddress() const {
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapSafepointTableAddress(*this, builtin_id())
+ : raw_metadata_start() + safepoint_table_offset();
+}
+
+int Code::safepoint_table_size() const {
+ DCHECK_GE(handler_table_offset() - safepoint_table_offset(), 0);
+ return handler_table_offset() - safepoint_table_offset();
+}
+
+bool Code::has_safepoint_table() const { return safepoint_table_size() > 0; }
+
+Address Code::HandlerTableAddress() const {
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapHandlerTableAddress(*this, builtin_id())
+ : raw_metadata_start() + handler_table_offset();
+}
+
+int Code::handler_table_size() const {
+ DCHECK_GE(constant_pool_offset() - handler_table_offset(), 0);
+ return constant_pool_offset() - handler_table_offset();
+}
+
+bool Code::has_handler_table() const { return handler_table_size() > 0; }
+
+int Code::constant_pool_size() const {
+ const int size = code_comments_offset() - constant_pool_offset();
+ DCHECK_IMPLIES(!FLAG_enable_embedded_constant_pool, size == 0);
+ DCHECK_GE(size, 0);
+ return size;
+}
+
+bool Code::has_constant_pool() const { return constant_pool_size() > 0; }
+
+int Code::code_comments_size() const {
+ DCHECK_GE(unwinding_info_offset() - code_comments_offset(), 0);
+ return unwinding_info_offset() - code_comments_offset();
+}
+
+bool Code::has_code_comments() const { return code_comments_size() > 0; }
+
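Taken together, the new size accessors fix the order of sections within the metadata area: each section runs from its own offset to the next section's offset, and the unwinding info extends to the end of the metadata. In offset order:

  safepoint table   [safepoint_table_offset, handler_table_offset)
  handler table     [handler_table_offset,   constant_pool_offset)
  constant pool     [constant_pool_offset,   code_comments_offset)
  code comments     [code_comments_offset,   unwinding_info_offset)
  unwinding info    [unwinding_info_offset,  metadata end)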
ByteArray Code::unchecked_relocation_info() const {
PtrComprCageBase cage_base = main_cage_base();
return ByteArray::unchecked_cast(
@@ -596,23 +631,23 @@ inline bool Code::is_baseline_leave_frame_builtin() const {
}
#ifdef V8_EXTERNAL_CODE_SPACE
-// Note, must be in sync with Code::checks_optimization_marker().
-inline bool CodeDataContainer::checks_optimization_marker() const {
+// Note, must be in sync with Code::checks_tiering_state().
+inline bool CodeDataContainer::checks_tiering_state() const {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- bool checks_marker = (builtin_id() == Builtin::kCompileLazy ||
- builtin_id() == Builtin::kInterpreterEntryTrampoline ||
- CodeKindCanTierUp(kind()));
- return checks_marker ||
+ bool checks_state = (builtin_id() == Builtin::kCompileLazy ||
+ builtin_id() == Builtin::kInterpreterEntryTrampoline ||
+ CodeKindCanTierUp(kind()));
+ return checks_state ||
(CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
}
#endif // V8_EXTERNAL_CODE_SPACE
-// Note, must be in sync with CodeDataContainer::checks_optimization_marker().
-inline bool Code::checks_optimization_marker() const {
- bool checks_marker = (builtin_id() == Builtin::kCompileLazy ||
- builtin_id() == Builtin::kInterpreterEntryTrampoline ||
- CodeKindCanTierUp(kind()));
- return checks_marker ||
+// Note, must be in sync with CodeDataContainer::checks_tiering_state().
+inline bool Code::checks_tiering_state() const {
+ bool checks_state = (builtin_id() == Builtin::kCompileLazy ||
+ builtin_id() == Builtin::kInterpreterEntryTrampoline ||
+ CodeKindCanTierUp(kind()));
+ return checks_state ||
(CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
}
@@ -752,25 +787,10 @@ void Code::set_embedded_objects_cleared(bool flag) {
container.set_kind_specific_flags(updated, kRelaxedStore);
}
-bool Code::deopt_already_counted() const {
- DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags =
- code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
- return DeoptAlreadyCountedField::decode(flags);
-}
-
-void Code::set_deopt_already_counted(bool flag) {
- DCHECK(CodeKindCanDeoptimize(kind()));
- DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags(kRelaxedLoad);
- int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- container.set_kind_specific_flags(updated, kRelaxedStore);
-}
-
bool Code::is_optimized_code() const {
return CodeKindIsOptimizedJSFunction(kind());
}
+
bool Code::is_wasm_code() const { return kind() == CodeKind::WASM_FUNCTION; }
int Code::constant_pool_offset() const {
@@ -792,18 +812,28 @@ void Code::set_constant_pool_offset(int value) {
Address Code::constant_pool() const {
if (!has_constant_pool()) return kNullAddress;
- return MetadataStart() + constant_pool_offset();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapConstantPoolAddress(*this, builtin_id())
+ : raw_metadata_start() + constant_pool_offset();
}
Address Code::code_comments() const {
- return MetadataStart() + code_comments_offset();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapCodeCommentsAddress(*this, builtin_id())
+ : raw_metadata_start() + code_comments_offset();
}
Address Code::unwinding_info_start() const {
- return MetadataStart() + unwinding_info_offset();
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapUnwindingInfoAddress(*this, builtin_id())
+ : raw_metadata_start() + unwinding_info_offset();
}
-Address Code::unwinding_info_end() const { return MetadataEnd(); }
+Address Code::unwinding_info_end() const {
+ return V8_UNLIKELY(is_off_heap_trampoline())
+ ? OffHeapMetadataEnd(*this, builtin_id())
+ : raw_metadata_end();
+}
int Code::unwinding_info_size() const {
DCHECK_GE(unwinding_info_end(), unwinding_info_start());
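
In this hunk constant_pool(), code_comments() and the unwinding-info accessors start branching on is_off_heap_trampoline(): trampolines resolve addresses through the embedded builtins blob (via the OffHeap* helpers declared later in code.h), while regular Code objects keep using raw_metadata_start() plus the stored offset. A rough standalone sketch of that dispatch shape, with stand-in types rather than the real Code API:

    #include <cassert>
    #include <cstdint>
    using Address = uintptr_t;

    struct CodeSketch {
      bool is_off_heap_trampoline;
      Address metadata_start;          // On-heap metadata base (raw_metadata_start).
      Address off_heap_constant_pool;  // Would come from the embedded blob.
      int constant_pool_offset;

      Address constant_pool() const {
        // Off-heap trampolines carry no metadata of their own; the sections
        // live in the embedded builtins blob instead.
        return is_off_heap_trampoline ? off_heap_constant_pool
                                      : metadata_start + constant_pool_offset;
      }
    };

    int main() {
      CodeSketch on_heap{false, 0x1000, 0x9000, 0x20};
      CodeSketch trampoline{true, 0x1000, 0x9000, 0x20};
      assert(on_heap.constant_pool() == 0x1020);
      assert(trampoline.constant_pool() == 0x9000);
    }
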
@@ -1087,6 +1117,7 @@ inline bool CodeDataContainer::is_interpreter_trampoline_builtin() const {
return FromCodeT(*this).name(cage_base); \
}
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_maglevved, bool)
DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_turbofanned, bool)
DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_off_heap_trampoline, bool)
@@ -1156,27 +1187,58 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
}
}
-int BytecodeArray::osr_loop_nesting_level() const {
- return ACQUIRE_READ_INT8_FIELD(*this, kOsrLoopNestingLevelOffset);
+int BytecodeArray::osr_urgency() const {
+ return OsrUrgencyBits::decode(osr_urgency_and_install_target());
}
-void BytecodeArray::set_osr_loop_nesting_level(int depth) {
- DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
- STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- RELEASE_WRITE_INT8_FIELD(*this, kOsrLoopNestingLevelOffset, depth);
+void BytecodeArray::set_osr_urgency(int urgency) {
+ DCHECK(0 <= urgency && urgency <= BytecodeArray::kMaxOsrUrgency);
+ STATIC_ASSERT(BytecodeArray::kMaxOsrUrgency <= OsrUrgencyBits::kMax);
+ uint32_t value = osr_urgency_and_install_target();
+ set_osr_urgency_and_install_target(OsrUrgencyBits::update(value, urgency));
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
// Bytecode is aged by the concurrent marker.
- return static_cast<Age>(RELAXED_READ_INT8_FIELD(*this, kBytecodeAgeOffset));
+ static_assert(kBytecodeAgeSize == kUInt16Size);
+ return static_cast<Age>(RELAXED_READ_INT16_FIELD(*this, kBytecodeAgeOffset));
+}
+
+void BytecodeArray::reset_osr_urgency() { set_osr_urgency(0); }
+
+void BytecodeArray::RequestOsrAtNextOpportunity() {
+ set_osr_urgency(kMaxOsrUrgency);
+}
+
+int BytecodeArray::osr_install_target() {
+ return OsrInstallTargetBits::decode(osr_urgency_and_install_target());
+}
+
+void BytecodeArray::set_osr_install_target(BytecodeOffset jump_loop_offset) {
+ DCHECK_LE(jump_loop_offset.ToInt(), length());
+ set_osr_urgency_and_install_target(OsrInstallTargetBits::update(
+ osr_urgency_and_install_target(), OsrInstallTargetFor(jump_loop_offset)));
+}
+
+void BytecodeArray::reset_osr_install_target() {
+ uint32_t value = osr_urgency_and_install_target();
+ set_osr_urgency_and_install_target(
+ OsrInstallTargetBits::update(value, kNoOsrInstallTarget));
+}
+
+void BytecodeArray::reset_osr_urgency_and_install_target() {
+ set_osr_urgency_and_install_target(OsrUrgencyBits::encode(0) |
+ OsrInstallTargetBits::encode(0));
}
void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
DCHECK_GE(age, kFirstBytecodeAge);
DCHECK_LE(age, kLastBytecodeAge);
- STATIC_ASSERT(kLastBytecodeAge <= kMaxInt8);
+ static_assert(kLastBytecodeAge <= kMaxInt16);
+ static_assert(kBytecodeAgeSize == kUInt16Size);
// Bytecode is aged by the concurrent marker.
- RELAXED_WRITE_INT8_FIELD(*this, kBytecodeAgeOffset, static_cast<int8_t>(age));
+ RELAXED_WRITE_INT16_FIELD(*this, kBytecodeAgeOffset,
+ static_cast<int16_t>(age));
}
int32_t BytecodeArray::parameter_count() const {
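
osr_loop_nesting_level is replaced by accessors over a combined osr_urgency_and_install_target field: reads decode one bit range, writes update only that range and leave the other untouched. A standalone sketch of that decode/update pattern, assuming the widths from the Torque definition further down (3 bits of urgency, 13 bits of install target) and a hand-rolled helper instead of V8's BitField machinery:

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitRange {
      static constexpr uint16_t kMax = (1u << kSize) - 1;
      static constexpr uint16_t kMask = kMax << kShift;
      static constexpr T decode(uint16_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
      // `value` must fit in kSize bits.
      static constexpr uint16_t update(uint16_t packed, T value) {
        return static_cast<uint16_t>((packed & ~kMask) |
                                     (static_cast<uint16_t>(value) << kShift));
      }
    };

    using OsrUrgencyBits = BitRange<int, 0, 3>;         // low 3 bits
    using OsrInstallTargetBits = BitRange<int, 3, 13>;  // remaining 13 bits

    int main() {
      uint16_t field = 0;
      field = OsrUrgencyBits::update(field, 5);
      field = OsrInstallTargetBits::update(field, 0x1234);
      // The install-target write left the urgency bits untouched, and vice versa.
      assert(OsrUrgencyBits::decode(field) == 5);
      assert(OsrInstallTargetBits::decode(field) == 0x1234);
    }
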
@@ -1239,7 +1301,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DEFINE_DEOPT_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(EagerDeoptCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
diff --git a/deps/v8/src/objects/code-kind.cc b/deps/v8/src/objects/code-kind.cc
index 84b7436ef1..f5808108ad 100644
--- a/deps/v8/src/objects/code-kind.cc
+++ b/deps/v8/src/objects/code-kind.cc
@@ -24,6 +24,8 @@ const char* CodeKindToMarker(CodeKind kind) {
return "~";
case CodeKind::BASELINE:
return "^";
+ case CodeKind::MAGLEV:
+ return "+";
case CodeKind::TURBOFAN:
return "*";
default:
diff --git a/deps/v8/src/objects/code-kind.h b/deps/v8/src/objects/code-kind.h
index dbb007df31..b43affdc2d 100644
--- a/deps/v8/src/objects/code-kind.h
+++ b/deps/v8/src/objects/code-kind.h
@@ -97,15 +97,13 @@ inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
return CodeKindIsUnoptimizedJSFunction(kind);
}
-// The optimization marker field on the feedback vector has a dual purpose of
-// controlling the tier-up workflow, and caching the produced code object for
-// access from multiple closures.
+// TODO(jgruber): Rename or remove this predicate. Currently it means 'is this
+// kind stored either in the FeedbackVector cache, or in the OSR cache?'.
inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
return kind == CodeKind::TURBOFAN;
}
inline CodeKind CodeKindForTopTier() { return CodeKind::TURBOFAN; }
-inline CodeKind CodeKindForOSR() { return CodeKind::TURBOFAN; }
// The dedicated CodeKindFlag enum represents all code kinds in a format
// suitable for bit sets.
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 96bda038db..95e312fe6d 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -20,7 +20,7 @@
#include "src/objects/code-kind.h"
#include "src/objects/fixed-array.h"
#include "src/roots/roots-inl.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"
#ifdef ENABLE_DISASSEMBLER
@@ -33,43 +33,117 @@
namespace v8 {
namespace internal {
-Address Code::SafepointTableAddress() const {
- return MetadataStart() + safepoint_table_offset();
+namespace {
+
+// Helper function for getting an EmbeddedData that can handle un-embedded
+// builtins when short builtin calls are enabled.
+inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(
+ HeapObject code) {
+#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
+ // GetIsolateFromWritableObject(*this) works for both read-only and writable
+ // objects when pointer compression is enabled with a per-Isolate cage.
+ return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
+#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ // When pointer compression is enabled with a shared cage, there is also a
+ // shared CodeRange. When short builtin calls are enabled, there is a single
+ // copy of the re-embedded builtins in the shared CodeRange, so use that if
+ // it's present.
+ if (FLAG_jitless) return EmbeddedData::FromBlob();
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ return (code_range && code_range->embedded_blob_code_copy() != nullptr)
+ ? EmbeddedData::FromBlob(code_range)
+ : EmbeddedData::FromBlob();
+#else
+ // Otherwise there is a single copy of the blob across all Isolates, use the
+ // global atomic variables.
+ return EmbeddedData::FromBlob();
+#endif
+}
+
+} // namespace
+
+Address OffHeapInstructionStart(HeapObject code, Builtin builtin) {
+ // TODO(11527): Here and below: pass Isolate as an argument for getting
+ // the EmbeddedData.
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.InstructionStartOfBuiltin(builtin);
}
-int Code::safepoint_table_size() const {
- DCHECK_GE(handler_table_offset() - safepoint_table_offset(), 0);
- return handler_table_offset() - safepoint_table_offset();
+Address OffHeapInstructionEnd(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.InstructionStartOfBuiltin(builtin) +
+ d.InstructionSizeOfBuiltin(builtin);
}
-bool Code::has_safepoint_table() const { return safepoint_table_size() > 0; }
+int OffHeapInstructionSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.InstructionSizeOfBuiltin(builtin);
+}
-Address Code::HandlerTableAddress() const {
- return MetadataStart() + handler_table_offset();
+Address OffHeapMetadataStart(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.MetadataStartOfBuiltin(builtin);
}
-int Code::handler_table_size() const {
- DCHECK_GE(constant_pool_offset() - handler_table_offset(), 0);
- return constant_pool_offset() - handler_table_offset();
+Address OffHeapMetadataEnd(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.MetadataStartOfBuiltin(builtin) + d.MetadataSizeOfBuiltin(builtin);
}
-bool Code::has_handler_table() const { return handler_table_size() > 0; }
+int OffHeapMetadataSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.MetadataSizeOfBuiltin(builtin);
+}
-int Code::constant_pool_size() const {
- const int size = code_comments_offset() - constant_pool_offset();
- DCHECK_IMPLIES(!FLAG_enable_embedded_constant_pool, size == 0);
- DCHECK_GE(size, 0);
- return size;
+Address OffHeapSafepointTableAddress(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.SafepointTableStartOf(builtin);
}
-bool Code::has_constant_pool() const { return constant_pool_size() > 0; }
+int OffHeapSafepointTableSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.SafepointTableSizeOf(builtin);
+}
-int Code::code_comments_size() const {
- DCHECK_GE(unwinding_info_offset() - code_comments_offset(), 0);
- return unwinding_info_offset() - code_comments_offset();
+Address OffHeapHandlerTableAddress(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.HandlerTableStartOf(builtin);
}
-bool Code::has_code_comments() const { return code_comments_size() > 0; }
+int OffHeapHandlerTableSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.HandlerTableSizeOf(builtin);
+}
+
+Address OffHeapConstantPoolAddress(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.ConstantPoolStartOf(builtin);
+}
+
+int OffHeapConstantPoolSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.ConstantPoolSizeOf(builtin);
+}
+
+Address OffHeapCodeCommentsAddress(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.CodeCommentsStartOf(builtin);
+}
+
+int OffHeapCodeCommentsSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.CodeCommentsSizeOf(builtin);
+}
+
+Address OffHeapUnwindingInfoAddress(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.UnwindingInfoStartOf(builtin);
+}
+
+int OffHeapUnwindingInfoSize(HeapObject code, Builtin builtin) {
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ return d.UnwindingInfoSizeOf(builtin);
+}
void Code::ClearEmbeddedObjects(Heap* heap) {
HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
@@ -145,66 +219,6 @@ SafepointEntry Code::GetSafepointEntry(Isolate* isolate, Address pc) {
return table.FindEntry(pc);
}
-int Code::OffHeapInstructionSize() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
- return raw_instruction_size();
- }
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.InstructionSizeOfBuiltin(builtin_id());
-}
-
-namespace {
-
-// Helper function for getting an EmbeddedData that can handle un-embedded
-// builtins when short builtin calls are enabled.
-inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(Code code) {
-#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
- // GetIsolateFromWritableObject(*this) works for both read-only and writable
- // objects when pointer compression is enabled with a per-Isolate cage.
- return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
-#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
- // When pointer compression is enabled with a shared cage, there is also a
- // shared CodeRange. When short builtin calls are enabled, there is a single
- // copy of the re-embedded builtins in the shared CodeRange, so use that if
- // it's present.
- if (FLAG_jitless) return EmbeddedData::FromBlob();
- CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
- return (code_range && code_range->embedded_blob_code_copy() != nullptr)
- ? EmbeddedData::FromBlob(code_range)
- : EmbeddedData::FromBlob();
-#else
- // Otherwise there is a single copy of the blob across all Isolates, use the
- // global atomic variables.
- return EmbeddedData::FromBlob();
-#endif
-}
-
-} // namespace
-
-Address Code::OffHeapInstructionStart() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
- return raw_instruction_size();
- }
-
- // TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
- return d.InstructionStartOfBuiltin(builtin_id());
-}
-
-Address Code::OffHeapInstructionEnd() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
- return raw_instruction_size();
- }
-
- // TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
- return d.InstructionStartOfBuiltin(builtin_id()) +
- d.InstructionSizeOfBuiltin(builtin_id());
-}
-
Address Code::OffHeapInstructionStart(Isolate* isolate, Address pc) const {
DCHECK(is_off_heap_trampoline());
EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
@@ -218,34 +232,6 @@ Address Code::OffHeapInstructionEnd(Isolate* isolate, Address pc) const {
d.InstructionSizeOfBuiltin(builtin_id());
}
-int Code::OffHeapMetadataSize() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
- return raw_instruction_size();
- }
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.MetadataSizeOfBuiltin(builtin_id());
-}
-
-Address Code::OffHeapMetadataStart() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
- return raw_instruction_size();
- }
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.MetadataStartOfBuiltin(builtin_id());
-}
-
-Address Code::OffHeapMetadataEnd() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
- return raw_instruction_size();
- }
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.MetadataStartOfBuiltin(builtin_id()) +
- d.MetadataSizeOfBuiltin(builtin_id());
-}
-
// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourcePosition(int offset) {
CHECK_NE(kind(), CodeKind::BASELINE);
@@ -513,7 +499,9 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
os << "compiler = "
<< (is_turbofanned()
? "turbofan"
- : kind() == CodeKind::BASELINE ? "baseline" : "unknown")
+ : is_maglevved()
+ ? "turbofan"
+ : kind() == CodeKind::BASELINE ? "baseline" : "unknown")
<< "\n";
os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n";
@@ -535,8 +523,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
os << "\nConstant Pool (size = " << pool_size << ")\n";
base::Vector<char> buf = base::Vector<char>::New(50);
- intptr_t* ptr =
- reinterpret_cast<intptr_t*>(MetadataStart() + constant_pool_offset());
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(constant_pool());
for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
os << static_cast<const void*>(ptr) << " " << buf.begin() << "\n";
@@ -623,8 +610,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Parameter count " << parameter_count() << "\n";
os << "Register count " << register_count() << "\n";
os << "Frame size " << frame_size() << "\n";
- os << "OSR nesting level: " << osr_loop_nesting_level() << "\n";
- os << "Bytecode Age: " << bytecode_age() << "\n";
+ os << "OSR urgency: " << osr_urgency() << "\n";
+ os << "Bytecode age: " << bytecode_age() << "\n";
Address base_address = GetFirstBytecodeAddress();
SourcePositionTableIterator source_positions(SourcePositionTable());
@@ -711,8 +698,9 @@ void BytecodeArray::MakeOlder() {
DCHECK_LE(RoundDown(age_addr, kTaggedSize) + kTaggedSize, address() + Size());
Age age = bytecode_age();
if (age < kLastBytecodeAge) {
- base::AsAtomic8::Relaxed_CompareAndSwap(
- reinterpret_cast<base::Atomic8*>(age_addr), age, age + 1);
+ static_assert(kBytecodeAgeSize == kUInt16Size);
+ base::AsAtomic16::Relaxed_CompareAndSwap(
+ reinterpret_cast<base::Atomic16*>(age_addr), age, age + 1);
}
DCHECK_GE(bytecode_age(), kFirstBytecodeAge);
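
Since the bytecode age grows from 8 to 16 bits, MakeOlder now bumps it with a relaxed 16-bit compare-and-swap so that racing concurrent markers age the array at most once per pass. A standalone sketch of the same idea using std::atomic (the patch itself uses V8's base::AsAtomic16, not std::atomic):

    #include <atomic>
    #include <cstdint>

    void MakeOlder(std::atomic<uint16_t>& age, uint16_t last_age) {
      uint16_t current = age.load(std::memory_order_relaxed);
      if (current < last_age) {
        uint16_t next = static_cast<uint16_t>(current + 1);
        // If another thread aged the bytecode first, the CAS fails and we keep
        // that thread's value instead of incrementing twice.
        age.compare_exchange_strong(current, next, std::memory_order_relaxed);
      }
    }

    int main() {
      std::atomic<uint16_t> age{0};
      MakeOlder(age, /*last_age=*/5);
      return age.load() == 1 ? 0 : 1;
    }
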
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 690c68de3f..4c2679e3f9 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -130,13 +130,17 @@ class CodeDataContainer : public HeapObject {
inline bool is_baseline_trampoline_builtin() const;
inline bool is_baseline_leave_frame_builtin() const;
- // Tells whether the code checks the optimization marker in the function's
+ // Tells whether the code checks the tiering state in the function's
// feedback vector.
- inline bool checks_optimization_marker() const;
+ inline bool checks_tiering_state() const;
// Tells whether the outgoing parameters of this code are tagged pointers.
inline bool has_tagged_outgoing_params() const;
+ // [is_maglevved]: Tells whether the code object was generated by the
+ // Maglev optimizing compiler.
+ inline bool is_maglevved() const;
+
// [is_turbofanned]: Tells whether the code object was generated by the
// TurboFan optimizing compiler.
inline bool is_turbofanned() const;
@@ -285,11 +289,9 @@ class Code : public HeapObject {
inline Address raw_instruction_start() const;
inline Address InstructionStart() const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionStart() const;
inline Address raw_instruction_end() const;
inline Address InstructionEnd() const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
// When builtins un-embedding is enabled for the Isolate
// (see Isolate::is_short_builtin_calls_enabled()) then both embedded and
@@ -316,49 +318,43 @@ class Code : public HeapObject {
inline int raw_instruction_size() const;
inline void set_raw_instruction_size(int value);
inline int InstructionSize() const;
- V8_EXPORT_PRIVATE int OffHeapInstructionSize() const;
inline Address raw_metadata_start() const;
- inline Address MetadataStart() const;
- V8_EXPORT_PRIVATE Address OffHeapMetadataStart() const;
inline Address raw_metadata_end() const;
- inline Address MetadataEnd() const;
- V8_EXPORT_PRIVATE Address OffHeapMetadataEnd() const;
inline int raw_metadata_size() const;
inline void set_raw_metadata_size(int value);
inline int MetadataSize() const;
- int OffHeapMetadataSize() const;
// The metadata section is aligned to this value.
static constexpr int kMetadataAlignment = kIntSize;
// [safepoint_table_offset]: The offset where the safepoint table starts.
inline int safepoint_table_offset() const { return 0; }
- Address SafepointTableAddress() const;
- int safepoint_table_size() const;
- bool has_safepoint_table() const;
+ inline Address SafepointTableAddress() const;
+ inline int safepoint_table_size() const;
+ inline bool has_safepoint_table() const;
// [handler_table_offset]: The offset where the exception handler table
// starts.
inline int handler_table_offset() const;
inline void set_handler_table_offset(int offset);
- Address HandlerTableAddress() const;
- int handler_table_size() const;
- bool has_handler_table() const;
+ inline Address HandlerTableAddress() const;
+ inline int handler_table_size() const;
+ inline bool has_handler_table() const;
// [constant_pool offset]: Offset of the constant pool.
inline int constant_pool_offset() const;
inline void set_constant_pool_offset(int offset);
inline Address constant_pool() const;
- int constant_pool_size() const;
- bool has_constant_pool() const;
+ inline int constant_pool_size() const;
+ inline bool has_constant_pool() const;
// [code_comments_offset]: Offset of the code comment section.
inline int code_comments_offset() const;
inline void set_code_comments_offset(int offset);
inline Address code_comments() const;
- V8_EXPORT_PRIVATE int code_comments_size() const;
- V8_EXPORT_PRIVATE bool has_code_comments() const;
+ inline int code_comments_size() const;
+ inline bool has_code_comments() const;
// [unwinding_info_offset]: Offset of the unwinding info section.
inline int32_t unwinding_info_offset() const;
@@ -425,9 +421,9 @@ class Code : public HeapObject {
inline bool is_baseline_trampoline_builtin() const;
inline bool is_baseline_leave_frame_builtin() const;
- // Tells whether the code checks the optimization marker in the function's
+ // Tells whether the code checks the tiering state in the function's
// feedback vector.
- inline bool checks_optimization_marker() const;
+ inline bool checks_tiering_state() const;
// Tells whether the outgoing parameters of this code are tagged pointers.
inline bool has_tagged_outgoing_params() const;
@@ -475,11 +471,6 @@ class Code : public HeapObject {
inline bool embedded_objects_cleared() const;
inline void set_embedded_objects_cleared(bool flag);
- // [deopt_already_counted]: If CodeKindCanDeoptimize(kind), tells whether
- // the code was already deoptimized.
- inline bool deopt_already_counted() const;
- inline void set_deopt_already_counted(bool flag);
-
// [is_promise_rejection]: For kind BUILTIN tells whether the
// exception thrown by the code will lead to promise rejection or
// uncaught if both this and is_exception_caught is set.
@@ -668,8 +659,8 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kHeaderPaddingSize =
- FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 8 : 20)
- : (COMPRESS_POINTERS_BOOL ? 12 : 24);
+ FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 8 : 52)
+ : (COMPRESS_POINTERS_BOOL ? 12 : 56);
#elif V8_TARGET_ARCH_S390X
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_RISCV64
@@ -698,12 +689,11 @@ class Code : public HeapObject {
#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
V(MarkedForDeoptimizationField, bool, 1, _) \
V(EmbeddedObjectsClearedField, bool, 1, _) \
- V(DeoptAlreadyCountedField, bool, 1, _) \
V(CanHaveWeakObjectsField, bool, 1, _) \
V(IsPromiseRejectionField, bool, 1, _)
DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
- STATIC_ASSERT(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 5);
+ STATIC_ASSERT(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 4);
STATIC_ASSERT(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) *
kBitsPerByte);
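
Dropping DeoptAlreadyCountedField shrinks the kind-specific flags from 5 bits to 4, and the adjusted STATIC_ASSERT pins that total so a stale count fails at compile time rather than silently overflowing the flags field. A small standalone version of the same guard (the constants below are illustrative):

    #include <cstdint>

    constexpr int kMarkedForDeoptimizationBits = 1;
    constexpr int kEmbeddedObjectsClearedBits = 1;
    constexpr int kCanHaveWeakObjectsBits = 1;
    constexpr int kIsPromiseRejectionBits = 1;

    constexpr int kKindSpecificFlagBits =
        kMarkedForDeoptimizationBits + kEmbeddedObjectsClearedBits +
        kCanHaveWeakObjectsBits + kIsPromiseRejectionBits;

    // Mirrors the updated assertions: four bits used, and they must fit the
    // 32-bit kind_specific_flags slot in the CodeDataContainer.
    static_assert(kKindSpecificFlagBits == 4, "update when adding/removing flags");
    static_assert(kKindSpecificFlagBits <= 32, "flags must fit a 32-bit field");

    int main() { return 0; }
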
@@ -738,6 +728,37 @@ class Code : public HeapObject {
OBJECT_CONSTRUCTORS(Code, HeapObject);
};
+// TODO(v8:11880): move these functions to CodeDataContainer once they are no
+// longer used from Code.
+V8_EXPORT_PRIVATE Address OffHeapInstructionStart(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE Address OffHeapInstructionEnd(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapInstructionSize(HeapObject code, Builtin builtin);
+
+V8_EXPORT_PRIVATE Address OffHeapMetadataStart(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE Address OffHeapMetadataEnd(HeapObject code, Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapMetadataSize(HeapObject code, Builtin builtin);
+
+V8_EXPORT_PRIVATE Address OffHeapSafepointTableAddress(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapSafepointTableSize(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE Address OffHeapHandlerTableAddress(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapHandlerTableSize(HeapObject code, Builtin builtin);
+V8_EXPORT_PRIVATE Address OffHeapConstantPoolAddress(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapConstantPoolSize(HeapObject code, Builtin builtin);
+V8_EXPORT_PRIVATE Address OffHeapCodeCommentsAddress(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapCodeCommentsSize(HeapObject code, Builtin builtin);
+V8_EXPORT_PRIVATE Address OffHeapUnwindingInfoAddress(HeapObject code,
+ Builtin builtin);
+V8_EXPORT_PRIVATE int OffHeapUnwindingInfoSize(HeapObject code,
+ Builtin builtin);
+
class Code::OptimizedCodeIterator {
public:
explicit OptimizedCodeIterator(Isolate* isolate);
@@ -817,10 +838,6 @@ class AbstractCode : public HeapObject {
inline Code GetCode();
inline BytecodeArray GetBytecodeArray();
- // Max loop nesting marker used to postpose OSR. We don't take loop
- // nesting that is deeper than 5 levels into account.
- static const int kMaxLoopNestingMarker = 6;
-
OBJECT_CONSTRUCTORS(AbstractCode, HeapObject);
private:
@@ -935,6 +952,8 @@ DEFINE_OPERATORS_FOR_FLAGS(DependentCode::DependencyGroups)
class BytecodeArray
: public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> {
public:
+ DEFINE_TORQUE_GENERATED_OSRURGENCY_AND_INSTALL_TARGET()
+
enum Age {
kNoAgeBytecodeAge = 0,
kQuadragenarianBytecodeAge,
@@ -953,36 +972,68 @@ class BytecodeArray
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
- // Setter and getter
inline byte get(int index) const;
inline void set(int index, byte value);
- // Returns data start address.
inline Address GetFirstBytecodeAddress();
- // Accessors for frame size.
inline int32_t frame_size() const;
inline void set_frame_size(int32_t frame_size);
- // Accessor for register count (derived from frame_size).
+ // Note: The register count is derived from frame_size.
inline int register_count() const;
- // Accessors for parameter count (including implicit 'this' receiver).
+ // Note: the parameter count includes the implicit 'this' receiver.
inline int32_t parameter_count() const;
inline void set_parameter_count(int32_t number_of_parameters);
- // Register used to pass the incoming new.target or generator object from the
- // fucntion call.
inline interpreter::Register incoming_new_target_or_generator_register()
const;
inline void set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register);
- // Accessors for OSR loop nesting level.
- inline int osr_loop_nesting_level() const;
- inline void set_osr_loop_nesting_level(int depth);
+ // The [osr_urgency] controls when OSR is attempted, and is incremented as
+ // the function becomes hotter. When the current loop depth is less than the
+ // osr_urgency, JumpLoop calls into runtime to attempt OSR optimization.
+ static constexpr int kMaxOsrUrgency = 6;
+ STATIC_ASSERT(kMaxOsrUrgency <= OsrUrgencyBits::kMax);
+ inline int osr_urgency() const;
+ inline void set_osr_urgency(int urgency);
+ inline void reset_osr_urgency();
+ inline void RequestOsrAtNextOpportunity();
+
+ // The [osr_install_target] is used upon finishing concurrent OSR
+ // compilation; instead of bumping the osr_urgency (which would target all
+ // JumpLoops of appropriate loop_depth), we target a specific JumpLoop at the
+ // given bytecode offset.
+ static constexpr int kNoOsrInstallTarget = 0;
+ static constexpr int OsrInstallTargetFor(BytecodeOffset offset) {
+ // Any set `osr_install_target` must be non-zero since zero is the 'unset'
+ // value and is ignored by generated code. For branchless code (both here
+ // and in generated code), we simply OR in a 1.
+ STATIC_ASSERT(kNoOsrInstallTarget == 0);
+ return (offset.ToInt() | 1) &
+ (OsrInstallTargetBits::kMask >> OsrInstallTargetBits::kShift);
+ }
+
+ inline int osr_install_target();
+ inline void set_osr_install_target(BytecodeOffset jump_loop_offset);
+ inline void reset_osr_install_target();
+
+ inline void reset_osr_urgency_and_install_target();
+
+ static constexpr int kBytecodeAgeSize = kUInt16Size;
+ static_assert(kBytecodeAgeOffset + kBytecodeAgeSize - 1 ==
+ kBytecodeAgeOffsetEnd);
+
+ // InterpreterEntryTrampoline and other builtins expect these fields to be
+ // next to each other and fill 32 bits in total, since they write a 32-bit
+ // value to reset them.
+ static constexpr bool kOsrStateAndBytecodeAgeAreContiguous32Bits =
+ kBytecodeAgeOffset == kOsrUrgencyAndInstallTargetOffset + kUInt16Size &&
+ kBytecodeAgeSize == kUInt16Size;
+ static_assert(kOsrStateAndBytecodeAgeAreContiguous32Bits);
- // Accessors for bytecode's code age.
inline Age bytecode_age() const;
inline void set_bytecode_age(Age age);
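
OsrInstallTargetFor stores only the 13 least-significant bits of the JumpLoop bytecode offset (matching the OSRUrgencyAndInstallTarget bitfield in code.tq further down) and ORs in a 1, so a real install target can never collide with the 'unset' value kNoOsrInstallTarget == 0. A small standalone check of that encoding:

    #include <cassert>

    constexpr int kInstallTargetBits = 13;
    constexpr int kInstallTargetMask = (1 << kInstallTargetBits) - 1;

    constexpr int OsrInstallTargetFor(int bytecode_offset) {
      return (bytecode_offset | 1) & kInstallTargetMask;
    }

    int main() {
      assert(OsrInstallTargetFor(0) == 1);          // never the 'unset' value 0
      assert(OsrInstallTargetFor(42) == 43);        // even offsets gain the low bit
      assert(OsrInstallTargetFor(0x2040) == 0x41);  // only the 13 LSB are kept
    }
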
@@ -999,11 +1050,8 @@ class BytecodeArray
// as it would if no attempt was ever made to collect source positions.
inline void SetSourcePositionsFailedToCollect();
- // Dispatched behavior.
inline int BytecodeArraySize();
- inline int raw_instruction_size();
-
// Returns the size of bytecode and its metadata. This includes the size of
// bytecode, constant pool, source position table, and handler table.
inline int SizeIncludingMetadata();
@@ -1023,11 +1071,6 @@ class BytecodeArray
// is deterministic.
inline void clear_padding();
- // InterpreterEntryTrampoline expects these fields to be next to each other
- // and writes a 16-bit value to reset them.
- STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- kOsrLoopNestingLevelOffset + kCharSize);
-
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
// Maximal length of a single BytecodeArray.
@@ -1078,7 +1121,7 @@ class DeoptimizationData : public FixedArray {
static const int kSharedFunctionInfoIndex = 6;
static const int kInliningPositionsIndex = 7;
static const int kDeoptExitStartIndex = 8;
- static const int kNonLazyDeoptCountIndex = 9;
+ static const int kEagerDeoptCountIndex = 9;
static const int kLazyDeoptCountIndex = 10;
static const int kFirstDeoptEntryIndex = 11;
@@ -1107,7 +1150,7 @@ class DeoptimizationData : public FixedArray {
DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DECL_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
- DECL_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
+ DECL_ELEMENT_ACCESSORS(EagerDeoptCount, Smi)
DECL_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
#undef DECL_ELEMENT_ACCESSORS
diff --git a/deps/v8/src/objects/code.tq b/deps/v8/src/objects/code.tq
index c51b187107..7fe82f6b75 100644
--- a/deps/v8/src/objects/code.tq
+++ b/deps/v8/src/objects/code.tq
@@ -4,6 +4,14 @@
type DependentCode extends WeakFixedArray;
+bitfield struct OSRUrgencyAndInstallTarget extends uint16 {
+ // The layout is chosen s.t. urgency and the install target offset can be
+ // loaded with a single 16-bit load (i.e. no masking required).
+ osr_urgency: uint32: 3 bit;
+ // The 13 LSB of the install target bytecode offset.
+ osr_install_target: uint32: 13 bit;
+}
+
extern class BytecodeArray extends FixedArrayBase {
// TODO(v8:8983): bytecode array object sizes vary based on their contents.
constant_pool: FixedArray;
@@ -22,8 +30,8 @@ extern class BytecodeArray extends FixedArrayBase {
frame_size: int32;
parameter_size: int32;
incoming_new_target_or_generator_register: int32;
- osr_loop_nesting_level: int8;
- bytecode_age: int8;
+ osr_urgency_and_install_target: OSRUrgencyAndInstallTarget;
+ bytecode_age: uint16; // Only 3 bits used.
}
extern class CodeDataContainer extends HeapObject;
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 7661ccafb3..46ba4c50f2 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -296,10 +296,6 @@ ScriptContextTable NativeContext::synchronized_script_context_table() const {
get(SCRIPT_CONTEXT_TABLE_INDEX, kAcquireLoad));
}
-OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() {
- return OSROptimizedCodeCache::cast(osr_code_cache());
-}
-
void NativeContext::SetOptimizedCodeListHead(Object head) {
set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
}
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index c1a14a4501..efdf3c383f 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -345,6 +345,7 @@ enum ContextLookupFlags {
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
V(OBJECT_VALUE_OF_FUNCTION_INDEX, JSFunction, object_value_of_function) \
V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
+ V(PROMISE_ALL_SETTLED_INDEX, JSFunction, promise_all_settled) \
V(PROMISE_ANY_INDEX, JSFunction, promise_any) \
V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
@@ -368,7 +369,7 @@ enum ContextLookupFlags {
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
V(WRAPPED_FUNCTION_MAP_INDEX, Map, wrapped_function_map) \
V(RETAINED_MAPS, Object, retained_maps) \
- V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache)
+ V(OSR_CODE_CACHE_INDEX, OSROptimizedCodeCache, osr_code_cache)
#include "torque-generated/src/objects/contexts-tq.inc"
@@ -776,8 +777,6 @@ class NativeContext : public Context {
inline void SetDeoptimizedCodeListHead(Object head);
inline Object DeoptimizedCodeListHead();
- inline OSROptimizedCodeCache GetOSROptimizedCodeCache();
-
void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index 8ace1c204b..d36c4c2c68 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -158,6 +158,8 @@ extern enum ContextSlot extends intptr constexpr 'Context::Field' {
BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX: Slot<NativeContext, Map>,
BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX: Slot<NativeContext, Map>,
+ WRAPPED_FUNCTION_MAP_INDEX: Slot<NativeContext, Map>,
+
MIN_CONTEXT_SLOTS,
...
}
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index f4013fcc12..6ea12ace1b 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -85,6 +85,9 @@ ACCESSORS_RELAXED_CHECKED(ErrorStackData, call_site_infos, FixedArray,
kCallSiteInfosOrFormattedStackOffset,
!HasFormattedStack())
+NEVER_READ_ONLY_SPACE_IMPL(PromiseOnStack)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseOnStack)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index efe5d68543..b80aa58437 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -457,5 +457,16 @@ void ErrorStackData::EnsureStackFrameInfos(Isolate* isolate,
error_stack->set_limit_or_stack_frame_infos(*stack_frame_infos);
}
+// static
+MaybeHandle<JSObject> PromiseOnStack::GetPromise(
+ Handle<PromiseOnStack> promise_on_stack) {
+ HeapObject promise;
+ Isolate* isolate = promise_on_stack->GetIsolate();
+ if (promise_on_stack->promise()->GetHeapObjectIfWeak(isolate, &promise)) {
+ return handle(JSObject::cast(promise), isolate);
+ }
+ return {};
+}
+
} // namespace internal
} // namespace v8
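
PromiseOnStack::GetPromise hands back an empty MaybeHandle once the weakly held promise has been collected, so callers have to unwrap the result before use. As a rough analogy for that weak-reference contract (std::weak_ptr standing in for V8's weak slot and handle machinery):

    #include <cassert>
    #include <memory>

    struct Promise {};  // Stand-in for the weakly referenced JSObject.

    // Analogue of PromiseOnStack::GetPromise: yield the object only while the
    // weak reference has not been cleared.
    std::shared_ptr<Promise> GetPromise(const std::weak_ptr<Promise>& weak_promise) {
      return weak_promise.lock();  // Empty pointer once the promise is gone.
    }

    int main() {
      auto strong = std::make_shared<Promise>();
      std::weak_ptr<Promise> weak = strong;
      assert(GetPromise(weak) != nullptr);  // Still alive, caller may use it.
      strong.reset();                       // 'GC' reclaims the promise.
      assert(GetPromise(weak) == nullptr);  // Caller gets an empty result.
    }
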
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index efe056ac76..ddea5008b8 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -251,6 +251,19 @@ class ErrorStackData
TQ_OBJECT_CONSTRUCTORS(ErrorStackData)
};
+class PromiseOnStack
+ : public TorqueGeneratedPromiseOnStack<PromiseOnStack, Struct> {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ static MaybeHandle<JSObject> GetPromise(
+ Handle<PromiseOnStack> promise_on_stack);
+
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(PromiseOnStack)
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.tq b/deps/v8/src/objects/debug-objects.tq
index 2ce08a8e2a..52a6ac2c35 100644
--- a/deps/v8/src/objects/debug-objects.tq
+++ b/deps/v8/src/objects/debug-objects.tq
@@ -127,3 +127,8 @@ extern class ErrorStackData extends Struct {
// stack traces.
limit_or_stack_frame_infos: Smi|FixedArray;
}
+
+extern class PromiseOnStack extends Struct {
+ prev: PromiseOnStack|Zero;
+ promise: Weak<JSObject>;
+}
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 8a318c06ec..c0a1a5a970 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -3144,7 +3144,7 @@ class TypedElementsAccessor
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
Isolate* isolate = typed_array->GetIsolate();
DCHECK_LT(entry.raw_value(), typed_array->GetLength());
- DCHECK(!typed_array->WasDetached());
+ DCHECK(!typed_array->IsDetachedOrOutOfBounds());
auto* element_ptr =
static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
@@ -3300,7 +3300,7 @@ class TypedElementsAccessor
Handle<Object> value, size_t start,
size_t end) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
- DCHECK(!typed_array->WasDetached());
+ DCHECK(!typed_array->IsDetachedOrOutOfBounds());
DCHECK_LE(start, end);
DCHECK_LE(end, typed_array->GetLength());
DisallowGarbageCollection no_gc;
@@ -3470,8 +3470,7 @@ class TypedElementsAccessor
DisallowGarbageCollection no_gc;
JSTypedArray typed_array = JSTypedArray::cast(*receiver);
- DCHECK(!typed_array.WasDetached());
- DCHECK(!typed_array.IsOutOfBounds());
+ DCHECK(!typed_array.IsDetachedOrOutOfBounds());
ElementType typed_search_value;
@@ -3525,7 +3524,7 @@ class TypedElementsAccessor
DisallowGarbageCollection no_gc;
JSTypedArray typed_array = JSTypedArray::cast(receiver);
- DCHECK(!typed_array.WasDetached());
+ DCHECK(!typed_array.IsDetachedOrOutOfBounds());
size_t len = typed_array.GetLength();
if (len == 0) return;
@@ -3570,8 +3569,8 @@ class TypedElementsAccessor
size_t start, size_t end) {
DisallowGarbageCollection no_gc;
DCHECK_EQ(destination.GetElementsKind(), AccessorClass::kind());
- CHECK(!source.WasDetached());
- CHECK(!destination.WasDetached());
+ CHECK(!source.IsDetachedOrOutOfBounds());
+ CHECK(!destination.IsDetachedOrOutOfBounds());
DCHECK_LE(start, end);
DCHECK_LE(end, source.GetLength());
size_t count = end - start;
@@ -3636,8 +3635,8 @@ class TypedElementsAccessor
// side-effects, as the source elements will always be a number.
DisallowGarbageCollection no_gc;
- CHECK(!source.WasDetached());
- CHECK(!destination.WasDetached());
+ CHECK(!source.IsDetachedOrOutOfBounds());
+ CHECK(!destination.IsDetachedOrOutOfBounds());
DCHECK_LE(offset, destination.GetLength());
DCHECK_LE(length, destination.GetLength() - offset);
@@ -3744,7 +3743,7 @@ class TypedElementsAccessor
CHECK(!destination.WasDetached());
bool out_of_bounds = false;
- CHECK(destination.GetLengthOrOutOfBounds(out_of_bounds) >= length);
+ CHECK_GE(destination.GetLengthOrOutOfBounds(out_of_bounds), length);
CHECK(!out_of_bounds);
size_t current_length;
@@ -3822,42 +3821,52 @@ class TypedElementsAccessor
return false;
}
+ // ES#sec-settypedarrayfromarraylike
static Object CopyElementsHandleSlow(Handle<Object> source,
Handle<JSTypedArray> destination,
size_t length, size_t offset) {
Isolate* isolate = destination->GetIsolate();
+ // 8. Let k be 0.
+ // 9. Repeat, while k < srcLength,
for (size_t i = 0; i < length; i++) {
Handle<Object> elem;
+ // a. Let Pk be ! ToString(𝔽(k)).
+ // b. Let value be ? Get(src, Pk).
LookupIterator it(isolate, source, i);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::GetProperty(&it));
+ // c. Let targetIndex be 𝔽(targetOffset + k).
+ // d. Perform ? IntegerIndexedElementSet(target, targetIndex, value).
+ //
+ // Rest of loop body inlines ES#IntegerIndexedElementSet
if (IsBigIntTypedArrayElementsKind(Kind)) {
+ // 1. If O.[[ContentType]] is BigInt, let numValue be ? ToBigInt(value).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
BigInt::FromObject(isolate, elem));
} else {
+ // 2. Otherwise, let numValue be ? ToNumber(value).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::ToNumber(isolate, elem));
}
+ // 3. If IsValidIntegerIndex(O, index) is true, then
+ // a. Let offset be O.[[ByteOffset]].
+ // b. Let elementSize be TypedArrayElementSize(O).
+ // c. Let indexedPosition be (ℝ(index) × elementSize) + offset.
+ // d. Let elementType be TypedArrayElementType(O).
+ // e. Perform SetValueInBuffer(O.[[ViewedArrayBuffer]],
+ // indexedPosition, elementType, numValue, true, Unordered).
bool out_of_bounds = false;
size_t new_length = destination->GetLengthOrOutOfBounds(out_of_bounds);
- if (V8_UNLIKELY(out_of_bounds || destination->WasDetached())) {
- const char* op = "set";
- const MessageTemplate message = MessageTemplate::kDetachedOperation;
- Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(op);
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError(message, operation));
- }
- if (V8_UNLIKELY(new_length <= offset + i)) {
+ if (V8_UNLIKELY(out_of_bounds || destination->WasDetached() ||
+ new_length <= offset + i)) {
// Proceed with the loop so that we call get getters for the source even
// though we don't set the values in the target.
- // TODO(v8:11111): Maybe change this, depending on how
- // https://github.com/tc39/proposal-resizablearraybuffer/issues/86 is
- // resolved.
continue;
}
SetImpl(destination, InternalIndex(offset + i), *elem);
+ // e. Set k to k + 1.
}
+ // 10. Return unused.
return *isolate->factory()->undefined_value();
}
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index a8493528af..1682f21858 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -139,8 +139,8 @@ CodeT FeedbackVector::optimized_code() const {
return code;
}
-OptimizationMarker FeedbackVector::optimization_marker() const {
- return OptimizationMarkerBits::decode(flags());
+TieringState FeedbackVector::tiering_state() const {
+ return TieringStateBits::decode(flags());
}
bool FeedbackVector::has_optimized_code() const {
@@ -156,10 +156,6 @@ void FeedbackVector::set_maybe_has_optimized_code(bool value) {
set_flags(MaybeHasOptimizedCodeBit::update(flags(), value));
}
-bool FeedbackVector::has_optimization_marker() const {
- return optimization_marker() != OptimizationMarker::kNone;
-}
-
// Conversion from an integer index to either a slot or an ic slot.
// static
FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index f4f517f73b..06f65fb1ec 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -261,7 +261,7 @@ Handle<FeedbackVector> FeedbackVector::New(
DCHECK_EQ(vector->length(), slot_count);
DCHECK_EQ(vector->shared_function_info(), *shared);
- DCHECK_EQ(vector->optimization_marker(), OptimizationMarker::kNone);
+ DCHECK_EQ(vector->tiering_state(), TieringState::kNone);
DCHECK(!vector->maybe_has_optimized_code());
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
@@ -386,25 +386,22 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
if (ticks < Smi::kMaxValue) set_profiler_ticks(ticks + 1);
}
-// static
-void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
- Handle<CodeT> code) {
+void FeedbackVector::SetOptimizedCode(Handle<CodeT> code) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
// We should set optimized code only when there is no valid optimized code.
- DCHECK(!vector->has_optimized_code() ||
- vector->optimized_code().marked_for_deoptimization() ||
+ DCHECK(!has_optimized_code() ||
+ optimized_code().marked_for_deoptimization() ||
FLAG_stress_concurrent_inlining_attach_code);
- // TODO(mythria): We could see a CompileOptimized marker here either from
+ // TODO(mythria): We could see a CompileOptimized state here either from
// tests that use %OptimizeFunctionOnNextCall, --always-opt or because we
// re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't
- // kCompileOptimized or kCompileOptimizedConcurrent.
- vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code),
- kReleaseStore);
- int32_t state = vector->flags();
- state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
+ // TieringState::kRequestTurbofan*.
+ set_maybe_optimized_code(HeapObjectReference::Weak(*code), kReleaseStore);
+ int32_t state = flags();
+ state = TieringStateBits::update(state, TieringState::kNone);
state = MaybeHasOptimizedCodeBit::update(state, true);
- vector->set_flags(state);
+ set_flags(state);
}
void FeedbackVector::ClearOptimizedCode() {
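
SetOptimizedCode drops its static Handle<FeedbackVector> parameter and becomes an instance method, so call sites change shape rather than behaviour. A hypothetical call-site fragment (vector is assumed to be a Handle<FeedbackVector>, code a Handle<CodeT>):

    // Before this patch: FeedbackVector::SetOptimizedCode(vector, code);
    vector->SetOptimizedCode(code);  // After: a plain member call on the vector.
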
@@ -415,21 +412,35 @@ void FeedbackVector::ClearOptimizedCode() {
set_maybe_has_optimized_code(false);
}
-void FeedbackVector::ClearOptimizationMarker() {
- SetOptimizationMarker(OptimizationMarker::kNone);
+void FeedbackVector::reset_tiering_state() {
+ set_tiering_state(TieringState::kNone);
}
-void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
- int32_t state = flags();
- state = OptimizationMarkerBits::update(state, marker);
- set_flags(state);
+void FeedbackVector::set_tiering_state(TieringState state) {
+ int32_t new_flags = flags();
+ new_flags = TieringStateBits::update(new_flags, state);
+ set_flags(new_flags);
}
-void FeedbackVector::InitializeOptimizationState() {
- set_flags(OptimizationMarkerBits::encode(OptimizationMarker::kNone) |
+void FeedbackVector::reset_flags() {
+ set_flags(TieringStateBits::encode(TieringState::kNone) |
+ OsrTieringStateBit::encode(TieringState::kNone) |
MaybeHasOptimizedCodeBit::encode(false));
}
+TieringState FeedbackVector::osr_tiering_state() {
+ return OsrTieringStateBit::decode(flags());
+}
+
+void FeedbackVector::set_osr_tiering_state(TieringState marker) {
+ DCHECK(marker == TieringState::kNone || marker == TieringState::kInProgress);
+ STATIC_ASSERT(TieringState::kNone <= OsrTieringStateBit::kMax);
+ STATIC_ASSERT(TieringState::kInProgress <= OsrTieringStateBit::kMax);
+ int32_t state = flags();
+ state = OsrTieringStateBit::update(state, marker);
+ set_flags(state);
+}
+
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo shared, const char* reason) {
MaybeObject slot = maybe_optimized_code(kAcquireLoad);
@@ -441,9 +452,6 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
Code code = FromCodeT(CodeT::cast(slot->GetHeapObject()));
if (code.marked_for_deoptimization()) {
Deoptimizer::TraceEvictFromOptimizedCodeCache(shared, reason);
- if (!code.deopt_already_counted()) {
- code.set_deopt_already_counted(true);
- }
ClearOptimizedCode();
}
}
@@ -1416,7 +1424,7 @@ void FeedbackNexus::ResetTypeProfile() {
FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
: done_(false), index_(-1), state_(kOther) {
DCHECK(
- IsLoadICKind(nexus->kind()) || IsStoreICKind(nexus->kind()) ||
+ IsLoadICKind(nexus->kind()) || IsSetNamedICKind(nexus->kind()) ||
IsKeyedLoadICKind(nexus->kind()) || IsKeyedStoreICKind(nexus->kind()) ||
IsDefineNamedOwnICKind(nexus->kind()) ||
IsDefineKeyedOwnPropertyInLiteralKind(nexus->kind()) ||
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 359134baa0..42da62fe10 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -94,7 +94,7 @@ inline bool IsStoreGlobalICKind(FeedbackSlotKind kind) {
kind == FeedbackSlotKind::kStoreGlobalStrict;
}
-inline bool IsStoreICKind(FeedbackSlotKind kind) {
+inline bool IsSetNamedICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kSetNamedSloppy ||
kind == FeedbackSlotKind::kSetNamedStrict;
}
@@ -140,7 +140,7 @@ inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
}
inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
- DCHECK(IsStoreICKind(kind) || IsDefineNamedOwnICKind(kind) ||
+ DCHECK(IsSetNamedICKind(kind) || IsDefineNamedOwnICKind(kind) ||
IsStoreGlobalICKind(kind) || IsKeyedStoreICKind(kind) ||
IsDefineKeyedOwnICKind(kind));
STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <=
@@ -187,30 +187,24 @@ class ClosureFeedbackCellArray : public FixedArray {
class NexusConfig;
-// A FeedbackVector has a fixed header with:
-// - shared function info (which includes feedback metadata)
-// - invocation count
-// - runtime profiler ticks
-// - optimized code cell (weak cell or Smi marker)
-// followed by an array of feedback slots, of length determined by the feedback
-// metadata.
+// A FeedbackVector has a fixed header followed by an array of feedback slots,
+// of length determined by the feedback metadata.
class FeedbackVector
: public TorqueGeneratedFeedbackVector<FeedbackVector, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
- STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <=
- OptimizationMarkerBits::kMax);
+ STATIC_ASSERT(TieringState::kLastTieringState <= TieringStateBits::kMax);
static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
using TorqueGeneratedFeedbackVector<FeedbackVector,
HeapObject>::maybe_optimized_code;
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
- static constexpr uint32_t kHasCompileOptimizedMarker =
- kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
- static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
- MaybeHasOptimizedCodeBit::kMask | kHasCompileOptimizedMarker;
+ static constexpr uint32_t kTieringStateIsAnyRequestMask =
+ kNoneOrInProgressMask << TieringStateBits::kShift;
+ static constexpr uint32_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask =
+ MaybeHasOptimizedCodeBit::kMask | kTieringStateIsAnyRequestMask;
inline bool is_empty() const;
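
The renamed constants combine MaybeHasOptimizedCodeBit with a mask over the tiering-state bits, so one bitwise test answers "is optimized code attached, or is any tiering request pending?". A standalone sketch of that combined check; the TieringState values and the kNoneOrInProgressMask encoding below are assumptions, only the field order (a 3-bit tiering state followed by the maybe-has-optimized-code bit) comes from feedback-vector.tq in this patch:

    #include <cassert>
    #include <cstdint>

    // Assumed encoding: kNone and kInProgress are 0 and 1, request states are >= 2,
    // so a mask over the upper two state bits is clear exactly for 'none/in progress'.
    enum TieringState : uint32_t { kNone = 0, kInProgress = 1, kRequestTurbofan = 2 };

    constexpr uint32_t kTieringStateShift = 0;            // low 3 bits of the flags
    constexpr uint32_t kMaybeHasOptimizedCodeMask = 1u << 3;
    constexpr uint32_t kNoneOrInProgressMask = 0b110;
    constexpr uint32_t kIsAnyRequestMask = kNoneOrInProgressMask << kTieringStateShift;
    constexpr uint32_t kHasCodeOrAnyRequestMask =
        kMaybeHasOptimizedCodeMask | kIsAnyRequestMask;

    int main() {
      uint32_t idle = kNone;                         // nothing cached, no request
      uint32_t requesting = kRequestTurbofan;        // tier-up requested
      uint32_t cached = kMaybeHasOptimizedCodeMask;  // optimized code attached
      assert((idle & kHasCodeOrAnyRequestMask) == 0);
      assert((requesting & kHasCodeOrAnyRequestMask) != 0);
      assert((cached & kHasCodeOrAnyRequestMask) != 0);
    }
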
@@ -234,19 +228,19 @@ class FeedbackVector
// the world, thus 'maybe'.
inline bool maybe_has_optimized_code() const;
inline void set_maybe_has_optimized_code(bool value);
-
- inline bool has_optimization_marker() const;
- inline OptimizationMarker optimization_marker() const;
+ void SetOptimizedCode(Handle<CodeT> code);
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
const char* reason);
- static void SetOptimizedCode(Handle<FeedbackVector> vector,
- Handle<CodeT> code);
void ClearOptimizedCode();
- void SetOptimizationMarker(OptimizationMarker marker);
- void InitializeOptimizationState();
- // Clears the optimization marker in the feedback vector.
- void ClearOptimizationMarker();
+ inline TieringState tiering_state() const;
+ void set_tiering_state(TieringState state);
+ void reset_tiering_state();
+
+ TieringState osr_tiering_state();
+ void set_osr_tiering_state(TieringState marker);
+
+ void reset_flags();
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
@@ -296,7 +290,7 @@ class FeedbackVector
DEFINE_SLOT_KIND_PREDICATE(IsLoadIC)
DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
- DEFINE_SLOT_KIND_PREDICATE(IsStoreIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsSetNamedIC)
DEFINE_SLOT_KIND_PREDICATE(IsDefineNamedOwnIC)
DEFINE_SLOT_KIND_PREDICATE(IsStoreGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsKeyedStoreIC)
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index 68365f67fe..97a2ca80ad 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -2,15 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
+type TieringState extends uint16 constexpr 'TieringState';
bitfield struct FeedbackVectorFlags extends uint32 {
- optimization_marker: OptimizationMarker: 3 bit;
+ tiering_state: TieringState: 3 bit;
// Whether the maybe_optimized_code field contains a code object. 'maybe',
   // because the flag may lag behind the actual state of the world (it will be
// updated in time).
maybe_has_optimized_code: bool: 1 bit;
- all_your_bits_are_belong_to_jgruber: uint32: 28 bit;
+ // Just one bit, since only {kNone,kInProgress} are relevant for OSR.
+ osr_tiering_state: TieringState: 1 bit;
+ all_your_bits_are_belong_to_jgruber: uint32: 27 bit;
}
@generateBodyDescriptor
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index d0f619ac05..5dea891511 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -83,7 +83,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -91,7 +90,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index f36025535b..f0c3bfc097 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,20 +134,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
-#if !defined(_WIN32)
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
-#endif
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 9049394cde..d6633d72bd 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -200,34 +200,10 @@ bool JSArrayBufferView::IsVariableLength() const {
size_t JSTypedArray::GetLengthOrOutOfBounds(bool& out_of_bounds) const {
DCHECK(!out_of_bounds);
if (WasDetached()) return 0;
- if (is_length_tracking()) {
- if (is_backed_by_rab()) {
- if (byte_offset() > buffer().byte_length()) {
- out_of_bounds = true;
- return 0;
- }
- return (buffer().byte_length() - byte_offset()) / element_size();
- }
- if (byte_offset() >
- buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst)) {
- out_of_bounds = true;
- return 0;
- }
- return (buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst) -
- byte_offset()) /
- element_size();
+ if (IsVariableLength()) {
+ return GetVariableLengthOrOutOfBounds(out_of_bounds);
}
- size_t array_length = LengthUnchecked();
- if (is_backed_by_rab()) {
- // The sum can't overflow, since we have managed to allocate the
- // JSTypedArray.
- if (byte_offset() + array_length * element_size() >
- buffer().byte_length()) {
- out_of_bounds = true;
- return 0;
- }
- }
- return array_length;
+ return LengthUnchecked();
}
size_t JSTypedArray::GetLength() const {
@@ -245,6 +221,15 @@ bool JSTypedArray::IsOutOfBounds() const {
return out_of_bounds;
}
+bool JSTypedArray::IsDetachedOrOutOfBounds() const {
+ if (WasDetached()) {
+ return true;
+ }
+ bool out_of_bounds = false;
+ GetLengthOrOutOfBounds(out_of_bounds);
+ return out_of_bounds;
+}
+
size_t JSTypedArray::length() const {
DCHECK(!is_length_tracking());
DCHECK(!is_backed_by_rab());
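
The new IsDetachedOrOutOfBounds() folds "was the buffer detached?" and "did a resizable buffer shrink underneath the view?" into one predicate, which is why the paired checks in elements.cc above collapse into single calls. A standalone sketch of that combined predicate (plain sizes instead of V8's backing-store bookkeeping):

    #include <cassert>
    #include <cstddef>

    struct TypedArraySketch {
      bool detached;
      size_t byte_offset;
      size_t view_byte_length;
      size_t buffer_byte_length;  // Can shrink for resizable ArrayBuffers.

      bool IsOutOfBounds() const {
        return byte_offset + view_byte_length > buffer_byte_length;
      }
      bool IsDetachedOrOutOfBounds() const { return detached || IsOutOfBounds(); }
    };

    int main() {
      TypedArraySketch ok{false, 0, 16, 64};
      TypedArraySketch shrunk{false, 32, 16, 24};  // Buffer resized below the view.
      TypedArraySketch detached{true, 0, 16, 64};
      assert(!ok.IsDetachedOrOutOfBounds());
      assert(shrunk.IsDetachedOrOutOfBounds());
      assert(detached.IsDetachedOrOutOfBounds());
    }
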
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index cd760b9e67..dd59d5d6af 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -147,6 +147,36 @@ size_t JSArrayBuffer::GsabByteLength(Isolate* isolate,
return buffer.GetBackingStore()->byte_length(std::memory_order_seq_cst);
}
+// static
+Maybe<bool> JSArrayBuffer::GetResizableBackingStorePageConfiguration(
+ Isolate* isolate, size_t byte_length, size_t max_byte_length,
+ ShouldThrow should_throw, size_t* page_size, size_t* initial_pages,
+ size_t* max_pages) {
+ DCHECK_NOT_NULL(page_size);
+ DCHECK_NOT_NULL(initial_pages);
+ DCHECK_NOT_NULL(max_pages);
+
+ *page_size = AllocatePageSize();
+
+ if (!RoundUpToPageSize(byte_length, *page_size, JSArrayBuffer::kMaxByteLength,
+ initial_pages)) {
+ if (should_throw == kDontThrow) return Nothing<bool>();
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength),
+ Nothing<bool>());
+ }
+
+ if (!RoundUpToPageSize(max_byte_length, *page_size,
+ JSArrayBuffer::kMaxByteLength, max_pages)) {
+ if (should_throw == kDontThrow) return Nothing<bool>();
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength),
+ Nothing<bool>());
+ }
+
+ return Just(true);
+}
+
ArrayBufferExtension* JSArrayBuffer::EnsureExtension() {
ArrayBufferExtension* extension = this->extension();
if (extension != nullptr) return extension;
@@ -236,52 +266,52 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
Handle<Object> key,
PropertyDescriptor* desc,
Maybe<ShouldThrow> should_throw) {
- // 1. Assert: IsPropertyKey(P) is true.
DCHECK(key->IsName() || key->IsNumber());
- // 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
- // 3. If Type(P) is String, then
+ // 1. If Type(P) is String, then
PropertyKey lookup_key(isolate, key);
if (lookup_key.is_element() || key->IsSmi() || key->IsString()) {
- // 3a. Let numericIndex be ! CanonicalNumericIndexString(P)
- // 3b. If numericIndex is not undefined, then
+ // 1a. Let numericIndex be ! CanonicalNumericIndexString(P)
+ // 1b. If numericIndex is not undefined, then
bool is_minus_zero = false;
if (key->IsSmi() || // Smi keys are definitely canonical
CanonicalNumericIndexString(isolate, lookup_key, &is_minus_zero)) {
- // 3b i. If IsInteger(numericIndex) is false, return false.
- // 3b ii. If numericIndex = -0, return false.
- // 3b iii. If numericIndex < 0, return false.
- if (!lookup_key.is_element() || is_minus_zero) {
+ // 1b i. If IsValidIntegerIndex(O, numericIndex) is false, return false.
+
+ // IsValidIntegerIndex:
+ size_t index = lookup_key.index();
+ bool out_of_bounds = false;
+ size_t length = o->GetLengthOrOutOfBounds(out_of_bounds);
+ if (o->WasDetached() || out_of_bounds || index >= length) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
- size_t index = lookup_key.index();
- // 3b iv. Let length be O.[[ArrayLength]].
- size_t length = o->length();
- // 3b v. If numericIndex ≥ length, return false.
- if (o->WasDetached() || index >= length) {
+ if (!lookup_key.is_element() || is_minus_zero) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
- // 3b vi. If IsAccessorDescriptor(Desc) is true, return false.
+
+ // 1b ii. If Desc has a [[Configurable]] field and if
+ // Desc.[[Configurable]] is false, return false.
+ // 1b iii. If Desc has an [[Enumerable]] field and if Desc.[[Enumerable]]
+ // is false, return false.
+ // 1b iv. If IsAccessorDescriptor(Desc) is true, return false.
+ // 1b v. If Desc has a [[Writable]] field and if Desc.[[Writable]] is
+ // false, return false.
+
if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed, key));
}
- // 3b vii. If Desc has a [[Configurable]] field and if
- // Desc.[[Configurable]] is false, return false.
- // 3b viii. If Desc has an [[Enumerable]] field and if Desc.[[Enumerable]]
- // is false, return false.
- // 3b ix. If Desc has a [[Writable]] field and if Desc.[[Writable]] is
- // false, return false.
+
if ((desc->has_configurable() && !desc->configurable()) ||
(desc->has_enumerable() && !desc->enumerable()) ||
(desc->has_writable() && !desc->writable())) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed, key));
}
- // 3b x. If Desc has a [[Value]] field, then
- // 3b x 1. Let value be Desc.[[Value]].
- // 3b x 2. Return ? IntegerIndexedElementSet(O, numericIndex, value).
+
+ // 1b vi. If Desc has a [[Value]] field, perform
+ // ? IntegerIndexedElementSet(O, numericIndex, Desc.[[Value]]).
if (desc->has_value()) {
if (!desc->has_configurable()) desc->set_configurable(true);
if (!desc->has_enumerable()) desc->set_enumerable(true);
@@ -293,7 +323,7 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
DefineOwnPropertyIgnoreAttributes(&it, value, desc->ToAttributes()),
Nothing<bool>());
}
- // 3b xi. Return true.
+ // 1b vii. Return true.
return Just(true);
}
}
@@ -350,5 +380,35 @@ size_t JSTypedArray::LengthTrackingGsabBackedTypedArrayLength(
return (backing_byte_length - array.byte_offset()) / element_byte_size;
}
+size_t JSTypedArray::GetVariableLengthOrOutOfBounds(bool& out_of_bounds) const {
+ DCHECK(!WasDetached());
+ if (is_length_tracking()) {
+ if (is_backed_by_rab()) {
+ if (byte_offset() > buffer().byte_length()) {
+ out_of_bounds = true;
+ return 0;
+ }
+ return (buffer().byte_length() - byte_offset()) / element_size();
+ }
+ if (byte_offset() >
+ buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst)) {
+ out_of_bounds = true;
+ return 0;
+ }
+ return (buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst) -
+ byte_offset()) /
+ element_size();
+ }
+ DCHECK(is_backed_by_rab());
+ size_t array_length = LengthUnchecked();
+ // The sum can't overflow, since we have managed to allocate the
+ // JSTypedArray.
+ if (byte_offset() + array_length * element_size() > buffer().byte_length()) {
+ out_of_bounds = true;
+ return 0;
+ }
+ return array_length;
+}
+
} // namespace internal
} // namespace v8
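GetResizableBackingStorePageConfiguration, added above, only rounds both requested sizes up to whole allocation pages and fails when a rounded size would exceed the maximum buffer length. A standalone sketch of that rounding, with hypothetical names and a bool/optional result in place of the Maybe<bool>/RangeError reporting used in the patch:

    #include <cstddef>
    #include <optional>

    // Round byte_length up to a whole number of pages; fail if the rounded size
    // would exceed max_allowed_byte_length (this also guards against overflow).
    std::optional<size_t> PagesFor(size_t byte_length, size_t page_size,
                                   size_t max_allowed_byte_length) {
      size_t pages = byte_length / page_size;
      if (byte_length % page_size != 0) pages++;
      if (pages > max_allowed_byte_length / page_size) return std::nullopt;
      return pages;
    }

    // Mirrors the two checks in the patch: the initial and the maximum size are
    // rounded independently and either one can fail.
    bool ComputePageConfiguration(size_t byte_length, size_t max_byte_length,
                                  size_t page_size, size_t max_allowed_byte_length,
                                  size_t* initial_pages, size_t* max_pages) {
      auto initial = PagesFor(byte_length, page_size, max_allowed_byte_length);
      if (!initial) return false;  // kInvalidArrayBufferLength in the real code
      auto maximum = PagesFor(max_byte_length, page_size, max_allowed_byte_length);
      if (!maximum) return false;  // kInvalidArrayBufferMaxLength in the real code
      *initial_pages = *initial;
      *max_pages = *maximum;
      return true;
    }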
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 9a9fe2c813..4b3207c2ed 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -114,6 +114,11 @@ class JSArrayBuffer
static size_t GsabByteLength(Isolate* isolate, Address raw_array_buffer);
+ static Maybe<bool> GetResizableBackingStorePageConfiguration(
+ Isolate* isolate, size_t byte_length, size_t max_byte_length,
+ ShouldThrow should_throw, size_t* page_size, size_t* initial_pages,
+ size_t* max_pages);
+
// Allocates an ArrayBufferExtension for this array buffer, unless it is
// already associated with an extension.
ArrayBufferExtension* EnsureExtension();
@@ -296,10 +301,14 @@ class JSTypedArray
inline bool is_on_heap() const;
inline bool is_on_heap(AcquireLoadTag tag) const;
+ // Only valid to call when IsVariableLength() is true.
+ size_t GetVariableLengthOrOutOfBounds(bool& out_of_bounds) const;
+
inline size_t GetLengthOrOutOfBounds(bool& out_of_bounds) const;
inline size_t GetLength() const;
inline size_t GetByteLength() const;
inline bool IsOutOfBounds() const;
+ inline bool IsDetachedOrOutOfBounds() const;
static size_t LengthTrackingGsabBackedTypedArrayLength(Isolate* isolate,
Address raw_array);
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 3b9f796263..2e5b192c3b 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -43,12 +43,6 @@ bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
return new_length > kMaxFastArrayLength;
}
-bool JSArray::AllowsSetLength() {
- bool result = elements().IsFixedArray() || elements().IsFixedDoubleArray();
- DCHECK(result == !HasTypedArrayElements());
- return result;
-}
-
void JSArray::SetContent(Handle<JSArray> array,
Handle<FixedArrayBase> storage) {
EnsureCanContainElements(array, storage, storage->length(),
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 2cd2e3f309..58f0964e38 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -57,8 +57,6 @@ class JSArray : public TorqueGeneratedJSArray<JSArray, JSObject> {
static inline bool SetLengthWouldNormalize(Heap* heap, uint32_t new_length);
// Initializes the array to a certain length.
- inline bool AllowsSetLength();
-
V8_EXPORT_PRIVATE static Maybe<bool> SetLength(Handle<JSArray> array,
uint32_t length);
@@ -143,10 +141,6 @@ class JSArray : public TorqueGeneratedJSArray<JSArray, JSObject> {
TQ_OBJECT_CONSTRUCTORS(JSArray)
};
-Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
- Handle<Context> native_context,
- Handle<Map> initial_map);
-
// The JSArrayIterator describes JavaScript Array Iterators Objects, as
// defined in ES section #sec-array-iterator-objects.
class JSArrayIterator
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 99a1d4b64c..d703d423b6 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -44,36 +44,12 @@ ClosureFeedbackCellArray JSFunction::closure_feedback_cell_array() const {
return ClosureFeedbackCellArray::cast(raw_feedback_cell().value());
}
-bool JSFunction::HasOptimizationMarker() {
- return has_feedback_vector() && feedback_vector().has_optimization_marker();
-}
-
-void JSFunction::ClearOptimizationMarker() {
+void JSFunction::reset_tiering_state() {
DCHECK(has_feedback_vector());
- feedback_vector().ClearOptimizationMarker();
-}
-
-bool JSFunction::ChecksOptimizationMarker() {
- return code().checks_optimization_marker();
-}
-
-bool JSFunction::IsMarkedForOptimization() {
- return has_feedback_vector() &&
- feedback_vector().optimization_marker() ==
- OptimizationMarker::kCompileTurbofan_NotConcurrent;
-}
-
-bool JSFunction::IsMarkedForConcurrentOptimization() {
- return has_feedback_vector() &&
- feedback_vector().optimization_marker() ==
- OptimizationMarker::kCompileTurbofan_Concurrent;
+ feedback_vector().reset_tiering_state();
}
-bool JSFunction::IsInOptimizationQueue() {
- if (!has_feedback_vector()) return false;
- return feedback_vector().optimization_marker() ==
- OptimizationMarker::kInOptimizationQueue;
-}
+bool JSFunction::ChecksTieringState() { return code().checks_tiering_state(); }
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
if (!has_prototype_slot()) return;
@@ -126,12 +102,25 @@ void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfoOffset, value, mode);
}
-void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
+TieringState JSFunction::tiering_state() const {
+ if (!has_feedback_vector()) return TieringState::kNone;
+ return feedback_vector().tiering_state();
+}
+
+void JSFunction::set_tiering_state(TieringState state) {
+ DCHECK(has_feedback_vector());
+ DCHECK(IsNone(state) || ChecksTieringState());
+ feedback_vector().set_tiering_state(state);
+}
+
+TieringState JSFunction::osr_tiering_state() {
DCHECK(has_feedback_vector());
- DCHECK(ChecksOptimizationMarker());
- DCHECK(!ActiveTierIsTurbofan());
+ return feedback_vector().osr_tiering_state();
+}
- feedback_vector().SetOptimizationMarker(marker);
+void JSFunction::set_osr_tiering_state(TieringState marker) {
+ DCHECK(has_feedback_vector());
+ feedback_vector().set_osr_tiering_state(marker);
}
bool JSFunction::has_feedback_vector() const {
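The rename above collapses the old per-query predicates (IsMarkedForOptimization, IsMarkedForConcurrentOptimization, IsInOptimizationQueue) into a single TieringState value read from the feedback vector. A standalone sketch of how one enum answers all of those questions; the enumerators are hypothetical stand-ins chosen to mirror the names visible later in this patch:

    // Hypothetical mirror of the tiering-state machine: one enum value replaces
    // the separate OptimizationMarker booleans.
    enum class TieringState {
      kNone,
      kRequestMaglev_Synchronous,
      kRequestMaglev_Concurrent,
      kRequestTurbofan_Synchronous,
      kRequestTurbofan_Concurrent,
      kInProgress,
    };

    enum class ConcurrencyMode { kSynchronous, kConcurrent };
    enum class CodeKind { MAGLEV, TURBOFAN };

    constexpr bool IsConcurrent(ConcurrencyMode mode) {
      return mode == ConcurrencyMode::kConcurrent;
    }

    constexpr bool IsInProgress(TieringState state) {
      return state == TieringState::kInProgress;
    }

    // Same shape as TieringStateFor in js-function.cc below: pick the request
    // state matching the target tier and the concurrency mode.
    constexpr TieringState TieringStateFor(CodeKind target_kind, ConcurrencyMode mode) {
      return target_kind == CodeKind::MAGLEV
                 ? (IsConcurrent(mode) ? TieringState::kRequestMaglev_Concurrent
                                       : TieringState::kRequestMaglev_Synchronous)
                 : (IsConcurrent(mode) ? TieringState::kRequestTurbofan_Concurrent
                                       : TieringState::kRequestTurbofan_Synchronous);
    }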
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index b96aa696e5..e8898a10eb 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -6,6 +6,7 @@
#include "src/codegen/compiler.h"
#include "src/diagnostics/code-tracer.h"
+#include "src/execution/isolate.h"
#include "src/execution/tiering-manager.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
@@ -162,16 +163,15 @@ bool JSFunction::CanDiscardCompiled() const {
namespace {
-constexpr OptimizationMarker OptimizationMarkerFor(CodeKind target_kind,
- ConcurrencyMode mode) {
+constexpr TieringState TieringStateFor(CodeKind target_kind,
+ ConcurrencyMode mode) {
DCHECK(target_kind == CodeKind::MAGLEV || target_kind == CodeKind::TURBOFAN);
return target_kind == CodeKind::MAGLEV
- ? (mode == ConcurrencyMode::kConcurrent
- ? OptimizationMarker::kCompileMaglev_Concurrent
- : OptimizationMarker::kCompileMaglev_NotConcurrent)
- : (mode == ConcurrencyMode::kConcurrent
- ? OptimizationMarker::kCompileTurbofan_Concurrent
- : OptimizationMarker::kCompileTurbofan_NotConcurrent);
+ ? (IsConcurrent(mode) ? TieringState::kRequestMaglev_Concurrent
+ : TieringState::kRequestMaglev_Synchronous)
+ : (IsConcurrent(mode)
+ ? TieringState::kRequestTurbofan_Concurrent
+ : TieringState::kRequestTurbofan_Synchronous);
}
} // namespace
@@ -180,7 +180,7 @@ void JSFunction::MarkForOptimization(Isolate* isolate, CodeKind target_kind,
ConcurrencyMode mode) {
if (!isolate->concurrent_recompilation_enabled() ||
isolate->bootstrapper()->IsActive()) {
- mode = ConcurrencyMode::kNotConcurrent;
+ mode = ConcurrencyMode::kSynchronous;
}
DCHECK(CodeKindIsOptimizedJSFunction(target_kind));
@@ -191,8 +191,8 @@ void JSFunction::MarkForOptimization(Isolate* isolate, CodeKind target_kind,
DCHECK(shared().allows_lazy_compilation() ||
!shared().optimization_disabled());
- if (mode == ConcurrencyMode::kConcurrent) {
- if (IsInOptimizationQueue()) {
+ if (IsConcurrent(mode)) {
+ if (IsInProgress(tiering_state())) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Not marking ");
ShortPrint();
@@ -208,7 +208,7 @@ void JSFunction::MarkForOptimization(Isolate* isolate, CodeKind target_kind,
}
}
- SetOptimizationMarker(OptimizationMarkerFor(target_kind, mode));
+ set_tiering_state(TieringStateFor(target_kind, mode));
}
void JSFunction::SetInterruptBudget(Isolate* isolate) {
@@ -217,6 +217,91 @@ void JSFunction::SetInterruptBudget(Isolate* isolate) {
}
// static
+Maybe<bool> JSFunctionOrBoundFunctionOrWrappedFunction::CopyNameAndLength(
+ Isolate* isolate,
+ Handle<JSFunctionOrBoundFunctionOrWrappedFunction> function,
+ Handle<JSReceiver> target, Handle<String> prefix, int arg_count) {
+ // Setup the "length" property based on the "length" of the {target}.
+ // If the targets length is the default JSFunction accessor, we can keep the
+ // accessor that's installed by default on the
+ // JSBoundFunction/JSWrappedFunction. It lazily computes the value from the
+ // underlying internal length.
+ Handle<AccessorInfo> function_length_accessor =
+ isolate->factory()->function_length_accessor();
+ LookupIterator length_lookup(isolate, target,
+ isolate->factory()->length_string(), target,
+ LookupIterator::OWN);
+ if (!target->IsJSFunction() ||
+ length_lookup.state() != LookupIterator::ACCESSOR ||
+ !length_lookup.GetAccessors().is_identical_to(function_length_accessor)) {
+ Handle<Object> length(Smi::zero(), isolate);
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetPropertyAttributes(&length_lookup);
+ if (attributes.IsNothing()) return Nothing<bool>();
+ if (attributes.FromJust() != ABSENT) {
+ Handle<Object> target_length;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_length,
+ Object::GetProperty(&length_lookup),
+ Nothing<bool>());
+ if (target_length->IsNumber()) {
+ length = isolate->factory()->NewNumber(std::max(
+ 0.0, DoubleToInteger(target_length->Number()) - arg_count));
+ }
+ }
+ LookupIterator it(isolate, function, isolate->factory()->length_string(),
+ function);
+ DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, length, it.property_attributes()),
+ Nothing<bool>());
+ }
+
+ // Setup the "name" property based on the "name" of the {target}.
+ // If the target's name is the default JSFunction accessor, we can keep the
+ // accessor that's installed by default on the
+ // JSBoundFunction/JSWrappedFunction. It lazily computes the value from the
+ // underlying internal name.
+ Handle<AccessorInfo> function_name_accessor =
+ isolate->factory()->function_name_accessor();
+ LookupIterator name_lookup(isolate, target, isolate->factory()->name_string(),
+ target);
+ if (!target->IsJSFunction() ||
+ name_lookup.state() != LookupIterator::ACCESSOR ||
+ !name_lookup.GetAccessors().is_identical_to(function_name_accessor) ||
+ (name_lookup.IsFound() && !name_lookup.HolderIsReceiver())) {
+ Handle<Object> target_name;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_name,
+ Object::GetProperty(&name_lookup),
+ Nothing<bool>());
+ Handle<String> name;
+ if (target_name->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, name,
+ Name::ToFunctionName(isolate, Handle<String>::cast(target_name)),
+ Nothing<bool>());
+ if (!prefix.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, name, isolate->factory()->NewConsString(prefix, name),
+ Nothing<bool>());
+ }
+ } else if (prefix.is_null()) {
+ name = isolate->factory()->empty_string();
+ } else {
+ name = prefix;
+ }
+ LookupIterator it(isolate, function, isolate->factory()->name_string());
+ DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, name, it.property_attributes()),
+ Nothing<bool>());
+ }
+
+ return Just(true);
+}
+
+// static
MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
Handle<JSBoundFunction> function) {
Handle<String> prefix = isolate->factory()->bound__string();
@@ -230,12 +315,19 @@ MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
function = handle(JSBoundFunction::cast(function->bound_target_function()),
isolate);
}
+ if (function->bound_target_function().IsJSWrappedFunction()) {
+ Handle<JSWrappedFunction> target(
+ JSWrappedFunction::cast(function->bound_target_function()), isolate);
+ Handle<String> name;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, name, JSWrappedFunction::GetName(isolate, target), String);
+ return factory->NewConsString(target_name, name);
+ }
if (function->bound_target_function().IsJSFunction()) {
Handle<JSFunction> target(
JSFunction::cast(function->bound_target_function()), isolate);
- Handle<Object> name = JSFunction::GetName(isolate, target);
- if (!name->IsString()) return target_name;
- return factory->NewConsString(target_name, Handle<String>::cast(name));
+ Handle<String> name = JSFunction::GetName(isolate, target);
+ return factory->NewConsString(target_name, name);
}
// This will omit the proper target name for bound JSProxies.
return target_name;
@@ -258,6 +350,16 @@ Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
nof_bound_arguments = Smi::kMaxValue;
}
}
+ if (function->bound_target_function().IsJSWrappedFunction()) {
+ Handle<JSWrappedFunction> target(
+ JSWrappedFunction::cast(function->bound_target_function()), isolate);
+ int target_length = 0;
+ MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, target_length, JSWrappedFunction::GetLength(isolate, target),
+ Nothing<int>());
+ int length = std::max(0, target_length - nof_bound_arguments);
+ return Just(length);
+ }
// All non JSFunction targets get a direct property and don't use this
// accessor.
Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
@@ -275,13 +377,107 @@ Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
}
// static
+MaybeHandle<String> JSWrappedFunction::GetName(
+ Isolate* isolate, Handle<JSWrappedFunction> function) {
+ STACK_CHECK(isolate, MaybeHandle<String>());
+ Factory* factory = isolate->factory();
+ Handle<String> target_name = factory->empty_string();
+ Handle<JSReceiver> target =
+ handle(function->wrapped_target_function(), isolate);
+ if (target->IsJSBoundFunction()) {
+ return JSBoundFunction::GetName(
+ isolate,
+ handle(JSBoundFunction::cast(function->wrapped_target_function()),
+ isolate));
+ } else if (target->IsJSFunction()) {
+ return JSFunction::GetName(
+ isolate,
+ handle(JSFunction::cast(function->wrapped_target_function()), isolate));
+ }
+ // This will omit the proper target name for bound JSProxies.
+ return target_name;
+}
+
+// static
+Maybe<int> JSWrappedFunction::GetLength(Isolate* isolate,
+ Handle<JSWrappedFunction> function) {
+ STACK_CHECK(isolate, Nothing<int>());
+ Handle<JSReceiver> target =
+ handle(function->wrapped_target_function(), isolate);
+ if (target->IsJSBoundFunction()) {
+ return JSBoundFunction::GetLength(
+ isolate,
+ handle(JSBoundFunction::cast(function->wrapped_target_function()),
+ isolate));
+ }
+ // All non JSFunction targets get a direct property and don't use this
+ // accessor.
+ return Just(Handle<JSFunction>::cast(target)->length());
+}
+
+// static
Handle<String> JSWrappedFunction::ToString(Handle<JSWrappedFunction> function) {
Isolate* const isolate = function->GetIsolate();
return isolate->factory()->function_native_code_string();
}
// static
-Handle<Object> JSFunction::GetName(Isolate* isolate,
+MaybeHandle<Object> JSWrappedFunction::Create(
+ Isolate* isolate, Handle<NativeContext> creation_context,
+ Handle<JSReceiver> value) {
+ // The value must be a callable according to the specification.
+ DCHECK(value->IsCallable());
+ // The intermediate wrapped functions are not user-visible. And calling a
+ // wrapped function won't cause a side effect in the creation realm.
+ // Unwrap here to avoid nested unwrapping at the call site.
+ if (value->IsJSWrappedFunction()) {
+ Handle<JSWrappedFunction> target_wrapped =
+ Handle<JSWrappedFunction>::cast(value);
+ value =
+ Handle<JSReceiver>(target_wrapped->wrapped_target_function(), isolate);
+ }
+
+ // 1. Let internalSlotsList be the internal slots listed in Table 2, plus
+ // [[Prototype]] and [[Extensible]].
+ // 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
+ // 3. Set wrapped.[[Prototype]] to
+ // callerRealm.[[Intrinsics]].[[%Function.prototype%]].
+ // 4. Set wrapped.[[Call]] as described in 2.1.
+ // 5. Set wrapped.[[WrappedTargetFunction]] to Target.
+ // 6. Set wrapped.[[Realm]] to callerRealm.
+ Handle<JSWrappedFunction> wrapped =
+ isolate->factory()->NewJSWrappedFunction(creation_context, value);
+
+ // 7. Let result be CopyNameAndLength(wrapped, Target, "wrapped").
+ Maybe<bool> is_abrupt =
+ JSFunctionOrBoundFunctionOrWrappedFunction::CopyNameAndLength(
+ isolate, wrapped, value, Handle<String>(), 0);
+
+ // 8. If result is an Abrupt Completion, throw a TypeError exception.
+ if (is_abrupt.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ // TODO(v8:11989): provide a non-observable inspection on the
+ // pending_exception to the newly created TypeError.
+ // https://github.com/tc39/proposal-shadowrealm/issues/353
+
+ // The TypeError thrown is created with creation Realm's TypeError
+ // constructor instead of the executing Realm's.
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewError(Handle<JSFunction>(creation_context->type_error_function(),
+ isolate),
+ MessageTemplate::kCannotWrap),
+ {});
+ }
+ DCHECK(is_abrupt.FromJust());
+
+ // 9. Return wrapped.
+ return wrapped;
+}
+
+// static
+Handle<String> JSFunction::GetName(Isolate* isolate,
Handle<JSFunction> function) {
if (function->shared().name_should_print_as_anonymous()) {
return isolate->factory()->anonymous_string();
@@ -446,17 +642,8 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
Handle<Map> new_map =
Map::Copy(isolate, initial_map, "SetInstancePrototype");
JSFunction::SetInitialMap(isolate, function, new_map, value);
-
- // If the function is used as the global Array function, cache the
- // updated initial maps (and transitioned versions) in the native context.
- Handle<Context> native_context(function->context().native_context(),
- isolate);
- Handle<Object> array_function(
- native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
- if (array_function->IsJSFunction() &&
- *function == JSFunction::cast(*array_function)) {
- CacheInitialJSArrayMaps(isolate, native_context, new_map);
- }
+ DCHECK_IMPLIES(!isolate->bootstrapper()->IsActive(),
+ *function != function->native_context().array_function());
}
// Deoptimize all code that embeds the previous initial map.
@@ -499,8 +686,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
JSObject::MigrateToMap(isolate, function, new_map);
FunctionKind kind = function->shared().kind();
- Handle<Context> native_context(function->context().native_context(),
- isolate);
+ Handle<Context> native_context(function->native_context(), isolate);
construct_prototype = Handle<JSReceiver>(
IsGeneratorFunction(kind)
@@ -916,15 +1102,20 @@ int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
return initial_map().instance_size();
}
+std::unique_ptr<char[]> JSFunction::DebugNameCStr() {
+ return shared().DebugNameCStr();
+}
+
void JSFunction::PrintName(FILE* out) {
- PrintF(out, "%s", shared().DebugNameCStr().get());
+ PrintF(out, "%s", DebugNameCStr().get());
}
namespace {
bool UseFastFunctionNameLookup(Isolate* isolate, Map map) {
DCHECK(map.IsJSFunctionMap());
- if (map.NumberOfOwnDescriptors() < JSFunction::kMinDescriptorsForFastBind) {
+ if (map.NumberOfOwnDescriptors() <
+ JSFunction::kMinDescriptorsForFastBindAndWrap) {
return false;
}
DCHECK(!map.is_dictionary_map());
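CopyNameAndLength, added above, is the spec operation shared by Function.prototype.bind and ShadowRealm's wrapped functions: the derived function's "length" is the target's numeric length minus the bound argument count, clamped at zero, and its "name" is the target's string name with an optional prefix. A standalone sketch of just that arithmetic, with plain double/std::string stand-ins for the engine's Number and String handles:

    #include <algorithm>
    #include <cmath>
    #include <string>

    // Mirrors the length rule in the patch: a non-numeric "length" on the target
    // counts as 0, a numeric one is truncated (approximating DoubleToInteger) and
    // reduced by the bound argument count, never dropping below 0.
    double DerivedLength(bool target_length_is_number, double target_length, int arg_count) {
      if (!target_length_is_number) return 0.0;
      return std::max(0.0, std::trunc(target_length) - arg_count);
    }

    // Name rule: the prefix ("bound " for bind, empty for wrapped functions) is
    // prepended to the target's string name; a non-string name becomes "".
    std::string DerivedName(const std::string& prefix, const std::string& target_name,
                            bool name_is_string) {
      if (!name_is_string) return prefix;
      return prefix + target_name;
    }

For example, binding one argument of a three-parameter function f yields length 2 and name "bound f", which is the behaviour the lookup-and-redefine code above implements on the real property machinery.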
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 6d430aec30..b787276da7 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -26,6 +26,12 @@ class JSFunctionOrBoundFunctionOrWrappedFunction
static const int kLengthDescriptorIndex = 0;
static const int kNameDescriptorIndex = 1;
+ // https://tc39.es/proposal-shadowrealm/#sec-copynameandlength
+ static Maybe<bool> CopyNameAndLength(
+ Isolate* isolate,
+ Handle<JSFunctionOrBoundFunctionOrWrappedFunction> function,
+ Handle<JSReceiver> target, Handle<String> prefix, int arg_count);
+
STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize);
TQ_OBJECT_CONSTRUCTORS(JSFunctionOrBoundFunctionOrWrappedFunction)
};
@@ -56,6 +62,15 @@ class JSWrappedFunction
: public TorqueGeneratedJSWrappedFunction<
JSWrappedFunction, JSFunctionOrBoundFunctionOrWrappedFunction> {
public:
+ static MaybeHandle<String> GetName(Isolate* isolate,
+ Handle<JSWrappedFunction> function);
+ static Maybe<int> GetLength(Isolate* isolate,
+ Handle<JSWrappedFunction> function);
+ // https://tc39.es/proposal-shadowrealm/#sec-wrappedfunctioncreate
+ static MaybeHandle<Object> Create(Isolate* isolate,
+ Handle<NativeContext> creation_context,
+ Handle<JSReceiver> value);
+
// Dispatched behavior.
DECL_PRINTER(JSWrappedFunction)
DECL_VERIFIER(JSWrappedFunction)
@@ -80,7 +95,7 @@ class JSFunction : public TorqueGeneratedJSFunction<
DECL_RELAXED_GETTER(shared, SharedFunctionInfo)
// Fast binding requires length and name accessors.
- static const int kMinDescriptorsForFastBind = 2;
+ static const int kMinDescriptorsForFastBindAndWrap = 2;
// [context]: The context for this function.
inline Context context();
@@ -90,7 +105,7 @@ class JSFunction : public TorqueGeneratedJSFunction<
inline NativeContext native_context();
inline int length();
- static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
+ static Handle<String> GetName(Isolate* isolate, Handle<JSFunction> function);
// [code]: The generated code object for this function. Executed
// when the function is invoked, e.g. foo() or new foo(). See
@@ -152,30 +167,21 @@ class JSFunction : public TorqueGeneratedJSFunction<
// CompileLazy.
bool CanDiscardCompiled() const;
- // Tells whether or not this function checks its optimization marker in its
- // feedback vector.
- inline bool ChecksOptimizationMarker();
+ // Tells whether function's code object checks its tiering state (some code
+ // kinds, e.g. TURBOFAN, ignore the tiering state).
+ inline bool ChecksTieringState();
- // Tells whether or not this function has a (non-zero) optimization marker.
- inline bool HasOptimizationMarker();
+ inline TieringState tiering_state() const;
+ inline void set_tiering_state(TieringState state);
+ inline void reset_tiering_state();
// Mark this function for lazy recompilation. The function will be recompiled
// the next time it is executed.
void MarkForOptimization(Isolate* isolate, CodeKind target_kind,
ConcurrencyMode mode);
- // Tells whether or not the function is already marked for lazy recompilation.
- inline bool IsMarkedForOptimization();
- inline bool IsMarkedForConcurrentOptimization();
-
- // Tells whether or not the function is on the concurrent recompilation queue.
- inline bool IsInOptimizationQueue();
-
- // Sets the optimization marker in the function's feedback vector.
- inline void SetOptimizationMarker(OptimizationMarker marker);
-
- // Clears the optimization marker in the function's feedback vector.
- inline void ClearOptimizationMarker();
+ inline TieringState osr_tiering_state();
+ inline void set_osr_tiering_state(TieringState marker);
// Sets the interrupt budget based on whether the function has a feedback
// vector and any optimized code.
@@ -294,7 +300,7 @@ class JSFunction : public TorqueGeneratedJSFunction<
: JSFunction::kSizeWithoutPrototype;
}
- // Prints the name of the function using PrintF.
+ std::unique_ptr<char[]> DebugNameCStr();
void PrintName(FILE* out = stdout);
// Calculate the instance size and in-object properties count.
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 0fe9938ac9..abbcf53ebe 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -477,6 +477,23 @@ void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
}
}
+Object JSObject::RawFastPropertyAtSwap(FieldIndex index, Object value,
+ SeqCstAccessTag tag) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return RawFastPropertyAtSwap(cage_base, index, value, tag);
+}
+
+Object JSObject::RawFastPropertyAtSwap(PtrComprCageBase cage_base,
+ FieldIndex index, Object value,
+ SeqCstAccessTag tag) {
+ if (index.is_inobject()) {
+ return TaggedField<Object>::SeqCst_Swap(cage_base, *this, index.offset(),
+ value);
+ }
+ return property_array().Swap(cage_base, index.outobject_array_index(), value,
+ tag);
+}
+
int JSObject::GetInObjectPropertyOffset(int index) {
return map().GetInObjectPropertyOffset(index);
}
@@ -708,11 +725,6 @@ DEF_GETTER(JSObject, HasSlowStringWrapperElements, bool) {
return GetElementsKind(cage_base) == SLOW_STRING_WRAPPER_ELEMENTS;
}
-DEF_GETTER(JSObject, HasTypedArrayElements, bool) {
- DCHECK(!elements(cage_base).is_null());
- return map(cage_base).has_typed_array_elements();
-}
-
DEF_GETTER(JSObject, HasTypedArrayOrRabGsabTypedArrayElements, bool) {
DCHECK(!elements(cage_base).is_null());
return map(cage_base).has_typed_array_or_rab_gsab_typed_array_elements();
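RawFastPropertyAtSwap, added above, is an atomic exchange on fast properties: depending on the field index the value lives either inside the object itself or in the out-of-object property array, and in both cases the swap is sequentially consistent. A standalone sketch of that dispatch, assuming a hypothetical layout with std::atomic in place of V8's TaggedField accessors:

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    using Tagged = intptr_t;  // stand-in for a tagged Object word

    struct FastObjectSketch {
      // A small number of slots allocated inside the object...
      std::vector<std::atomic<Tagged>> inobject;
      // ...and an overflow array for the remaining fast properties.
      std::vector<std::atomic<Tagged>> property_array;

      FastObjectSketch(size_t inobject_count, size_t overflow_count)
          : inobject(inobject_count), property_array(overflow_count) {}

      // Mirrors RawFastPropertyAtSwap: exchange the old value for the new one
      // with seq_cst ordering, choosing the backing store from the field index.
      Tagged SwapProperty(size_t field_index, Tagged value) {
        if (field_index < inobject.size()) {
          return inobject[field_index].exchange(value, std::memory_order_seq_cst);
        }
        size_t outobject_index = field_index - inobject.size();
        assert(outobject_index < property_array.size());
        return property_array[outobject_index].exchange(value, std::memory_order_seq_cst);
      }
    };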
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 090a56c334..3f806f5a09 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -601,7 +601,7 @@ MaybeHandle<NativeContext> JSReceiver::GetCreationContext() {
}
return function.has_context()
- ? Handle<NativeContext>(function.context().native_context(),
+ ? Handle<NativeContext>(function.native_context(),
receiver.GetIsolate())
: MaybeHandle<NativeContext>();
}
@@ -629,7 +629,7 @@ MaybeHandle<NativeContext> JSReceiver::GetFunctionRealm(
}
if (current.IsJSFunction()) {
JSFunction function = JSFunction::cast(current);
- return handle(function.context().native_context(), isolate);
+ return handle(function.native_context(), isolate);
}
if (current.IsJSBoundFunction()) {
JSBoundFunction function = JSBoundFunction::cast(current);
@@ -1656,6 +1656,49 @@ Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
}
// static
+Maybe<bool> JSReceiver::AddPrivateField(LookupIterator* it,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ Isolate* isolate = receiver->GetIsolate();
+ DCHECK(it->GetName()->IsPrivateName());
+ Handle<Symbol> symbol = Handle<Symbol>::cast(it->GetName());
+
+ switch (it->state()) {
+ case LookupIterator::JSPROXY: {
+ PropertyDescriptor new_desc;
+ new_desc.set_value(value);
+ new_desc.set_writable(true);
+ new_desc.set_enumerable(true);
+ new_desc.set_configurable(true);
+ return JSProxy::SetPrivateSymbol(isolate, Handle<JSProxy>::cast(receiver),
+ symbol, &new_desc, should_throw);
+ }
+ case LookupIterator::DATA:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESS_CHECK: {
+ if (!it->HasAccess()) {
+ it->isolate()->ReportFailedAccessCheck(it->GetHolder<JSObject>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(true);
+ }
+ break;
+ }
+
+ case LookupIterator::TRANSITION:
+ case LookupIterator::NOT_FOUND:
+ break;
+ }
+
+ return Object::TransitionAndWriteDataProperty(it, value, NONE, should_throw,
+ StoreOrigin::kMaybeKeyed);
+}
+
+// static
Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key,
@@ -2963,8 +3006,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
}
int old_number_of_fields;
- int number_of_fields =
- new_map->NumberOfFields(ConcurrencyMode::kNotConcurrent);
+ int number_of_fields = new_map->NumberOfFields(ConcurrencyMode::kSynchronous);
int inobject = new_map->GetInObjectProperties();
int unused = new_map->UnusedPropertyFields();
@@ -2972,7 +3014,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// converted to doubles.
if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
unused, &old_number_of_fields,
- ConcurrencyMode::kNotConcurrent)) {
+ ConcurrencyMode::kSynchronous)) {
object->set_map(*new_map, kReleaseStore);
return;
}
@@ -3300,7 +3342,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
map = MapUpdater{isolate, map}.ReconfigureElementsKind(to_kind);
}
- int number_of_fields = map->NumberOfFields(ConcurrencyMode::kNotConcurrent);
+ int number_of_fields = map->NumberOfFields(ConcurrencyMode::kSynchronous);
int inobject = map->GetInObjectProperties();
int unused = map->UnusedPropertyFields();
int total_size = number_of_fields + unused;
@@ -3417,9 +3459,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
Maybe<ShouldThrow> should_throw, AccessorInfoHandling handling,
- EnforceDefineSemantics semantics) {
+ EnforceDefineSemantics semantics, StoreOrigin store_origin) {
it->UpdateProtector();
- Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -3481,7 +3522,8 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
: LookupIterator(isolate, receiver, it->name(), c);
return JSObject::DefineOwnPropertyIgnoreAttributes(
- &own_lookup, value, attributes, should_throw, handling, semantics);
+ &own_lookup, value, attributes, should_throw, handling, semantics,
+ store_origin);
}
case LookupIterator::ACCESSOR: {
@@ -3516,13 +3558,11 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
return Object::SetDataProperty(it, value);
}
- // Special case: properties of typed arrays cannot be reconfigured to
- // non-writable nor to non-enumerable.
- if (it->IsElement() && object->HasTypedArrayElements()) {
- return Object::RedefineIncompatibleProperty(
- it->isolate(), it->GetName(), value, should_throw);
- }
-
+ // The non-matching attribute case for JSTypedArrays has already been
+ // handled by JSTypedArray::DefineOwnProperty.
+ DCHECK(!it->IsElement() ||
+ !Handle<JSObject>::cast(it->GetReceiver())
+ ->HasTypedArrayOrRabGsabTypedArrayElements());
// Reconfigure the data property if the attributes mismatch.
it->ReconfigureDataProperty(value, attributes);
@@ -3532,7 +3572,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
}
return Object::AddDataProperty(it, value, attributes, should_throw,
- StoreOrigin::kNamed);
+ store_origin, semantics);
}
MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
@@ -3799,7 +3839,7 @@ void JSObject::RequireSlowElements(NumberDictionary dictionary) {
}
Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
- DCHECK(!object->HasTypedArrayElements());
+ DCHECK(!object->HasTypedArrayOrRabGsabTypedArrayElements());
Isolate* isolate = object->GetIsolate();
bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
{
@@ -4046,16 +4086,15 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
NewTypeError(MessageTemplate::kCannotPreventExt));
}
- if (!object->HasTypedArrayElements()) {
- // If there are fast elements we normalize.
- Handle<NumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() ||
- object->HasSlowArgumentsElements());
+ DCHECK(!object->HasTypedArrayOrRabGsabTypedArrayElements());
- // Make sure that we never go back to fast case.
- if (*dictionary != ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
- object->RequireSlowElements(*dictionary);
- }
+ // Normalize fast elements.
+ Handle<NumberDictionary> dictionary = NormalizeElements(object);
+ DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
+
+ // Make sure that we never go back to fast case.
+ if (*dictionary != ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
+ object->RequireSlowElements(*dictionary);
}
// Do a map transition, other objects with this map may still
@@ -4124,7 +4163,8 @@ template void JSObject::ApplyAttributesToDictionary(
Handle<NumberDictionary> CreateElementDictionary(Isolate* isolate,
Handle<JSObject> object) {
Handle<NumberDictionary> new_element_dictionary;
- if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
+ if (!object->HasTypedArrayOrRabGsabTypedArrayElements() &&
+ !object->HasDictionaryElements() &&
!object->HasSlowStringWrapperElements()) {
int length = object->IsJSArray()
? Smi::ToInt(Handle<JSArray>::cast(object)->length())
@@ -4155,12 +4195,16 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
NewTypeError(MessageTemplate::kNoAccess));
}
- if (attrs == NONE && !object->map().is_extensible()) return Just(true);
+ if (attrs == NONE && !object->map().is_extensible()) {
+ return Just(true);
+ }
+
{
ElementsKind old_elements_kind = object->map().elements_kind();
if (IsFrozenElementsKind(old_elements_kind)) return Just(true);
- if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
+ if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind)) {
return Just(true);
+ }
}
if (object->IsJSGlobalProxy()) {
@@ -4231,7 +4275,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
TransitionsAccessor::SearchSpecial(isolate, old_map, *transition_marker);
if (maybe_transition_map.ToHandle(&transition_map)) {
DCHECK(transition_map->has_dictionary_elements() ||
- transition_map->has_typed_array_elements() ||
+ transition_map->has_typed_array_or_rab_gsab_typed_array_elements() ||
transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
transition_map->has_any_nonextensible_elements());
DCHECK(!transition_map->is_extensible());
@@ -4297,8 +4341,9 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
// Both seal and preventExtensions always go through without modifications to
// typed array elements. Freeze works only if there are no actual elements.
- if (object->HasTypedArrayElements()) {
- if (attrs == FROZEN && JSArrayBufferView::cast(*object).byte_length() > 0) {
+ if (object->HasTypedArrayOrRabGsabTypedArrayElements()) {
+ DCHECK(new_element_dictionary.is_null());
+ if (attrs == FROZEN && JSTypedArray::cast(*object).GetLength() > 0) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kCannotFreezeArrayBufferView));
return Nothing<bool>();
@@ -4488,7 +4533,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
// Ignore accessors on typed arrays.
- if (it->IsElement() && object->HasTypedArrayElements()) {
+ if (it->IsElement() && object->HasTypedArrayOrRabGsabTypedArrayElements()) {
return it->factory()->undefined_value();
}
@@ -4525,7 +4570,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
}
// Ignore accessors on typed arrays.
- if (it.IsElement() && object->HasTypedArrayElements()) {
+ if (it.IsElement() && object->HasTypedArrayOrRabGsabTypedArrayElements()) {
return it.factory()->undefined_value();
}
@@ -4654,7 +4699,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
if (!constructor.shared().IsApiFunction()) {
- Context context = constructor.context().native_context();
+ Context context = constructor.native_context();
JSFunction object_function = context.object_function();
new_map->SetConstructor(object_function);
}
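One detail in the PreventExtensionsWithTransition hunk above: the freeze guard for typed arrays now consults JSTypedArray::GetLength() rather than the stored byte length, i.e. the current element count as seen through the detached/out-of-bounds logic earlier in this patch. A tiny standalone sketch of that guard, with hypothetical names and a bool result instead of the thrown TypeError:

    #include <cstddef>

    enum class IntegrityLevel { kSealed, kFrozen };

    // Seal and preventExtensions never touch typed-array elements; freeze is
    // only allowed when the view currently exposes zero elements, mirroring the
    // GetLength() > 0 check above (kCannotFreezeArrayBufferView otherwise).
    bool CanApplyIntegrityLevelToTypedArray(IntegrityLevel level, size_t current_length) {
      if (level != IntegrityLevel::kFrozen) return true;
      return current_length == 0;
    }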
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 898a3d044f..d6a96a8fe2 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -175,6 +175,13 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
LookupIterator* it, Handle<Object> value,
Maybe<ShouldThrow> should_throw);
+ // Add private fields to the receiver, ignoring extensibility and the
+ // traps. The caller should check that the private field does not already
+ // exist on the receiver before calling this method.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> AddPrivateField(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
+
// ES6 9.1.6.1
V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
@@ -372,7 +379,6 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
DECL_GETTER(HasSealedElements, bool)
DECL_GETTER(HasNonextensibleElements, bool)
- DECL_GETTER(HasTypedArrayElements, bool)
DECL_GETTER(HasTypedArrayOrRabGsabTypedArrayElements, bool)
DECL_GETTER(HasFixedUint8ClampedElements, bool)
@@ -409,16 +415,6 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// to the default behavior that calls the setter.
enum AccessorInfoHandling { FORCE_FIELD, DONT_FORCE_FIELD };
- // Currently DefineOwnPropertyIgnoreAttributes invokes the setter
- // interceptor and user-defined setters during define operations,
- // even in places where it makes more sense to invoke the definer
- // interceptor and not invoke the setter: e.g. both the definer and
- // the setter interceptors are called in Object.defineProperty().
- // kDefine allows us to implement the define semantics correctly
- // in selected locations.
- // TODO(joyee): see if we can deprecate the old behavior.
- enum class EnforceDefineSemantics { kSet, kDefine };
-
V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
@@ -429,7 +425,8 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
Maybe<ShouldThrow> should_throw,
AccessorInfoHandling handling = DONT_FORCE_FIELD,
- EnforceDefineSemantics semantics = EnforceDefineSemantics::kSet);
+ EnforceDefineSemantics semantics = EnforceDefineSemantics::kSet,
+ StoreOrigin store_origin = StoreOrigin::kNamed);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> V8_EXPORT_PRIVATE
SetOwnPropertyIgnoreAttributes(Handle<JSObject> object, Handle<Name> name,
@@ -707,6 +704,12 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
inline void WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value);
+ inline Object RawFastPropertyAtSwap(FieldIndex index, Object value,
+ SeqCstAccessTag tag);
+ inline Object RawFastPropertyAtSwap(PtrComprCageBase cage_base,
+ FieldIndex index, Object value,
+ SeqCstAccessTag tag);
+
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
inline Object InObjectPropertyAt(int index);
diff --git a/deps/v8/src/objects/js-temporal-objects.cc b/deps/v8/src/objects/js-temporal-objects.cc
index 1bd7bd1cf9..4b26806a25 100644
--- a/deps/v8/src/objects/js-temporal-objects.cc
+++ b/deps/v8/src/objects/js-temporal-objects.cc
@@ -76,6 +76,10 @@ struct DateRecord {
Handle<String> calendar;
};
+struct InstantRecord : public DateTimeRecordCommon {
+ Handle<String> offset_string;
+};
+
struct DateTimeRecord : public DateTimeRecordCommon {
Handle<String> calendar;
};
@@ -93,6 +97,16 @@ struct DurationRecord {
int64_t nanoseconds;
};
+struct TimeRecord {
+ int32_t hour;
+ int32_t minute;
+ int32_t second;
+ int32_t millisecond;
+ int32_t microsecond;
+ int32_t nanosecond;
+ Handle<String> calendar;
+};
+
struct TimeZoneRecord {
bool z;
Handle<String> offset_string;
@@ -119,14 +133,27 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> ParseTemporalCalendarString(
V8_WARN_UNUSED_RESULT Maybe<DateRecord> ParseTemporalDateString(
Isolate* isolate, Handle<String> iso_string);
+// #sec-temporal-parsetemporaltimestring
+Maybe<TimeRecord> ParseTemporalTimeString(Isolate* isolate,
+ Handle<String> iso_string);
+
// #sec-temporal-parsetemporaltimezone
V8_WARN_UNUSED_RESULT MaybeHandle<String> ParseTemporalTimeZone(
Isolate* isolate, Handle<String> string);
+// #sec-temporal-parsetemporaltimezonestring
+V8_WARN_UNUSED_RESULT Maybe<TimeZoneRecord> ParseTemporalTimeZoneString(
+ Isolate* isolate, Handle<String> iso_string);
+
+// #sec-temporal-parsetimezoneoffsetstring
V8_WARN_UNUSED_RESULT Maybe<int64_t> ParseTimeZoneOffsetString(
Isolate* isolate, Handle<String> offset_string,
bool throwIfNotSatisfy = true);
+// #sec-temporal-parsetemporalinstant
+V8_WARN_UNUSED_RESULT MaybeHandle<BigInt> ParseTemporalInstant(
+ Isolate* isolate, Handle<String> iso_string);
+
void BalanceISODate(Isolate* isolate, int32_t* year, int32_t* month,
int32_t* day);
@@ -245,6 +272,10 @@ int64_t TotalDurationNanoseconds(Isolate* isolate, int64_t days, int64_t hours,
int64_t milliseconds, int64_t microseconds,
int64_t nanoseconds, int64_t offset_shift);
+// #sec-temporal-totemporaltimerecord
+Maybe<TimeRecord> ToTemporalTimeRecord(Isolate* isolate,
+ Handle<JSReceiver> temporal_time_like,
+ const char* method_name);
// Calendar Operations
// #sec-temporal-calendardateadd
@@ -643,6 +674,7 @@ MaybeHandle<JSTemporalPlainTime> CreateTemporalTime(
// 12. Return object.
return object;
}
+
MaybeHandle<JSTemporalPlainTime> CreateTemporalTime(
Isolate* isolate, int32_t hour, int32_t minute, int32_t second,
int32_t millisecond, int32_t microsecond, int32_t nanosecond) {
@@ -1558,6 +1590,7 @@ MaybeHandle<T> FromFields(Isolate* isolate, Handle<JSReceiver> calendar,
return Handle<T>::cast(result);
}
+// #sec-temporal-datefromfields
MaybeHandle<JSTemporalPlainDate> DateFromFields(Isolate* isolate,
Handle<JSReceiver> calendar,
Handle<JSReceiver> fields,
@@ -1567,8 +1600,26 @@ MaybeHandle<JSTemporalPlainDate> DateFromFields(Isolate* isolate,
isolate->factory()->dateFromFields_string(), JS_TEMPORAL_PLAIN_DATE_TYPE);
}
-// IMPL_FROM_FIELDS_ABSTRACT_OPERATION(Date, date, JS_TEMPORAL_PLAIN_DATE_TYPE)
-#undef IMPL_FROM_FIELDS_ABSTRACT_OPERATION
+// #sec-temporal-yearmonthfromfields
+MaybeHandle<JSTemporalPlainYearMonth> YearMonthFromFields(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<JSReceiver> fields,
+ Handle<Object> options) {
+ return FromFields<JSTemporalPlainYearMonth>(
+ isolate, calendar, fields, options,
+ isolate->factory()->yearMonthFromFields_string(),
+ JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE);
+}
+
+// #sec-temporal-monthdayfromfields
+MaybeHandle<JSTemporalPlainMonthDay> MonthDayFromFields(
+ Isolate* isolate, Handle<JSReceiver> calendar, Handle<JSReceiver> fields,
+ Handle<Object> options) {
+ return FromFields<JSTemporalPlainMonthDay>(
+ isolate, calendar, fields, options,
+ isolate->factory()->monthDayFromFields_string(),
+ JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE);
+}
+
// #sec-temporal-totemporaloverflow
Maybe<ShowOverflow> ToTemporalOverflow(Isolate* isolate,
Handle<JSReceiver> options,
@@ -1597,9 +1648,44 @@ MaybeHandle<JSTemporalInstant> BuiltinTimeZoneGetInstantFor(
date_time, disambiguation, method_name);
}
+// #sec-temporal-totemporalinstant
+MaybeHandle<JSTemporalInstant> ToTemporalInstant(Isolate* isolate,
+ Handle<Object> item,
+ const char* method) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. If Type(item) is Object, then
+ // a. If item has an [[InitializedTemporalInstant]] internal slot, then
+ if (item->IsJSTemporalInstant()) {
+ // i. Return item.
+ return Handle<JSTemporalInstant>::cast(item);
+ }
+ // b. If item has an [[InitializedTemporalZonedDateTime]] internal slot, then
+ if (item->IsJSTemporalZonedDateTime()) {
+ // i. Return ! CreateTemporalInstant(item.[[Nanoseconds]]).
+ Handle<BigInt> nanoseconds =
+ handle(JSTemporalZonedDateTime::cast(*item).nanoseconds(), isolate);
+ return temporal::CreateTemporalInstant(isolate, nanoseconds);
+ }
+ // 2. Let string be ? ToString(item).
+ Handle<String> string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, string, Object::ToString(isolate, item),
+ JSTemporalInstant);
+
+ // 3. Let epochNanoseconds be ? ParseTemporalInstant(string).
+ Handle<BigInt> epoch_nanoseconds;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, epoch_nanoseconds,
+ ParseTemporalInstant(isolate, string),
+ JSTemporalInstant);
+
+ // 4. Return ? CreateTemporalInstant(ℤ(epochNanoseconds)).
+ return temporal::CreateTemporalInstant(isolate, epoch_nanoseconds);
+}
+
} // namespace
namespace temporal {
+
// #sec-temporal-totemporalcalendar
MaybeHandle<JSReceiver> ToTemporalCalendar(
Isolate* isolate, Handle<Object> temporal_calendar_like,
@@ -1761,10 +1847,10 @@ MaybeHandle<JSTemporalPlainDate> ToTemporalDate(Isolate* isolate,
// e. Let fieldNames be ? CalendarFields(calendar, « "day", "month",
// "monthCode", "year" »).
Handle<FixedArray> field_names = factory->NewFixedArray(4);
- field_names->set(0, *(factory->day_string()));
- field_names->set(1, *(factory->month_string()));
- field_names->set(2, *(factory->monthCode_string()));
- field_names->set(3, *(factory->year_string()));
+ field_names->set(0, ReadOnlyRoots(isolate).day_string());
+ field_names->set(1, ReadOnlyRoots(isolate).month_string());
+ field_names->set(2, ReadOnlyRoots(isolate).monthCode_string());
+ field_names->set(3, ReadOnlyRoots(isolate).year_string());
ASSIGN_RETURN_ON_EXCEPTION(isolate, field_names,
CalendarFields(isolate, calendar, field_names),
JSTemporalPlainDate);
@@ -1818,6 +1904,167 @@ MaybeHandle<JSTemporalPlainDate> ToTemporalDate(Isolate* isolate,
namespace temporal {
+// #sec-temporal-regulatetime
+Maybe<bool> RegulateTime(Isolate* isolate, TimeRecord* time,
+ ShowOverflow overflow) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: hour, minute, second, millisecond, microsecond and nanosecond
+ // are integers.
+ // 2. Assert: overflow is either "constrain" or "reject".
+ switch (overflow) {
+ case ShowOverflow::kConstrain:
+ // 3. If overflow is "constrain", then
+ // a. Return ! ConstrainTime(hour, minute, second, millisecond,
+ // microsecond, nanosecond).
+ time->hour = std::max(std::min(time->hour, 23), 0);
+ time->minute = std::max(std::min(time->minute, 59), 0);
+ time->second = std::max(std::min(time->second, 59), 0);
+ time->millisecond = std::max(std::min(time->millisecond, 999), 0);
+ time->microsecond = std::max(std::min(time->microsecond, 999), 0);
+ time->nanosecond = std::max(std::min(time->nanosecond, 999), 0);
+ return Just(true);
+ case ShowOverflow::kReject:
+ // 4. If overflow is "reject", then
+ // a. If ! IsValidTime(hour, minute, second, millisecond, microsecond,
+ // nanosecond) is false, throw a RangeError exception.
+ if (!IsValidTime(isolate, time->hour, time->minute, time->second,
+ time->millisecond, time->microsecond,
+ time->nanosecond)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), Nothing<bool>());
+ }
+ // b. Return the new Record { [[Hour]]: hour, [[Minute]]: minute,
+ // [[Second]]: second, [[Millisecond]]: millisecond, [[Microsecond]]:
+ // microsecond, [[Nanosecond]]: nanosecond }.
+ return Just(true);
+ }
+}
+
+// #sec-temporal-totemporaltime
+MaybeHandle<JSTemporalPlainTime> ToTemporalTime(Isolate* isolate,
+ Handle<Object> item_obj,
+ ShowOverflow overflow,
+ const char* method_name) {
+ Factory* factory = isolate->factory();
+ TimeRecord result;
+ // 2. Assert: overflow is either "constrain" or "reject".
+ // 3. If Type(item) is Object, then
+ if (item_obj->IsJSReceiver()) {
+ Handle<JSReceiver> item = Handle<JSReceiver>::cast(item_obj);
+ // a. If item has an [[InitializedTemporalTime]] internal slot, then
+ // i. Return item.
+ if (item->IsJSTemporalPlainTime()) {
+ return Handle<JSTemporalPlainTime>::cast(item);
+ }
+ // b. If item has an [[InitializedTemporalZonedDateTime]] internal slot,
+ // then
+ if (item->IsJSTemporalZonedDateTime()) {
+ // i. Let instant be ! CreateTemporalInstant(item.[[Nanoseconds]]).
+ Handle<JSTemporalZonedDateTime> zoned_date_time =
+ Handle<JSTemporalZonedDateTime>::cast(item);
+ Handle<JSTemporalInstant> instant;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, instant,
+ CreateTemporalInstant(
+ isolate, Handle<BigInt>(zoned_date_time->nanoseconds(), isolate)),
+ JSTemporalPlainTime);
+ // ii. Set plainDateTime to ?
+ // BuiltinTimeZoneGetPlainDateTimeFor(item.[[TimeZone]],
+ // instant, item.[[Calendar]]).
+ Handle<JSTemporalPlainDateTime> plain_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, plain_date_time,
+ BuiltinTimeZoneGetPlainDateTimeFor(
+ isolate,
+ Handle<JSReceiver>(zoned_date_time->time_zone(), isolate),
+ instant, Handle<JSReceiver>(zoned_date_time->calendar(), isolate),
+ method_name),
+ JSTemporalPlainTime);
+ // iii. Return !
+ // CreateTemporalTime(plainDateTime.[[ISOHour]],
+ // plainDateTime.[[ISOMinute]], plainDateTime.[[ISOSecond]],
+ // plainDateTime.[[ISOMillisecond]], plainDateTime.[[ISOMicrosecond]],
+ // plainDateTime.[[ISONanosecond]]).
+ return CreateTemporalTime(
+ isolate, plain_date_time->iso_hour(), plain_date_time->iso_minute(),
+ plain_date_time->iso_second(), plain_date_time->iso_millisecond(),
+ plain_date_time->iso_microsecond(),
+ plain_date_time->iso_nanosecond());
+ }
+ // c. If item has an [[InitializedTemporalDateTime]] internal slot, then
+ if (item->IsJSTemporalPlainDateTime()) {
+ // i. Return ! CreateTemporalTime(item.[[ISOHour]], item.[[ISOMinute]],
+ // item.[[ISOSecond]], item.[[ISOMillisecond]], item.[[ISOMicrosecond]],
+ // item.[[ISONanosecond]]).
+ Handle<JSTemporalPlainDateTime> date_time =
+ Handle<JSTemporalPlainDateTime>::cast(item);
+ return CreateTemporalTime(
+ isolate, date_time->iso_hour(), date_time->iso_minute(),
+ date_time->iso_second(), date_time->iso_millisecond(),
+ date_time->iso_microsecond(), date_time->iso_nanosecond());
+ }
+ // d. Let calendar be ? GetTemporalCalendarWithISODefault(item).
+ Handle<JSReceiver> calendar;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, calendar,
+ GetTemporalCalendarWithISODefault(isolate, item, method_name),
+ JSTemporalPlainTime);
+ // e. If ? ToString(calendar) is not "iso8601", then
+ Handle<String> identifier;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, identifier,
+ Object::ToString(isolate, calendar),
+ JSTemporalPlainTime);
+ if (!String::Equals(isolate, factory->iso8601_string(), identifier)) {
+ // i. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalPlainTime);
+ }
+ // f. Let result be ? ToTemporalTimeRecord(item).
+ Maybe<TimeRecord> maybe_time_result =
+ ToTemporalTimeRecord(isolate, item, method_name);
+ MAYBE_RETURN(maybe_time_result, Handle<JSTemporalPlainTime>());
+ result = maybe_time_result.FromJust();
+ // g. Set result to ? RegulateTime(result.[[Hour]], result.[[Minute]],
+ // result.[[Second]], result.[[Millisecond]], result.[[Microsecond]],
+ // result.[[Nanosecond]], overflow).
+ Maybe<bool> maybe_regulate_time = RegulateTime(isolate, &result, overflow);
+ MAYBE_RETURN(maybe_regulate_time, Handle<JSTemporalPlainTime>());
+ DCHECK(maybe_regulate_time.FromJust());
+ } else {
+ // 4. Else,
+ // a. Let string be ? ToString(item).
+ Handle<String> string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, string,
+ Object::ToString(isolate, item_obj),
+ JSTemporalPlainTime);
+ // b. Let result be ? ParseTemporalTimeString(string).
+ Maybe<TimeRecord> maybe_result = ParseTemporalTimeString(isolate, string);
+ MAYBE_RETURN(maybe_result, MaybeHandle<JSTemporalPlainTime>());
+ result = maybe_result.FromJust();
+ // c. Assert: ! IsValidTime(result.[[Hour]], result.[[Minute]],
+ // result.[[Second]], result.[[Millisecond]], result.[[Microsecond]],
+ // result.[[Nanosecond]]) is true.
+ DCHECK(IsValidTime(isolate, result.hour, result.minute, result.second,
+ result.millisecond, result.microsecond,
+ result.nanosecond));
+ // d. If result.[[Calendar]] is not one of undefined or "iso8601", then
+ if ((result.calendar->length() > 0) /* not undefined */ &&
+ !String::Equals(isolate, result.calendar,
+ isolate->factory()->iso8601_string())) {
+ // i. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalPlainTime);
+ }
+ }
+ // 5. Return ? CreateTemporalTime(result.[[Hour]], result.[[Minute]],
+ // result.[[Second]], result.[[Millisecond]], result.[[Microsecond]],
+ // result.[[Nanosecond]]).
+ return CreateTemporalTime(isolate, result.hour, result.minute, result.second,
+ result.millisecond, result.microsecond,
+ result.nanosecond);
+}
+
// #sec-temporal-totemporaltimezone
MaybeHandle<JSReceiver> ToTemporalTimeZone(
Isolate* isolate, Handle<Object> temporal_time_zone_like,
@@ -2153,6 +2400,152 @@ Maybe<DateRecord> ParseTemporalDateString(Isolate* isolate,
return Just(ret);
}
+// #sec-temporal-parsetemporaltimestring
+Maybe<TimeRecord> ParseTemporalTimeString(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(isoString) is String.
+ // 2. If isoString does not satisfy the syntax of a TemporalTimeString
+ // (see 13.33), then
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTemporalTimeString(isolate, iso_string);
+ ParsedISO8601Result parsed;
+ if (!maybe_parsed.To(&parsed)) {
+ // a. Throw a *RangeError* exception.
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<TimeRecord>());
+ }
+
+ // 3. If _isoString_ contains a |UTCDesignator|, then
+ if (parsed.utc_designator) {
+ // a. Throw a *RangeError* exception.
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<TimeRecord>());
+ }
+
+  // 4. Let result be ? ParseISODateTime(isoString).
+ Maybe<DateTimeRecord> maybe_result =
+ ParseISODateTime(isolate, iso_string, parsed);
+ MAYBE_RETURN(maybe_result, Nothing<TimeRecord>());
+ DateTimeRecord result = maybe_result.FromJust();
+  // 5. Return the Record { [[Hour]]: result.[[Hour]], [[Minute]]:
+ // result.[[Minute]], [[Second]]: result.[[Second]], [[Millisecond]]:
+ // result.[[Millisecond]], [[Microsecond]]: result.[[Microsecond]],
+ // [[Nanosecond]]: result.[[Nanosecond]], [[Calendar]]: result.[[Calendar]] }.
+ TimeRecord ret = {result.hour, result.minute, result.second,
+ result.millisecond, result.microsecond, result.nanosecond,
+ result.calendar};
+ return Just(ret);
+}
+
+// #sec-temporal-parsetemporalinstantstring
+Maybe<InstantRecord> ParseTemporalInstantString(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: Type(isoString) is String.
+ // 2. If isoString does not satisfy the syntax of a TemporalInstantString
+ // (see 13.33), then
+ Maybe<ParsedISO8601Result> maybe_parsed =
+ TemporalParser::ParseTemporalInstantString(isolate, iso_string);
+ if (maybe_parsed.IsNothing()) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ Nothing<InstantRecord>());
+ }
+
+ // 3. Let result be ! ParseISODateTime(isoString).
+ Maybe<DateTimeRecord> maybe_result =
+ ParseISODateTime(isolate, iso_string, maybe_parsed.FromJust());
+
+ MAYBE_RETURN(maybe_result, Nothing<InstantRecord>());
+ DateTimeRecord result = maybe_result.FromJust();
+
+ // 4. Let timeZoneResult be ? ParseTemporalTimeZoneString(isoString).
+ Maybe<TimeZoneRecord> maybe_time_zone_result =
+ ParseTemporalTimeZoneString(isolate, iso_string);
+ MAYBE_RETURN(maybe_time_zone_result, Nothing<InstantRecord>());
+ TimeZoneRecord time_zone_result = maybe_time_zone_result.FromJust();
+ // 5. Let offsetString be timeZoneResult.[[OffsetString]].
+ Handle<String> offset_string = time_zone_result.offset_string;
+ // 6. If timeZoneResult.[[Z]] is true, then
+ if (time_zone_result.z) {
+ // a. Set offsetString to "+00:00".
+ offset_string = isolate->factory()->NewStringFromStaticChars("+00:00");
+ }
+ // 7. Assert: offsetString is not undefined.
+ DCHECK_GT(offset_string->length(), 0);
+
+  // 8. Return the new Record { [[Year]]: result.[[Year]],
+ // [[Month]]: result.[[Month]], [[Day]]: result.[[Day]],
+ // [[Hour]]: result.[[Hour]], [[Minute]]: result.[[Minute]],
+ // [[Second]]: result.[[Second]],
+ // [[Millisecond]]: result.[[Millisecond]],
+ // [[Microsecond]]: result.[[Microsecond]],
+ // [[Nanosecond]]: result.[[Nanosecond]],
+ // [[TimeZoneOffsetString]]: offsetString }.
+ InstantRecord record;
+ record.year = result.year;
+ record.month = result.month;
+ record.day = result.day;
+ record.hour = result.hour;
+ record.minute = result.minute;
+ record.second = result.second;
+ record.millisecond = result.millisecond;
+ record.microsecond = result.microsecond;
+ record.nanosecond = result.nanosecond;
+ record.offset_string = offset_string;
+ return Just(record);
+}
+
+// #sec-temporal-parsetemporalinstant
+MaybeHandle<BigInt> ParseTemporalInstant(Isolate* isolate,
+ Handle<String> iso_string) {
+ TEMPORAL_ENTER_FUNC();
+
+ Factory* factory = isolate->factory();
+ // 1. Assert: Type(isoString) is String.
+ // 2. Let result be ? ParseTemporalInstantString(isoString).
+ Maybe<InstantRecord> maybe_result =
+ ParseTemporalInstantString(isolate, iso_string);
+ MAYBE_RETURN(maybe_result, Handle<BigInt>());
+ InstantRecord result = maybe_result.FromJust();
+
+ // 3. Let offsetString be result.[[TimeZoneOffsetString]].
+ // 4. Assert: offsetString is not undefined.
+ DCHECK_NE(result.offset_string->length(), 0);
+
+ // 5. Let utc be ? GetEpochFromISOParts(result.[[Year]], result.[[Month]],
+ // result.[[Day]], result.[[Hour]], result.[[Minute]], result.[[Second]],
+ // result.[[Millisecond]], result.[[Microsecond]], result.[[Nanosecond]]).
+ Handle<BigInt> utc;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, utc,
+ GetEpochFromISOParts(isolate, result.year, result.month, result.day,
+ result.hour, result.minute, result.second,
+ result.millisecond, result.microsecond,
+ result.nanosecond),
+ BigInt);
+
+ // 6. If utc < −8.64 × 10^21 or utc > 8.64 × 10^21, then
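+  // Note: 8.64 × 10^21 nanoseconds is 10^8 days, i.e. the same ±10^8-day
+  // range around the epoch that ECMAScript Date supports.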
+ if ((BigInt::CompareToNumber(utc, factory->NewNumber(-8.64e21)) ==
+ ComparisonResult::kLessThan) ||
+ (BigInt::CompareToNumber(utc, factory->NewNumber(8.64e21)) ==
+ ComparisonResult::kGreaterThan)) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), BigInt);
+ }
+ // 7. Let offsetNanoseconds be ? ParseTimeZoneOffsetString(offsetString).
+ Maybe<int64_t> maybe_offset_nanoseconds =
+ ParseTimeZoneOffsetString(isolate, result.offset_string);
+ MAYBE_RETURN(maybe_offset_nanoseconds, Handle<BigInt>());
+ int64_t offset_nanoseconds = maybe_offset_nanoseconds.FromJust();
+
+ // 8. Return utc − offsetNanoseconds.
+ return BigInt::Subtract(isolate, utc,
+ BigInt::FromInt64(isolate, offset_nanoseconds));
+}
+
// #sec-temporal-parsetemporaltimezonestring
Maybe<TimeZoneRecord> ParseTemporalTimeZoneString(Isolate* isolate,
Handle<String> iso_string) {
@@ -2506,6 +2899,116 @@ MaybeHandle<JSTemporalDuration> CalendarDateUntil(
return Handle<JSTemporalDuration>::cast(duration);
}
+// #sec-temporal-defaultmergefields
+MaybeHandle<JSReceiver> DefaultMergeFields(
+ Isolate* isolate, Handle<JSReceiver> fields,
+ Handle<JSReceiver> additional_fields) {
+ Factory* factory = isolate->factory();
+ // 1. Let merged be ! OrdinaryObjectCreate(%Object.prototype%).
+ Handle<JSObject> merged =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ // 2. Let originalKeys be ? EnumerableOwnPropertyNames(fields, key).
+ Handle<FixedArray> original_keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, original_keys,
+ KeyAccumulator::GetKeys(fields, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString),
+ JSReceiver);
+ // 3. For each element nextKey of originalKeys, do
+ for (int i = 0; i < original_keys->length(); i++) {
+ // a. If nextKey is not "month" or "monthCode", then
+ Handle<Object> next_key = handle(original_keys->get(i), isolate);
+ DCHECK(next_key->IsString());
+ Handle<String> next_key_string = Handle<String>::cast(next_key);
+ if (!(String::Equals(isolate, factory->month_string(), next_key_string) ||
+ String::Equals(isolate, factory->monthCode_string(),
+ next_key_string))) {
+ // i. Let propValue be ? Get(fields, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prop_value,
+ JSReceiver::GetPropertyOrElement(isolate, fields, next_key_string),
+ JSReceiver);
+ // ii. If propValue is not undefined, then
+ if (!prop_value->IsUndefined()) {
+ // 1. Perform ! CreateDataPropertyOrThrow(merged, nextKey,
+ // propValue).
+ CHECK(JSReceiver::CreateDataProperty(isolate, merged, next_key_string,
+ prop_value, Just(kDontThrow))
+ .FromJust());
+ }
+ }
+ }
+ // 4. Let newKeys be ? EnumerableOwnPropertyNames(additionalFields, key).
+ Handle<FixedArray> new_keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, new_keys,
+ KeyAccumulator::GetKeys(additional_fields, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString),
+ JSReceiver);
+ bool new_keys_has_month_or_month_code = false;
+ // 5. For each element nextKey of newKeys, do
+ for (int i = 0; i < new_keys->length(); i++) {
+ Handle<Object> next_key = handle(new_keys->get(i), isolate);
+ DCHECK(next_key->IsString());
+ Handle<String> next_key_string = Handle<String>::cast(next_key);
+ // a. Let propValue be ? Get(additionalFields, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, prop_value,
+ JSReceiver::GetPropertyOrElement(
+ isolate, additional_fields, next_key_string),
+ JSReceiver);
+ // b. If propValue is not undefined, then
+ if (!prop_value->IsUndefined()) {
+ // 1. Perform ! CreateDataPropertyOrThrow(merged, nextKey, propValue).
+ Maybe<bool> maybe_created = JSReceiver::CreateDataProperty(
+ isolate, merged, next_key_string, prop_value, Just(kThrowOnError));
+ MAYBE_RETURN(maybe_created, Handle<JSReceiver>());
+ }
+ new_keys_has_month_or_month_code |=
+ String::Equals(isolate, factory->month_string(), next_key_string) ||
+ String::Equals(isolate, factory->monthCode_string(), next_key_string);
+ }
+ // 6. If newKeys does not contain either "month" or "monthCode", then
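+  // e.g. merging { year: 2021, month: 7 } with { day: 1 } keeps month: 7,
+  // whereas merging with { monthCode: "M08" } drops the original month.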
+ if (!new_keys_has_month_or_month_code) {
+ // a. Let month be ? Get(fields, "month").
+ Handle<Object> month;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, month,
+ JSReceiver::GetPropertyOrElement(
+ isolate, fields, factory->month_string()),
+ JSReceiver);
+ // b. If month is not undefined, then
+ if (!month->IsUndefined()) {
+ // i. Perform ! CreateDataPropertyOrThrow(merged, "month", month).
+ CHECK(JSReceiver::CreateDataProperty(isolate, merged,
+ factory->month_string(), month,
+ Just(kDontThrow))
+ .FromJust());
+ }
+ // c. Let monthCode be ? Get(fields, "monthCode").
+ Handle<Object> month_code;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, month_code,
+ JSReceiver::GetPropertyOrElement(isolate, fields,
+ factory->monthCode_string()),
+ JSReceiver);
+ // d. If monthCode is not undefined, then
+ if (!month_code->IsUndefined()) {
+ // i. Perform ! CreateDataPropertyOrThrow(merged, "monthCode", monthCode).
+ CHECK(JSReceiver::CreateDataProperty(isolate, merged,
+ factory->monthCode_string(),
+ month_code, Just(kDontThrow))
+ .FromJust());
+ }
+ }
+ // 7. Return merged.
+ return merged;
+}
+
+// #sec-temporal-getoffsetnanosecondsfor
Maybe<int64_t> GetOffsetNanosecondsFor(Isolate* isolate,
Handle<JSReceiver> time_zone_obj,
Handle<Object> instant,
@@ -2815,6 +3318,61 @@ MaybeHandle<String> CanonicalizeTimeZoneName(Isolate* isolate,
}
#endif // V8_INTL_SUPPORT
+// #sec-temporal-totemporaltimerecord
+Maybe<TimeRecord> ToTemporalTimeRecord(Isolate* isolate,
+ Handle<JSReceiver> temporal_time_like,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+
+ TimeRecord result;
+ Factory* factory = isolate->factory();
+ // 1. Assert: Type(temporalTimeLike) is Object.
+ // 2. Let result be the new Record { [[Hour]]: undefined, [[Minute]]:
+ // undefined, [[Second]]: undefined, [[Millisecond]]: undefined,
+ // [[Microsecond]]: undefined, [[Nanosecond]]: undefined }.
+ // See https://github.com/tc39/proposal-temporal/pull/1862
+ // 3. Let _any_ be *false*.
+ bool any = false;
+ // 4. For each row of Table 3, except the header row, in table order, do
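+  // e.g. { hour: 13, minute: 30 } yields hour=13, minute=30 and 0 for the
+  // remaining fields, since absent properties read as undefined and
+  // ToIntegerThrowOnInfinity(undefined) is 0.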
+ std::array<std::pair<Handle<String>, int32_t*>, 6> table3 = {
+ {{factory->hour_string(), &result.hour},
+ {factory->microsecond_string(), &result.microsecond},
+ {factory->millisecond_string(), &result.millisecond},
+ {factory->minute_string(), &result.minute},
+ {factory->nanosecond_string(), &result.nanosecond},
+ {factory->second_string(), &result.second}}};
+ for (const auto& row : table3) {
+ Handle<Object> value;
+ // a. Let property be the Property value of the current row.
+ // b. Let value be ? Get(temporalTimeLike, property).
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, temporal_time_like, row.first),
+ Nothing<TimeRecord>());
+ // c. If value is not undefined, then
+ if (!value->IsUndefined()) {
+ // i. Set _any_ to *true*.
+ any = true;
+ }
+    // d. Set value to ? ToIntegerThrowOnInfinity(value).
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value,
+ ToIntegerThrowOnInfinity(isolate, value),
+ Nothing<TimeRecord>());
+ // e. Set result's internal slot whose name is the Internal Slot value of
+ // the current row to value.
+ *(row.second) = value->Number();
+ }
+
+ // 5. If _any_ is *false*, then
+ if (!any) {
+ // a. Throw a *TypeError* exception.
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ Nothing<TimeRecord>());
+ }
+  // 6. Return result.
+ return Just(result);
+}
+
// #sec-temporal-mergelargestunitoption
MaybeHandle<JSObject> MergeLargestUnitOption(Isolate* isolate,
Handle<JSReceiver> options,
@@ -4272,6 +4830,66 @@ MaybeHandle<Oddball> JSTemporalDuration::Blank(
: isolate->factory()->false_value();
}
+namespace {
+// #sec-temporal-createnegatedtemporalduration
+MaybeHandle<JSTemporalDuration> CreateNegatedTemporalDuration(
+ Isolate* isolate, Handle<JSTemporalDuration> duration) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Assert: Type(duration) is Object.
+ // 2. Assert: duration has an [[InitializedTemporalDuration]] internal slot.
+ // 3. Return ! CreateTemporalDuration(−duration.[[Years]],
+ // −duration.[[Months]], −duration.[[Weeks]], −duration.[[Days]],
+ // −duration.[[Hours]], −duration.[[Minutes]], −duration.[[Seconds]],
+ // −duration.[[Milliseconds]], −duration.[[Microseconds]],
+ // −duration.[[Nanoseconds]]).
+
+ return CreateTemporalDuration(
+ isolate, -NumberToInt64(duration->years()),
+ -NumberToInt64(duration->months()), -NumberToInt64(duration->weeks()),
+ -NumberToInt64(duration->days()), -NumberToInt64(duration->hours()),
+ -NumberToInt64(duration->minutes()), -NumberToInt64(duration->seconds()),
+ -NumberToInt64(duration->milliseconds()),
+ -NumberToInt64(duration->microseconds()),
+ -NumberToInt64(duration->nanoseconds()));
+}
+
+} // namespace
+
+// #sec-temporal.duration.prototype.negated
+MaybeHandle<JSTemporalDuration> JSTemporalDuration::Negated(
+ Isolate* isolate, Handle<JSTemporalDuration> duration) {
+ // Let duration be the this value.
+ // 2. Perform ? RequireInternalSlot(duration,
+ // [[InitializedTemporalDuration]]).
+
+ // 3. Return ! CreateNegatedTemporalDuration(duration).
+ return CreateNegatedTemporalDuration(isolate, duration);
+}
+
+// #sec-temporal.duration.prototype.abs
+MaybeHandle<JSTemporalDuration> JSTemporalDuration::Abs(
+ Isolate* isolate, Handle<JSTemporalDuration> duration) {
+ // 1. Let duration be the this value.
+ // 2. Perform ? RequireInternalSlot(duration,
+ // [[InitializedTemporalDuration]]).
+ // 3. Return ? CreateTemporalDuration(abs(duration.[[Years]]),
+ // abs(duration.[[Months]]), abs(duration.[[Weeks]]), abs(duration.[[Days]]),
+ // abs(duration.[[Hours]]), abs(duration.[[Minutes]]),
+ // abs(duration.[[Seconds]]), abs(duration.[[Milliseconds]]),
+ // abs(duration.[[Microseconds]]), abs(duration.[[Nanoseconds]])).
+ return CreateTemporalDuration(
+ isolate, std::abs(NumberToInt64(duration->years())),
+ std::abs(NumberToInt64(duration->months())),
+ std::abs(NumberToInt64(duration->weeks())),
+ std::abs(NumberToInt64(duration->days())),
+ std::abs(NumberToInt64(duration->hours())),
+ std::abs(NumberToInt64(duration->minutes())),
+ std::abs(NumberToInt64(duration->seconds())),
+ std::abs(NumberToInt64(duration->milliseconds())),
+ std::abs(NumberToInt64(duration->microseconds())),
+ std::abs(NumberToInt64(duration->nanoseconds())));
+}
+
// #sec-temporal.calendar
MaybeHandle<JSTemporalCalendar> JSTemporalCalendar::Constructor(
Isolate* isolate, Handle<JSFunction> target, Handle<HeapObject> new_target,
@@ -4300,6 +4918,557 @@ MaybeHandle<JSTemporalCalendar> JSTemporalCalendar::Constructor(
return CreateTemporalCalendar(isolate, target, new_target, identifier);
}
+namespace {
+
+// #sec-temporal-toisodayofyear
+int32_t ToISODayOfYear(Isolate* isolate, int32_t year, int32_t month,
+ int32_t day) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year is an integer.
+ // 2. Assert: month is an integer.
+ // 3. Assert: day is an integer.
+ // 4. Let date be the date given by year, month, and day.
+ // 5. Return date's ordinal date in the year according to ISO-8601.
+ // Note: In ISO 8601, Jan: month=1, Dec: month=12,
+ // In DateCache API, Jan: month=0, Dec: month=11 so we need to - 1 for month.
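+  // For example, ToISODayOfYear(2021, 3, 1) is 1 + 59 - 0 = 60: 59 days (Jan
+  // plus Feb in the non-leap year 2021) precede March 1.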
+ return day + isolate->date_cache()->DaysFromYearMonth(year, month - 1) -
+ isolate->date_cache()->DaysFromYearMonth(year, 0);
+}
+
+bool IsPlainDatePlainDateTimeOrPlainYearMonth(
+ Handle<Object> temporal_date_like) {
+ return temporal_date_like->IsJSTemporalPlainDate() ||
+ temporal_date_like->IsJSTemporalPlainDateTime() ||
+ temporal_date_like->IsJSTemporalPlainYearMonth();
+}
+
+// #sec-temporal-toisodayofweek
+int32_t ToISODayOfWeek(Isolate* isolate, int32_t year, int32_t month,
+ int32_t day) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year is an integer.
+ // 2. Assert: month is an integer.
+ // 3. Assert: day is an integer.
+ // 4. Let date be the date given by year, month, and day.
+ // 5. Return date's day of the week according to ISO-8601.
+ // Note: In ISO 8601, Jan: month=1, Dec: month=12.
+ // In DateCache API, Jan: month=0, Dec: month=11 so we need to - 1 for month.
+ // Weekday() expect "the number of days since the epoch" as input and the
+ // value of day is 1-based so we need to minus 1 to calculate "the number of
+ // days" because the number of days on the epoch (1970/1/1) should be 0,
+ // not 1.
+ int32_t weekday = isolate->date_cache()->Weekday(
+ isolate->date_cache()->DaysFromYearMonth(year, month - 1) + day - 1);
+ // Note: In ISO 8601, Sun: weekday=7 Mon: weekday=1
+ // In DateCache API, Sun: weekday=0 Mon: weekday=1
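+  // For example, 1970-01-01 (day 0 of the epoch) was a Thursday: Weekday(0)
+  // returns 4, already the ISO value; a Sunday returns 0 and is mapped to 7.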
+ return weekday == 0 ? 7 : weekday;
+}
+
+// #sec-temporal-regulateisodate
+Maybe<bool> RegulateISODate(Isolate* isolate, ShowOverflow overflow,
+ int32_t year, int32_t* month, int32_t* day) {
+ TEMPORAL_ENTER_FUNC();
+
+ // 1. Assert: year, month, and day are integers.
+ // 2. Assert: overflow is either "constrain" or "reject".
+ switch (overflow) {
+ // 3. If overflow is "reject", then
+ case ShowOverflow::kReject:
+ // a. If ! IsValidISODate(year, month, day) is false, throw a RangeError
+ // exception.
+ if (!IsValidISODate(isolate, year, *month, *day)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(), Nothing<bool>());
+ }
+ // b. Return the Record { [[Year]]: year, [[Month]]: month, [[Day]]: day
+ // }.
+ return Just(true);
+ // 4. If overflow is "constrain", then
+ case ShowOverflow::kConstrain:
+ // a. Set month to ! ConstrainToRange(month, 1, 12).
+ *month = std::max(std::min(*month, 12), 1);
+ // b. Set day to ! ConstrainToRange(day, 1, ! ISODaysInMonth(year,
+ // month)).
+ *day = std::max(std::min(*day, ISODaysInMonth(isolate, year, *month)), 1);
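+      // e.g. year=2021, month=13, day=40 is constrained to December 31, and
+      // month=2, day=31 becomes February 28.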
+ // c. Return the Record { [[Year]]: year, [[Month]]: month, [[Day]]: day
+ // }.
+ return Just(true);
+ }
+}
+
+// #sec-temporal-resolveisomonth
+Maybe<int32_t> ResolveISOMonth(Isolate* isolate, Handle<JSReceiver> fields) {
+ Factory* factory = isolate->factory();
+ // 1. Let month be ? Get(fields, "month").
+ Handle<Object> month_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, month_obj,
+ Object::GetPropertyOrElement(isolate, fields, factory->month_string()),
+ Nothing<int32_t>());
+ // 2. Let monthCode be ? Get(fields, "monthCode").
+ Handle<Object> month_code_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, month_code_obj,
+ Object::GetPropertyOrElement(isolate, fields,
+ factory->monthCode_string()),
+ Nothing<int32_t>());
+ // 3. If monthCode is undefined, then
+ if (month_code_obj->IsUndefined(isolate)) {
+ // a. If month is undefined, throw a TypeError exception.
+ if (month_obj->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(), Nothing<int32_t>());
+ }
+ // b. Return month.
+ // Note: In Temporal spec, "month" in fields is always converted by
+ // ToPositiveInteger inside PrepareTemporalFields before calling
+ // ResolveISOMonth. Therefore the month_obj is always a positive integer.
+ DCHECK(month_obj->IsSmi() || month_obj->IsHeapNumber());
+ return Just(FastD2I(month_obj->Number()));
+ }
+ // 4. Assert: Type(monthCode) is String.
+ DCHECK(month_code_obj->IsString());
+ Handle<String> month_code;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, month_code,
+ Object::ToString(isolate, month_code_obj),
+ Nothing<int32_t>());
+ // 5. Let monthLength be the length of monthCode.
+ // 6. If monthLength is not 3, throw a RangeError exception.
+ if (month_code->length() != 3) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
+ factory->monthCode_string()),
+ Nothing<int32_t>());
+ }
+ // 7. Let numberPart be the substring of monthCode from 1.
+ // 8. Set numberPart to ! ToIntegerOrInfinity(numberPart).
+ // 9. If numberPart < 1 or numberPart > 12, throw a RangeError exception.
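+  // The check below accepts exactly "M01" through "M12"; e.g. "M07" resolves
+  // to month 7, while "M00", "M13" and "Jul" are rejected.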
+ uint16_t m0 = month_code->Get(0);
+ uint16_t m1 = month_code->Get(1);
+ uint16_t m2 = month_code->Get(2);
+ if (!((m0 == 'M') && ((m1 == '0' && '1' <= m2 && m2 <= '9') ||
+ (m1 == '1' && '0' <= m2 && m2 <= '2')))) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
+ factory->monthCode_string()),
+ Nothing<int32_t>());
+ }
+ int32_t number_part =
+ 10 * static_cast<int32_t>(m1 - '0') + static_cast<int32_t>(m2 - '0');
+ // 10. If month is not undefined, and month ≠ numberPart, then
+ // 11. If ! SameValueNonNumeric(monthCode, ! BuildISOMonthCode(numberPart)) is
+ // false, then a. Throw a RangeError exception.
+ // Note: In Temporal spec, "month" in fields is always converted by
+ // ToPositiveInteger inside PrepareTemporalFields before calling
+ // ResolveISOMonth. Therefore the month_obj is always a positive integer.
+ if (!month_obj->IsUndefined() &&
+ FastD2I(month_obj->Number()) != number_part) {
+ // a. Throw a RangeError exception.
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
+ factory->month_string()),
+ Nothing<int32_t>());
+ }
+
+ // 12. Return numberPart.
+ return Just(number_part);
+}
+
+// #sec-temporal-isodatefromfields
+Maybe<bool> ISODateFromFields(Isolate* isolate, Handle<JSReceiver> fields,
+ Handle<JSReceiver> options,
+ const char* method_name, int32_t* year,
+ int32_t* month, int32_t* day) {
+ Factory* factory = isolate->factory();
+
+ // 1. Assert: Type(fields) is Object.
+ // 2. Let overflow be ? ToTemporalOverflow(options).
+ Maybe<ShowOverflow> maybe_overflow =
+ ToTemporalOverflow(isolate, options, method_name);
+ MAYBE_RETURN(maybe_overflow, Nothing<bool>());
+ // 3. Set fields to ? PrepareTemporalFields(fields, « "day", "month",
+ // "monthCode", "year" », «»).
+ Handle<FixedArray> field_names = factory->NewFixedArray(4);
+ field_names->set(0, ReadOnlyRoots(isolate).day_string());
+ field_names->set(1, ReadOnlyRoots(isolate).month_string());
+ field_names->set(2, ReadOnlyRoots(isolate).monthCode_string());
+ field_names->set(3, ReadOnlyRoots(isolate).year_string());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, fields,
+ PrepareTemporalFields(isolate, fields, field_names,
+ RequiredFields::kNone),
+ Nothing<bool>());
+
+ // 4. Let year be ? Get(fields, "year").
+ Handle<Object> year_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, year_obj,
+ Object::GetPropertyOrElement(isolate, fields, factory->year_string()),
+ Nothing<bool>());
+ // 5. If year is undefined, throw a TypeError exception.
+ if (year_obj->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ Nothing<bool>());
+ }
+ // Note: "year" in fields is always converted by
+ // ToIntegerThrowOnInfinity inside the PrepareTemporalFields above.
+ // Therefore the year_obj is always an integer.
+ DCHECK(year_obj->IsSmi() || year_obj->IsHeapNumber());
+ *year = FastD2I(year_obj->Number());
+
+ // 6. Let month be ? ResolveISOMonth(fields).
+ Maybe<int32_t> maybe_month = ResolveISOMonth(isolate, fields);
+ MAYBE_RETURN(maybe_month, Nothing<bool>());
+ *month = maybe_month.FromJust();
+
+ // 7. Let day be ? Get(fields, "day").
+ Handle<Object> day_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, day_obj,
+ Object::GetPropertyOrElement(isolate, fields, factory->day_string()),
+ Nothing<bool>());
+ // 8. If day is undefined, throw a TypeError exception.
+ if (day_obj->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NEW_TEMPORAL_INVALD_ARG_TYPE_ERROR(),
+ Nothing<bool>());
+ }
+ // Note: "day" in fields is always converted by
+ // ToIntegerThrowOnInfinity inside the PrepareTemporalFields above.
+ // Therefore the day_obj is always an integer.
+ DCHECK(day_obj->IsSmi() || day_obj->IsHeapNumber());
+ *day = FastD2I(day_obj->Number());
+ // 9. Return ? RegulateISODate(year, month, day, overflow).
+ return RegulateISODate(isolate, maybe_overflow.FromJust(), *year, month, day);
+}
+
+} // namespace
+
+// #sec-temporal.calendar.prototype.daysinyear
+MaybeHandle<Smi> JSTemporalCalendar::DaysInYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. If Type(temporalDateLike) is not Object or temporalDateLike does not
+ // have an [[InitializedTemporalDate]], [[InitializedTemporalDateTime]] or
+ // [[InitializedTemporalYearMonth]] internal slot, then
+ if (!IsPlainDatePlainDateTimeOrPlainYearMonth(temporal_date_like)) {
+ // a. Set temporalDateLike to ? ToTemporalDate(temporalDateLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_like,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.daysInYear"),
+ Smi);
+ }
+
+ // a. Let daysInYear be ! ISODaysInYear(temporalDateLike.[[ISOYear]]).
+ int32_t year;
+ if (temporal_date_like->IsJSTemporalPlainDate()) {
+ year = Handle<JSTemporalPlainDate>::cast(temporal_date_like)->iso_year();
+ } else if (temporal_date_like->IsJSTemporalPlainDateTime()) {
+ year =
+ Handle<JSTemporalPlainDateTime>::cast(temporal_date_like)->iso_year();
+ } else {
+ DCHECK(temporal_date_like->IsJSTemporalPlainYearMonth());
+ year =
+ Handle<JSTemporalPlainYearMonth>::cast(temporal_date_like)->iso_year();
+ }
+ int32_t days_in_year = ISODaysInYear(isolate, year);
+ // 6. Return 𝔽(daysInYear).
+ return handle(Smi::FromInt(days_in_year), isolate);
+}
+
+// #sec-temporal.calendar.prototype.daysinmonth
+MaybeHandle<Smi> JSTemporalCalendar::DaysInMonth(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+  // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. If Type(temporalDateLike) is not Object or temporalDateLike does not
+ // have an [[InitializedTemporalDate]], [[InitializedTemporalDateTime]] or
+ // [[InitializedTemporalYearMonth]] internal slot, then
+ if (!IsPlainDatePlainDateTimeOrPlainYearMonth(temporal_date_like)) {
+ // a. Set temporalDateLike to ? ToTemporalDate(temporalDateLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_like,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.daysInMonth"),
+ Smi);
+ }
+
+ // 5. Return 𝔽(! ISODaysInMonth(temporalDateLike.[[ISOYear]],
+ // temporalDateLike.[[ISOMonth]])).
+ int32_t year;
+ int32_t month;
+ if (temporal_date_like->IsJSTemporalPlainDate()) {
+ year = Handle<JSTemporalPlainDate>::cast(temporal_date_like)->iso_year();
+ month = Handle<JSTemporalPlainDate>::cast(temporal_date_like)->iso_month();
+ } else if (temporal_date_like->IsJSTemporalPlainDateTime()) {
+ year =
+ Handle<JSTemporalPlainDateTime>::cast(temporal_date_like)->iso_year();
+ month =
+ Handle<JSTemporalPlainDateTime>::cast(temporal_date_like)->iso_month();
+ } else {
+ DCHECK(temporal_date_like->IsJSTemporalPlainYearMonth());
+ year =
+ Handle<JSTemporalPlainYearMonth>::cast(temporal_date_like)->iso_year();
+ month =
+ Handle<JSTemporalPlainYearMonth>::cast(temporal_date_like)->iso_month();
+ }
+ return handle(Smi::FromInt(ISODaysInMonth(isolate, year, month)), isolate);
+}
+
+// #sec-temporal.calendar.prototype.year
+MaybeHandle<Smi> JSTemporalCalendar::Year(Isolate* isolate,
+ Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. If Type(temporalDateLike) is not Object or temporalDateLike does not
+ // have an [[InitializedTemporalDate]], [[InitializedTemporalDateTime]],
+ // or [[InitializedTemporalYearMonth]]
+ // internal slot, then
+ if (!IsPlainDatePlainDateTimeOrPlainYearMonth(temporal_date_like)) {
+ // a. Set temporalDateLike to ? ToTemporalDate(temporalDateLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_like,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.year"),
+ Smi);
+ }
+
+ // a. Let year be ! ISOYear(temporalDateLike).
+ int32_t year;
+ if (temporal_date_like->IsJSTemporalPlainDate()) {
+ year = Handle<JSTemporalPlainDate>::cast(temporal_date_like)->iso_year();
+ } else if (temporal_date_like->IsJSTemporalPlainDateTime()) {
+ year =
+ Handle<JSTemporalPlainDateTime>::cast(temporal_date_like)->iso_year();
+ } else {
+ DCHECK(temporal_date_like->IsJSTemporalPlainYearMonth());
+ year =
+ Handle<JSTemporalPlainYearMonth>::cast(temporal_date_like)->iso_year();
+ }
+
+ // 6. Return 𝔽(year).
+ return handle(Smi::FromInt(year), isolate);
+}
+
+// #sec-temporal.calendar.prototype.dayofyear
+MaybeHandle<Smi> JSTemporalCalendar::DayOfYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. Let temporalDate be ? ToTemporalDate(temporalDateLike).
+ Handle<JSTemporalPlainDate> temporal_date;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.dayOfYear"),
+ Smi);
+ // a. Let value be ! ToISODayOfYear(temporalDate.[[ISOYear]],
+ // temporalDate.[[ISOMonth]], temporalDate.[[ISODay]]).
+ int32_t value =
+ ToISODayOfYear(isolate, temporal_date->iso_year(),
+ temporal_date->iso_month(), temporal_date->iso_day());
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// #sec-temporal.calendar.prototype.dayofweek
+MaybeHandle<Smi> JSTemporalCalendar::DayOfWeek(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. Let temporalDate be ? ToTemporalDate(temporalDateLike).
+ Handle<JSTemporalPlainDate> temporal_date;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.dayOfWeek"),
+ Smi);
+ // a. Let value be ! ToISODayOfWeek(temporalDate.[[ISOYear]],
+ // temporalDate.[[ISOMonth]], temporalDate.[[ISODay]]).
+ int32_t value =
+ ToISODayOfWeek(isolate, temporal_date->iso_year(),
+ temporal_date->iso_month(), temporal_date->iso_day());
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// #sec-temporal.calendar.prototype.monthsinyear
+MaybeHandle<Smi> JSTemporalCalendar::MonthsInYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. If Type(temporalDateLike) is not Object or temporalDateLike does not
+ // have an [[InitializedTemporalDate]], [[InitializedTemporalDateTime]], or
+ // [[InitializedTemporalYearMonth]] internal slot, then
+ if (!IsPlainDatePlainDateTimeOrPlainYearMonth(temporal_date_like)) {
+ // a. Set temporalDateLike to ? ToTemporalDate(temporalDateLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_like,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.monthsInYear"),
+ Smi);
+ }
+
+  // a. Let monthsInYear be 12.
+ int32_t months_in_year = 12;
+ // 6. Return 𝔽(monthsInYear).
+ return handle(Smi::FromInt(months_in_year), isolate);
+}
+
+// #sec-temporal.calendar.prototype.inleapyear
+MaybeHandle<Oddball> JSTemporalCalendar::InLeapYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. If Type(temporalDateLike) is not Object or temporalDateLike does not
+ // have an [[InitializedTemporalDate]], [[InitializedTemporalDateTime]], or
+ // [[InitializedTemporalYearMonth]] internal slot, then
+ if (!IsPlainDatePlainDateTimeOrPlainYearMonth(temporal_date_like)) {
+ // a. Set temporalDateLike to ? ToTemporalDate(temporalDateLike).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_like,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.inLeapYear"),
+ Oddball);
+ }
+
+ // a. Let inLeapYear be ! IsISOLeapYear(temporalDateLike.[[ISOYear]]).
+ int32_t year;
+ if (temporal_date_like->IsJSTemporalPlainDate()) {
+ year = Handle<JSTemporalPlainDate>::cast(temporal_date_like)->iso_year();
+ } else if (temporal_date_like->IsJSTemporalPlainDateTime()) {
+ year =
+ Handle<JSTemporalPlainDateTime>::cast(temporal_date_like)->iso_year();
+ } else {
+ DCHECK(temporal_date_like->IsJSTemporalPlainYearMonth());
+ year =
+ Handle<JSTemporalPlainYearMonth>::cast(temporal_date_like)->iso_year();
+ }
+ return isolate->factory()->ToBoolean(IsISOLeapYear(isolate, year));
+}
+
+// #sec-temporal.calendar.prototype.daysinweek
+MaybeHandle<Smi> JSTemporalCalendar::DaysInWeek(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. Perform ? ToTemporalDate(temporalDateLike).
+ Handle<JSTemporalPlainDate> date;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date,
+ ToTemporalDate(isolate, temporal_date_like,
+ isolate->factory()->NewJSObjectWithNullProto(),
+ "Temporal.Calendar.prototype.daysInWeek"),
+ Smi);
+ // 5. Return 7𝔽.
+ return handle(Smi::FromInt(7), isolate);
+}
+
+// #sec-temporal.calendar.prototype.datefromfields
+MaybeHandle<JSTemporalPlainDate> JSTemporalCalendar::DateFromFields(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> fields_obj, Handle<Object> options_obj) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. If Type(fields) is not Object, throw a TypeError exception.
+ const char* method_name = "Temporal.Calendar.prototype.dateFromFields";
+ if (!fields_obj->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)),
+ JSTemporalPlainDate);
+ }
+ Handle<JSReceiver> fields = Handle<JSReceiver>::cast(fields_obj);
+
+ // 5. Set options to ? GetOptionsObject(options).
+ Handle<JSReceiver> options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options, GetOptionsObject(isolate, options_obj, method_name),
+ JSTemporalPlainDate);
+ if (calendar->calendar_index() == 0) {
+ int32_t year;
+ int32_t month;
+ int32_t day;
+ // 6. Let result be ? ISODateFromFields(fields, options).
+ Maybe<bool> maybe_result = ISODateFromFields(
+ isolate, fields, options, method_name, &year, &month, &day);
+ MAYBE_RETURN(maybe_result, Handle<JSTemporalPlainDate>());
+ DCHECK(maybe_result.FromJust());
+ // 7. Return ? CreateTemporalDate(result.[[Year]], result.[[Month]],
+ // result.[[Day]], calendar).
+ return CreateTemporalDate(isolate, year, month, day, calendar);
+ }
+ // TODO(ftang) add intl implementation inside #ifdef V8_INTL_SUPPORT
+ UNREACHABLE();
+}
+
+// #sec-temporal.calendar.prototype.mergefields
+MaybeHandle<JSReceiver> JSTemporalCalendar::MergeFields(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> fields_obj, Handle<Object> additional_fields_obj) {
+ // 1. Let calendar be the this value.
+ // 2. Perform ? RequireInternalSlot(calendar,
+ // [[InitializedTemporalCalendar]]).
+ // 3. Assert: calendar.[[Identifier]] is "iso8601".
+ // 4. Set fields to ? ToObject(fields).
+ Handle<JSReceiver> fields;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, fields,
+ Object::ToObject(isolate, fields_obj), JSReceiver);
+
+ // 5. Set additionalFields to ? ToObject(additionalFields).
+ Handle<JSReceiver> additional_fields;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, additional_fields,
+ Object::ToObject(isolate, additional_fields_obj),
+ JSReceiver);
+ // 5. If calendar.[[Identifier]] is "iso8601", then
+ if (calendar->calendar_index() == 0) {
+ // a. Return ? DefaultMergeFields(fields, additionalFields).
+ return DefaultMergeFields(isolate, fields, additional_fields);
+ }
+#ifdef V8_INTL_SUPPORT
+ // TODO(ftang) add Intl code.
+#endif // V8_INTL_SUPPORT
+ UNREACHABLE();
+}
+
// #sec-temporal.calendar.prototype.tostring
MaybeHandle<String> JSTemporalCalendar::ToString(
Isolate* isolate, Handle<JSTemporalCalendar> calendar,
@@ -4909,6 +6078,36 @@ MaybeHandle<JSTemporalPlainTime> JSTemporalPlainTime::NowISO(
date_time->iso_microsecond(), date_time->iso_nanosecond());
}
+// #sec-temporal.plaintime.from
+MaybeHandle<JSTemporalPlainTime> JSTemporalPlainTime::From(
+ Isolate* isolate, Handle<Object> item_obj, Handle<Object> options_obj) {
+ const char* method_name = "Temporal.PlainTime.from";
+ // 1. Set options to ? GetOptionsObject(options).
+ Handle<JSReceiver> options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options, GetOptionsObject(isolate, options_obj, method_name),
+ JSTemporalPlainTime);
+ // 2. Let overflow be ? ToTemporalOverflow(options).
+ Maybe<ShowOverflow> maybe_overflow =
+ ToTemporalOverflow(isolate, options, method_name);
+ MAYBE_RETURN(maybe_overflow, Handle<JSTemporalPlainTime>());
+ ShowOverflow overflow = maybe_overflow.FromJust();
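+  // e.g. Temporal.PlainTime.from({ hour: 25 }, { overflow: "constrain" })
+  // clamps to 23:00, while { overflow: "reject" } throws a RangeError.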
+ // 3. If Type(item) is Object and item has an [[InitializedTemporalTime]]
+ // internal slot, then
+ if (item_obj->IsJSTemporalPlainTime()) {
+ // a. Return ? CreateTemporalTime(item.[[ISOHour]], item.[[ISOMinute]],
+ // item.[[ISOSecond]], item.[[ISOMillisecond]], item.[[ISOMicrosecond]],
+ // item.[[ISONanosecond]]).
+ Handle<JSTemporalPlainTime> item =
+ Handle<JSTemporalPlainTime>::cast(item_obj);
+ return CreateTemporalTime(isolate, item->iso_hour(), item->iso_minute(),
+ item->iso_second(), item->iso_millisecond(),
+ item->iso_microsecond(), item->iso_nanosecond());
+ }
+ // 4. Return ? ToTemporalTime(item, overflow).
+ return temporal::ToTemporalTime(isolate, item_obj, overflow, method_name);
+}
+
// #sec-temporal.plaintime.prototype.getisofields
MaybeHandle<JSReceiver> JSTemporalPlainTime::GetISOFields(
Isolate* isolate, Handle<JSTemporalPlainTime> temporal_time) {
@@ -5040,13 +6239,83 @@ MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::WithTimeZone(
// 4. Return ? CreateTemporalZonedDateTime(zonedDateTime.[[Nanoseconds]],
// timeZone, zonedDateTime.[[Calendar]]).
- Handle<BigInt> nanoseconds =
- Handle<BigInt>(zoned_date_time->nanoseconds(), isolate);
- Handle<JSReceiver> calendar =
- Handle<JSReceiver>(zoned_date_time->calendar(), isolate);
+ Handle<BigInt> nanoseconds = handle(zoned_date_time->nanoseconds(), isolate);
+ Handle<JSReceiver> calendar = handle(zoned_date_time->calendar(), isolate);
return CreateTemporalZonedDateTime(isolate, nanoseconds, time_zone, calendar);
}
+// Common code shared by ZonedDateTime.prototype.toPlainYearMonth and
+// toPlainMonthDay
+template <typename T,
+ MaybeHandle<T> (*from_fields_func)(
+ Isolate*, Handle<JSReceiver>, Handle<JSReceiver>, Handle<Object>)>
+MaybeHandle<T> ZonedDateTimeToPlainYearMonthOrMonthDay(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time,
+ Handle<String> field_name_1, Handle<String> field_name_2,
+ const char* method_name) {
+ TEMPORAL_ENTER_FUNC();
+ Factory* factory = isolate->factory();
+ // 1. Let zonedDateTime be the this value.
+ // 2. Perform ? RequireInternalSlot(zonedDateTime,
+ // [[InitializedTemporalZonedDateTime]]).
+ // 3. Let timeZone be zonedDateTime.[[TimeZone]].
+ Handle<JSReceiver> time_zone = handle(zoned_date_time->time_zone(), isolate);
+ // 4. Let instant be ! CreateTemporalInstant(zonedDateTime.[[Nanoseconds]]).
+ Handle<JSTemporalInstant> instant;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, instant,
+ temporal::CreateTemporalInstant(
+ isolate, Handle<BigInt>(zoned_date_time->nanoseconds(), isolate)),
+ T);
+ // 5. Let calendar be zonedDateTime.[[Calendar]].
+ Handle<JSReceiver> calendar = handle(zoned_date_time->calendar(), isolate);
+ // 6. Let temporalDateTime be ?
+ // temporal::BuiltinTimeZoneGetPlainDateTimeFor(timeZone, instant, calendar).
+ Handle<JSTemporalPlainDateTime> temporal_date_time;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, temporal_date_time,
+ temporal::BuiltinTimeZoneGetPlainDateTimeFor(isolate, time_zone, instant,
+ calendar, method_name),
+ T);
+ // 7. Let fieldNames be ? CalendarFields(calendar, « field_name_1,
+ // field_name_2 »).
+ Handle<FixedArray> field_names = factory->NewFixedArray(2);
+ field_names->set(0, *field_name_1);
+ field_names->set(1, *field_name_2);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, field_names,
+ CalendarFields(isolate, calendar, field_names), T);
+ // 8. Let fields be ? PrepareTemporalFields(temporalDateTime, fieldNames, «»).
+ Handle<JSReceiver> fields;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, fields,
+ PrepareTemporalFields(isolate, temporal_date_time, field_names,
+ RequiredFields::kNone),
+ T);
+ // 9. Return ? XxxFromFields(calendar, fields).
+ return from_fields_func(isolate, calendar, fields,
+ factory->undefined_value());
+}
+
+// #sec-temporal.zoneddatetime.prototype.toplainyearmonth
+MaybeHandle<JSTemporalPlainYearMonth> JSTemporalZonedDateTime::ToPlainYearMonth(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time) {
+ return ZonedDateTimeToPlainYearMonthOrMonthDay<JSTemporalPlainYearMonth,
+ YearMonthFromFields>(
+ isolate, zoned_date_time, isolate->factory()->monthCode_string(),
+ isolate->factory()->year_string(),
+ "Temporal.ZonedDateTime.prototype.toPlainYearMonth");
+}
+
+// #sec-temporal.zoneddatetime.prototype.toplainmonthday
+MaybeHandle<JSTemporalPlainMonthDay> JSTemporalZonedDateTime::ToPlainMonthDay(
+ Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time) {
+ return ZonedDateTimeToPlainYearMonthOrMonthDay<JSTemporalPlainMonthDay,
+ MonthDayFromFields>(
+ isolate, zoned_date_time, isolate->factory()->day_string(),
+ isolate->factory()->monthCode_string(),
+ "Temporal.ZonedDateTime.prototype.toPlainMonthDay");
+}
+
// #sec-temporal.now.zoneddatetime
MaybeHandle<JSTemporalZonedDateTime> JSTemporalZonedDateTime::Now(
Isolate* isolate, Handle<Object> calendar_like,
@@ -5192,5 +6461,108 @@ MaybeHandle<JSTemporalInstant> JSTemporalInstant::Constructor(
epoch_nanoseconds);
}
+namespace {
+
+// The logic in Temporal.Instant.fromEpochSeconds and fromEpochMilliseconds
+// is the same except for a scaling factor, so both are implemented by the
+// following function.
+MaybeHandle<JSTemporalInstant> ScaleNumberToNanosecondsVerifyAndMake(
+ Isolate* isolate, Handle<BigInt> bigint, uint32_t scale) {
+ TEMPORAL_ENTER_FUNC();
+ DCHECK(scale == 1 || scale == 1000 || scale == 1000000 ||
+ scale == 1000000000);
+ // 2. Let epochNanoseconds be epochXseconds × scaleℤ.
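+  // e.g. fromEpochSeconds(86400) reaches this point with scale == 1000000000
+  // and produces 86400000000000n nanoseconds (one day after the epoch).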
+ Handle<BigInt> epoch_nanoseconds;
+ if (scale == 1) {
+ epoch_nanoseconds = bigint;
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, epoch_nanoseconds,
+ BigInt::Multiply(isolate, BigInt::FromUint64(isolate, scale), bigint),
+ JSTemporalInstant);
+ }
+ // 3. If ! IsValidEpochNanoseconds(epochNanoseconds) is false, throw a
+ // RangeError exception.
+ if (!IsValidEpochNanoseconds(isolate, epoch_nanoseconds)) {
+ THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALD_ARG_RANGE_ERROR(),
+ JSTemporalInstant);
+ }
+ return temporal::CreateTemporalInstant(isolate, epoch_nanoseconds);
+}
+
+MaybeHandle<JSTemporalInstant> ScaleNumberToNanosecondsVerifyAndMake(
+ Isolate* isolate, Handle<Object> epoch_Xseconds, uint32_t scale) {
+ TEMPORAL_ENTER_FUNC();
+ // 1. Set epochXseconds to ? ToNumber(epochXseconds).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, epoch_Xseconds,
+ Object::ToNumber(isolate, epoch_Xseconds),
+ JSTemporalInstant);
+  // 2. Set epochXseconds to ? NumberToBigInt(epochXseconds).
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, bigint,
+ BigInt::FromNumber(isolate, epoch_Xseconds),
+ JSTemporalInstant);
+ return ScaleNumberToNanosecondsVerifyAndMake(isolate, bigint, scale);
+}
+
+MaybeHandle<JSTemporalInstant> ScaleToNanosecondsVerifyAndMake(
+ Isolate* isolate, Handle<Object> epoch_Xseconds, uint32_t scale) {
+ TEMPORAL_ENTER_FUNC();
+  // 1. Set epochXseconds to ? ToBigInt(epochXseconds).
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, bigint,
+ BigInt::FromObject(isolate, epoch_Xseconds),
+ JSTemporalInstant);
+ return ScaleNumberToNanosecondsVerifyAndMake(isolate, bigint, scale);
+}
+
+} // namespace
+
+// #sec-temporal.instant.fromepochseconds
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::FromEpochSeconds(
+ Isolate* isolate, Handle<Object> epoch_seconds) {
+ TEMPORAL_ENTER_FUNC();
+ return ScaleNumberToNanosecondsVerifyAndMake(isolate, epoch_seconds,
+ 1000000000);
+}
+
+// #sec-temporal.instant.fromepochmilliseconds
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::FromEpochMilliseconds(
+ Isolate* isolate, Handle<Object> epoch_milliseconds) {
+ TEMPORAL_ENTER_FUNC();
+ return ScaleNumberToNanosecondsVerifyAndMake(isolate, epoch_milliseconds,
+ 1000000);
+}
+
+// #sec-temporal.instant.fromepochmicroseconds
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::FromEpochMicroseconds(
+ Isolate* isolate, Handle<Object> epoch_microseconds) {
+ TEMPORAL_ENTER_FUNC();
+ return ScaleToNanosecondsVerifyAndMake(isolate, epoch_microseconds, 1000);
+}
+
+// #sec-temporal.instant.fromepochnanoseconds
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::FromEpochNanoseconds(
+ Isolate* isolate, Handle<Object> epoch_nanoseconds) {
+ TEMPORAL_ENTER_FUNC();
+ return ScaleToNanosecondsVerifyAndMake(isolate, epoch_nanoseconds, 1);
+}
+
+// #sec-temporal.instant.from
+MaybeHandle<JSTemporalInstant> JSTemporalInstant::From(Isolate* isolate,
+ Handle<Object> item) {
+ TEMPORAL_ENTER_FUNC();
+ const char* method_name = "Temporal.Instant.from";
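+  // e.g. Temporal.Instant.from("2020-01-01T00:00Z") parses the string, while
+  // Temporal.Instant.from(otherInstant) copies its [[Nanoseconds]].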
+ // 1. If Type(item) is Object and item has an [[InitializedTemporalInstant]]
+ // internal slot, then
+ if (item->IsJSTemporalInstant()) {
+ // a. Return ? CreateTemporalInstant(item.[[Nanoseconds]]).
+ return temporal::CreateTemporalInstant(
+ isolate, handle(JSTemporalInstant::cast(*item).nanoseconds(), isolate));
+ }
+ // 2. Return ? ToTemporalInstant(item).
+ return ToTemporalInstant(isolate, item, method_name);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-temporal-objects.h b/deps/v8/src/objects/js-temporal-objects.h
index 8650584e15..d2524df293 100644
--- a/deps/v8/src/objects/js-temporal-objects.h
+++ b/deps/v8/src/objects/js-temporal-objects.h
@@ -52,6 +52,56 @@ class JSTemporalCalendar
Isolate* isolate, Handle<JSFunction> target,
Handle<HeapObject> new_target, Handle<Object> identifier);
+ // #sec-temporal.calendar.prototype.year
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> Year(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.daysinyear
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> DaysInYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.dayofweek
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> DayOfWeek(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.dayofyear
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> DayOfYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.monthsinyear
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> MonthsInYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.inleapyear
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Oddball> InLeapYear(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.daysinmonth
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> DaysInMonth(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.daysinweek
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Smi> DaysInWeek(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> temporal_date_like);
+
+ // #sec-temporal.calendar.prototype.datefromfields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainDate> DateFromFields(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> fields, Handle<Object> options);
+
+ // #sec-temporal.calendar.prototype.mergefields
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> MergeFields(
+ Isolate* isolate, Handle<JSTemporalCalendar> calendar,
+ Handle<Object> fields, Handle<Object> additional_fields);
+
// #sec-temporal.calendar.prototype.tostring
static MaybeHandle<String> ToString(Isolate* isolate,
Handle<JSTemporalCalendar> calendar,
@@ -86,6 +136,14 @@ class JSTemporalDuration
V8_WARN_UNUSED_RESULT static MaybeHandle<Oddball> Blank(
Isolate* isolate, Handle<JSTemporalDuration> duration);
+ // #sec-temporal.duration.prototype.negated
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalDuration> Negated(
+ Isolate* isolate, Handle<JSTemporalDuration> duration);
+
+ // #sec-temporal.duration.prototype.abs
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalDuration> Abs(
+ Isolate* isolate, Handle<JSTemporalDuration> duration);
+
DECL_PRINTER(JSTemporalDuration)
TQ_OBJECT_CONSTRUCTORS(JSTemporalDuration)
@@ -103,6 +161,23 @@ class JSTemporalInstant
V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant> Now(
Isolate* isolate);
+ // #sec-temporal.instant.fromepochseconds
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant> FromEpochSeconds(
+ Isolate* isolate, Handle<Object> epoch_seconds);
+ // #sec-temporal.instant.fromepochmilliseconds
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant>
+ FromEpochMilliseconds(Isolate* isolate, Handle<Object> epoch_milliseconds);
+ // #sec-temporal.instant.fromepochmicroseconds
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant>
+ FromEpochMicroseconds(Isolate* isolate, Handle<Object> epoch_microseconds);
+  // #sec-temporal.instant.fromepochnanoseconds
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant>
+ FromEpochNanoseconds(Isolate* isolate, Handle<Object> epoch_nanoseconds);
+
+ // #sec-temporal.instant.from
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalInstant> From(
+ Isolate* isolate, Handle<Object> item);
+
DECL_PRINTER(JSTemporalInstant)
TQ_OBJECT_CONSTRUCTORS(JSTemporalInstant)
@@ -225,6 +300,10 @@ class JSTemporalPlainTime
Handle<Object> second, Handle<Object> millisecond,
Handle<Object> microsecond, Handle<Object> nanosecond);
+ // #sec-temporal.plaintime.from
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainTime> From(
+ Isolate* isolate, Handle<Object> item, Handle<Object> options);
+
// #sec-temporal.plaintime.prototype.getisofields
V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
Isolate* isolate, Handle<JSTemporalPlainTime> plain_time);
@@ -332,6 +411,16 @@ class JSTemporalZonedDateTime
V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetISOFields(
Isolate* isolate, Handle<JSTemporalZonedDateTime> zoned_date_time);
+ // #sec-temporal.zoneddatetime.prototype.toplainyearmonth
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainYearMonth>
+ ToPlainYearMonth(Isolate* isolate,
+ Handle<JSTemporalZonedDateTime> zoned_date_time);
+
+ // #sec-temporal.zoneddatetime.prototype.toplainmonthday
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalPlainMonthDay>
+ ToPlainMonthDay(Isolate* isolate,
+ Handle<JSTemporalZonedDateTime> zoned_date_time);
+
// #sec-temporal.now.zoneddatetime
V8_WARN_UNUSED_RESULT static MaybeHandle<JSTemporalZonedDateTime> Now(
Isolate* isolate, Handle<Object> calendar_like,
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 790b9c0e08..81f83302e7 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -475,7 +475,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement(*holder)) {
- DCHECK(!holder_obj->HasTypedArrayElements(isolate_));
+ DCHECK(!holder_obj->HasTypedArrayOrRabGsabTypedArrayElements(isolate_));
DCHECK(attributes != NONE || !holder_obj->HasFastElements(isolate_));
Handle<FixedArrayBase> elements(holder_obj->elements(isolate_), isolate());
holder_obj->GetElementsAccessor(isolate_)->Reconfigure(
@@ -559,13 +559,15 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
#endif
}
-// Can only be called when the receiver is a JSObject. JSProxy has to be handled
-// via a trap. Adding properties to primitive values is not observable.
+// Can only be called when the receiver is a JSObject, or when the name is a
+// private field, otherwise JSProxy has to be handled via a trap.
+// Adding properties to primitive values is not observable.
void LookupIterator::PrepareTransitionToDataProperty(
Handle<JSReceiver> receiver, Handle<Object> value,
PropertyAttributes attributes, StoreOrigin store_origin) {
DCHECK_IMPLIES(receiver->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ DCHECK_IMPLIES(!receiver.is_identical_to(GetStoreTarget<JSReceiver>()),
+ name()->IsPrivateName());
if (state_ == TRANSITION) return;
if (!IsElement() && name()->IsPrivate(isolate_)) {
@@ -624,7 +626,8 @@ void LookupIterator::ApplyTransitionToDataProperty(
Handle<JSReceiver> receiver) {
DCHECK_EQ(TRANSITION, state_);
- DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ DCHECK_IMPLIES(!receiver.is_identical_to(GetStoreTarget<JSReceiver>()),
+ name()->IsPrivateName());
holder_ = receiver;
if (receiver->IsJSGlobalObject(isolate_)) {
JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
@@ -1137,6 +1140,20 @@ void LookupIterator::WriteDataValue(Handle<Object> value, SeqCstAccessTag tag) {
holder->FastPropertyAtPut(field_index, *value, tag);
}
+Handle<Object> LookupIterator::SwapDataValue(Handle<Object> value,
+ SeqCstAccessTag tag) {
+ DCHECK_EQ(DATA, state_);
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
+ DCHECK_EQ(PropertyKind::kData, property_details_.kind());
+ // Currently only shared structs support sequentially consistent access.
+ Handle<JSSharedStruct> holder = GetHolder<JSSharedStruct>();
+ DisallowGarbageCollection no_gc;
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+ return handle(holder->RawFastPropertyAtSwap(field_index, *value, tag),
+ isolate_);
+}
+
#if V8_ENABLE_WEBASSEMBLY
wasm::ValueType LookupIterator::wasm_value_type() const {
@@ -1221,7 +1238,7 @@ bool HasInterceptor(Map map, size_t index) {
if (index > JSObject::kMaxElementIndex) {
// There is currently no way to install interceptors on an object with
// typed array elements.
- DCHECK(!map.has_typed_array_elements());
+ DCHECK(!map.has_typed_array_or_rab_gsab_typed_array_elements());
return map.has_named_interceptor();
}
return map.has_indexed_interceptor();
@@ -1371,7 +1388,7 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
if (!access_check_info.is_null()) {
// There is currently no way to create objects with typed array elements
// and access checks.
- DCHECK(!holder_->map().has_typed_array_elements());
+ DCHECK(!holder_->map().has_typed_array_or_rab_gsab_typed_array_elements());
Object interceptor = is_js_array_element(IsElement())
? access_check_info.indexed_interceptor()
: access_check_info.named_interceptor();
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index fb62f407eb..dd880343a7 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -190,6 +190,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
void WriteDataValue(Handle<Object> value, bool initializing_store);
Handle<Object> GetDataValue(SeqCstAccessTag tag) const;
void WriteDataValue(Handle<Object> value, SeqCstAccessTag tag);
+ Handle<Object> SwapDataValue(Handle<Object> value, SeqCstAccessTag tag);
inline void UpdateProtector();
static inline void UpdateProtector(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name);
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index ac31be483d..f77f5f6339 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -191,8 +191,8 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
return external > limit || counts.GetTotal() > kMaxNumberOfDescriptors;
} else {
int limit = std::max({kFastPropertiesSoftLimit, GetInObjectProperties()});
- int external = NumberOfFields(ConcurrencyMode::kNotConcurrent) -
- GetInObjectProperties();
+ int external =
+ NumberOfFields(ConcurrencyMode::kSynchronous) - GetInObjectProperties();
return external > limit;
}
}
@@ -590,14 +590,6 @@ bool Map::has_fast_string_wrapper_elements() const {
return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
}
-bool Map::has_typed_array_elements() const {
- return IsTypedArrayElementsKind(elements_kind());
-}
-
-bool Map::has_rab_gsab_typed_array_elements() const {
- return IsRabGsabTypedArrayElementsKind(elements_kind());
-}
-
bool Map::has_typed_array_or_rab_gsab_typed_array_elements() const {
return IsTypedArrayOrRabGsabTypedArrayElementsKind(elements_kind());
}
@@ -768,8 +760,7 @@ void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
// static
Map Map::ElementsTransitionMap(Isolate* isolate, ConcurrencyMode cmode) {
- return TransitionsAccessor(isolate, *this,
- cmode == ConcurrencyMode::kConcurrent)
+ return TransitionsAccessor(isolate, *this, IsConcurrent(cmode))
.SearchSpecial(ReadOnlyRoots(isolate).elements_transition_symbol());
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 34de7eb21e..67bc5f57ec 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -297,14 +297,13 @@ struct IntegrityLevelTransitionInfo {
IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
Map map, Isolate* isolate, DisallowGarbageCollection* no_gc,
ConcurrencyMode cmode) {
- const bool is_concurrent = cmode == ConcurrencyMode::kConcurrent;
IntegrityLevelTransitionInfo info(map);
// Figure out the most restrictive integrity level transition (it should
// be the last one in the transition tree).
DCHECK(!map.is_extensible());
Map previous = Map::cast(map.GetBackPointer(isolate));
- TransitionsAccessor last_transitions(isolate, previous, is_concurrent);
+ TransitionsAccessor last_transitions(isolate, previous, IsConcurrent(cmode));
if (!last_transitions.HasIntegrityLevelTransitionTo(
map, &info.integrity_level_symbol, &info.integrity_level)) {
// The last transition was not integrity level transition - just bail out.
@@ -322,7 +321,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// with integrity level transitions, just bail out.
while (!source_map.is_extensible()) {
previous = Map::cast(source_map.GetBackPointer(isolate));
- TransitionsAccessor transitions(isolate, previous, is_concurrent);
+ TransitionsAccessor transitions(isolate, previous, IsConcurrent(cmode));
if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
return info;
}
@@ -390,8 +389,7 @@ base::Optional<Map> MapUpdater::TryUpdateNoLock(Isolate* isolate, Map old_map,
if (info.has_integrity_level_transition) {
// Now replay the integrity level transition.
- result = TransitionsAccessor(isolate, result,
- cmode == ConcurrencyMode::kConcurrent)
+ result = TransitionsAccessor(isolate, result, IsConcurrent(cmode))
.SearchSpecial(info.integrity_level_symbol);
}
if (result.is_null()) return {};
@@ -571,7 +569,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
}
if (!old_map_->EquivalentToForTransition(*root_map_,
- ConcurrencyMode::kNotConcurrent)) {
+ ConcurrencyMode::kSynchronous)) {
return Normalize("Normalize_NotEquivalent");
} else if (old_map_->is_extensible() != root_map_->is_extensible()) {
DCHECK(!old_map_->is_extensible());
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 6aaccd72cc..7336fe8574 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -172,6 +172,9 @@ VisitorId Map::GetVisitorId(Map map) {
case FEEDBACK_METADATA_TYPE:
return kVisitFeedbackMetadata;
+ case ODDBALL_TYPE:
+ return kVisitOddball;
+
case MAP_TYPE:
return kVisitMap;
@@ -348,6 +351,9 @@ VisitorId Map::GetVisitorId(Map map) {
#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ if (instance_type == PROMISE_ON_STACK_TYPE) {
+ return kVisitPromiseOnStack;
+ }
if (instance_type == PROTOTYPE_INFO_TYPE) {
return kVisitPrototypeInfo;
}
@@ -496,10 +502,10 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray old_desc = cmode == ConcurrencyMode::kConcurrent
+ DescriptorArray old_desc = IsConcurrent(cmode)
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
- DescriptorArray new_desc = cmode == ConcurrencyMode::kConcurrent
+ DescriptorArray new_desc = IsConcurrent(cmode)
? target.instance_descriptors(kAcquireLoad)
: target.instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
@@ -525,7 +531,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
}
int Map::NumberOfFields(ConcurrencyMode cmode) const {
- DescriptorArray descriptors = cmode == ConcurrencyMode::kConcurrent
+ DescriptorArray descriptors = IsConcurrent(cmode)
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
int result = 0;
@@ -558,7 +564,7 @@ Map::FieldCounts Map::GetFieldCounts() const {
bool Map::HasOutOfObjectProperties() const {
return GetInObjectProperties() <
- NumberOfFields(ConcurrencyMode::kNotConcurrent);
+ NumberOfFields(ConcurrencyMode::kSynchronous);
}
void Map::DeprecateTransitionTree(Isolate* isolate) {
@@ -675,7 +681,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
}
SLOW_DCHECK(MapUpdater::TryUpdateNoLock(
- isolate, old_map, ConcurrencyMode::kNotConcurrent) == target);
+ isolate, old_map, ConcurrencyMode::kSynchronous) == target);
return target;
}
} // namespace
@@ -695,7 +701,7 @@ MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
}
base::Optional<Map> new_map = MapUpdater::TryUpdateNoLock(
- isolate, *old_map, ConcurrencyMode::kNotConcurrent);
+ isolate, *old_map, ConcurrencyMode::kSynchronous);
if (!new_map.has_value()) return MaybeHandle<Map>();
if (FLAG_fast_map_update) {
TransitionsAccessor::SetMigrationTarget(isolate, old_map, new_map.value());
@@ -707,7 +713,6 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
ConcurrencyMode cmode) {
DisallowGarbageCollection no_gc;
- const bool is_concurrent = cmode == ConcurrencyMode::kConcurrent;
const int root_nof = NumberOfOwnDescriptors();
const int old_nof = old_map.NumberOfOwnDescriptors();
// TODO(jgruber,chromium:1239009): The main thread should use non-atomic
@@ -720,7 +725,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
Map transition =
- TransitionsAccessor(isolate, new_map, is_concurrent)
+ TransitionsAccessor(isolate, new_map, IsConcurrent(cmode))
.SearchTransition(old_descriptors.GetKey(i), old_details.kind(),
old_details.attributes());
if (transition.is_null()) return Map();
@@ -1064,7 +1069,7 @@ Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
ElementsKind kind) {
Handle<Map> closest_map(
FindClosestElementsTransition(isolate, *map, kind,
- ConcurrencyMode::kNotConcurrent),
+ ConcurrencyMode::kSynchronous),
isolate);
if (closest_map->elements_kind() == kind) {
@@ -1340,7 +1345,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
result->set_owns_descriptors(false);
result->UpdateDescriptors(isolate, descriptors, number_of_own_descriptors);
- DCHECK_EQ(result->NumberOfFields(ConcurrencyMode::kNotConcurrent),
+ DCHECK_EQ(result->NumberOfFields(ConcurrencyMode::kSynchronous),
result->GetInObjectProperties() - result->UnusedPropertyFields());
}
@@ -1550,7 +1555,7 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
map->NumberOfOwnDescriptors());
maybe_elements_transition_map =
- map->ElementsTransitionMap(isolate, ConcurrencyMode::kNotConcurrent);
+ map->ElementsTransitionMap(isolate, ConcurrencyMode::kSynchronous);
DCHECK(
maybe_elements_transition_map.is_null() ||
(maybe_elements_transition_map.elements_kind() == DICTIONARY_ELEMENTS &&
@@ -1696,7 +1701,7 @@ Handle<Map> Map::CopyForPreventExtensions(
CopyReplaceDescriptors(isolate, map, new_desc, flag, transition_marker,
reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
- if (!IsTypedArrayElementsKind(map->elements_kind())) {
+ if (!IsTypedArrayOrRabGsabTypedArrayElementsKind(map->elements_kind())) {
ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
? SLOW_STRING_WRAPPER_ELEMENTS
: DICTIONARY_ELEMENTS;
@@ -1872,8 +1877,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
!JSFunction::cast(*maybe_constructor).shared().native()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(maybe_constructor);
- DCHECK_NE(*constructor,
- constructor->context().native_context().object_function());
+ DCHECK_NE(*constructor, constructor->native_context().object_function());
Handle<Map> initial_map(constructor->initial_map(), isolate);
result = Map::Normalize(isolate, initial_map, CLEAR_INOBJECT_PROPERTIES,
reason);
@@ -2120,13 +2124,12 @@ bool Map::EquivalentToForTransition(const Map other,
// not equivalent to strict function.
int nof =
std::min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
- DescriptorArray this_descriptors = cmode == ConcurrencyMode::kConcurrent
+ DescriptorArray this_descriptors = IsConcurrent(cmode)
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
DescriptorArray that_descriptors =
- cmode == ConcurrencyMode::kConcurrent
- ? other.instance_descriptors(kAcquireLoad)
- : other.instance_descriptors();
+ IsConcurrent(cmode) ? other.instance_descriptors(kAcquireLoad)
+ : other.instance_descriptors();
return this_descriptors.IsEqualUpTo(that_descriptors, nof);
}
return true;
@@ -2139,7 +2142,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other,
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
- DescriptorArray descriptors = cmode == ConcurrencyMode::kConcurrent
+ DescriptorArray descriptors = IsConcurrent(cmode)
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 1e95302283..cc38d4e694 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -56,7 +56,9 @@ enum InstanceType : uint16_t;
V(JSWeakCollection) \
V(Map) \
V(NativeContext) \
+ V(Oddball) \
V(PreparseData) \
+ V(PromiseOnStack) \
V(PropertyArray) \
V(PropertyCell) \
V(PrototypeInfo) \
@@ -417,8 +419,6 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
inline bool has_sloppy_arguments_elements() const;
inline bool has_fast_sloppy_arguments_elements() const;
inline bool has_fast_string_wrapper_elements() const;
- inline bool has_typed_array_elements() const;
- inline bool has_rab_gsab_typed_array_elements() const;
inline bool has_typed_array_or_rab_gsab_typed_array_elements() const;
inline bool has_any_typed_array_or_wasm_array_elements() const;
inline bool has_dictionary_elements() const;
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 5e9349d401..5cb4e93daa 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -376,8 +376,16 @@ MaybeHandle<Object> JSModuleNamespace::GetExport(Isolate* isolate,
Handle<Object> value(Cell::cast(*object).value(), isolate);
if (value->IsTheHole(isolate)) {
- THROW_NEW_ERROR(
- isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
+ // According to https://tc39.es/ecma262/#sec-InnerModuleLinking
+ // step 10 and
+ // https://tc39.es/ecma262/#sec-source-text-module-record-initialize-environment
+ // steps 8-25, variables must be declared in Link. And according to
+ // https://tc39.es/ecma262/#sec-module-namespace-exotic-objects-get-p-receiver,
+ // an error for accessing an uninitialized variable should be thrown here.
+ THROW_NEW_ERROR(isolate,
+ NewReferenceError(
+ MessageTemplate::kAccessedUninitializedVariable, name),
+ Object);
}
return value;
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 70651aab83..10cbdc720b 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -105,6 +105,13 @@
} \
type holder::name(PtrComprCageBase cage_base, AcquireLoadTag) const
+#define TQ_FIELD_TYPE(name, tq_type) \
+ static constexpr const char* k##name##TqFieldType = tq_type;
+
+#define DECL_FIELD_OFFSET_TQ(name, value, tq_type) \
+ static const int k##name##Offset = value; \
+ TQ_FIELD_TYPE(name, tq_type)
+
#define DECL_SETTER(name, type) \
inline void set_##name(type value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -683,3 +690,6 @@ static_assert(sizeof(unsigned) == sizeof(uint32_t),
#define TQ_OBJECT_CONSTRUCTORS_IMPL(Type) \
inline Type::Type(Address ptr) \
: TorqueGenerated##Type<Type, Type::Super>(ptr) {}
+
+#define TQ_CPP_OBJECT_DEFINITION_ASSERTS(_class, parent) \
+ template class TorqueGenerated##_class##Asserts<_class, parent>;
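// A minimal illustrative sketch (editorial, not part of the patch above): what
// the newly added TQ_FIELD_TYPE / DECL_FIELD_OFFSET_TQ macros expand to, using
// one of the Oddball field declarations introduced later in this patch. The
// struct name and the numeric sizes below are assumptions for illustration.
#define TQ_FIELD_TYPE(name, tq_type) \
  static constexpr const char* k##name##TqFieldType = tq_type;

#define DECL_FIELD_OFFSET_TQ(name, value, tq_type) \
  static const int k##name##Offset = value;        \
  TQ_FIELD_TYPE(name, tq_type)

struct OddballLayoutSketch {
  static const int kHeaderSize = 8;  // assumed HeapObject header size
  static const int kDoubleSize = 8;
  DECL_FIELD_OFFSET_TQ(ToNumberRaw, kHeaderSize, "float64")
  DECL_FIELD_OFFSET_TQ(ToString, kToNumberRawOffset + kDoubleSize, "String")
  // The second declaration expands to:
  //   static const int kToStringOffset = kToNumberRawOffset + kDoubleSize;
  //   static constexpr const char* kToStringTqFieldType = "String";
};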
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index e2eb0da133..289f1fa053 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -606,6 +606,25 @@ class PreparseData::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class PromiseOnStack::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= HeapObject::kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, Struct::kHeaderSize, kPromiseOffset, v);
+ IterateMaybeWeakPointer(obj, kPromiseOffset, v);
+ STATIC_ASSERT(kPromiseOffset + kTaggedSize == kHeaderSize);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return obj.SizeFromMap(map);
+ }
+};
+
class PrototypeInfo::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -1264,11 +1283,13 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case HEAP_NUMBER_TYPE:
return CALL_APPLY(HeapNumber);
case BYTE_ARRAY_TYPE:
- return CALL_APPLY(BigInt);
+ return CALL_APPLY(ByteArray);
case BIGINT_TYPE:
return CALL_APPLY(BigInt);
case ALLOCATION_SITE_TYPE:
return CALL_APPLY(AllocationSite);
+ case ODDBALL_TYPE:
+ return CALL_APPLY(Oddball);
#define MAKE_STRUCT_CASE(TYPE, Name, name) \
case TYPE: \
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index eb1cab6664..16c09d7c86 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -151,6 +151,7 @@ namespace internal {
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
V(_, MODULE_REQUEST_TYPE, ModuleRequest, module_request) \
V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
+ V(_, PROMISE_ON_STACK_TYPE, PromiseOnStack, promise_on_stack) \
V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
V(_, PROPERTY_DESCRIPTOR_OBJECT_TYPE, PropertyDescriptorObject, \
property_descriptor_object) \
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index e80a6d699c..4616ef7ab7 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -494,6 +494,9 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
Handle<String> fun_str;
if (input->IsJSBoundFunction()) {
fun_str = JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(input));
+ } else if (input->IsJSWrappedFunction()) {
+ fun_str =
+ JSWrappedFunction::ToString(Handle<JSWrappedFunction>::cast(input));
} else {
DCHECK(input->IsJSFunction());
fun_str = JSFunction::ToString(Handle<JSFunction>::cast(input));
@@ -549,9 +552,8 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
isolate, Handle<JSBoundFunction>::cast(ctor))
.ToHandleChecked();
} else if (ctor->IsJSFunction()) {
- Handle<Object> ctor_name_obj =
+ ctor_name =
JSFunction::GetName(isolate, Handle<JSFunction>::cast(ctor));
- ctor_name = AsStringOrEmpty(isolate, ctor_name_obj);
}
if (ctor_name->length() != 0) {
@@ -2861,7 +2863,8 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
Maybe<ShouldThrow> should_throw,
- StoreOrigin store_origin) {
+ StoreOrigin store_origin,
+ EnforceDefineSemantics semantics) {
if (!it->GetReceiver()->IsJSReceiver()) {
return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
value, should_throw);
@@ -2889,9 +2892,11 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
Isolate* isolate = it->isolate();
if (it->ExtendingNonExtensible(receiver)) {
- RETURN_FAILURE(
- isolate, GetShouldThrow(it->isolate(), should_throw),
- NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
+ RETURN_FAILURE(isolate, GetShouldThrow(it->isolate(), should_throw),
+ NewTypeError(semantics == EnforceDefineSemantics::kDefine
+ ? MessageTemplate::kDefineDisallowed
+ : MessageTemplate::kObjectNotExtensible,
+ it->GetName()));
}
if (it->IsElement(*receiver)) {
@@ -2911,28 +2916,36 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
Nothing<bool>());
JSObject::ValidateElements(*receiver_obj);
return Just(true);
- } else {
- it->UpdateProtector();
- // Migrate to the most up-to-date map that will be able to store |value|
- // under it->name() with |attributes|.
- it->PrepareTransitionToDataProperty(receiver, value, attributes,
- store_origin);
- DCHECK_EQ(LookupIterator::TRANSITION, it->state());
- it->ApplyTransitionToDataProperty(receiver);
+ }
- // Write the property value.
- it->WriteDataValue(value, true);
+ return Object::TransitionAndWriteDataProperty(it, value, attributes,
+ should_throw, store_origin);
+}
+
+// static
+Maybe<bool> Object::TransitionAndWriteDataProperty(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ Maybe<ShouldThrow> should_throw, StoreOrigin store_origin) {
+ Handle<JSReceiver> receiver = it->GetStoreTarget<JSReceiver>();
+ it->UpdateProtector();
+ // Migrate to the most up-to-date map that will be able to store |value|
+ // under it->name() with |attributes|.
+ it->PrepareTransitionToDataProperty(receiver, value, attributes,
+ store_origin);
+ DCHECK_EQ(LookupIterator::TRANSITION, it->state());
+ it->ApplyTransitionToDataProperty(receiver);
+
+ // Write the property value.
+ it->WriteDataValue(value, true);
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->HeapObjectVerify(isolate);
+ receiver->HeapObjectVerify(it->isolate());
}
#endif
- }
- return Just(true);
+ return Just(true);
}
-
// static
MaybeHandle<Object> Object::ShareSlow(Isolate* isolate,
Handle<HeapObject> value,
@@ -3594,7 +3607,6 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
Maybe<ShouldThrow> should_throw) {
- DCHECK(!private_name->IsPrivateName());
// Despite the generic name, this can only add private data properties.
if (!PropertyDescriptor::IsDataDescriptor(desc) ||
desc->ToAttributes() != DONT_ENUM) {
@@ -4762,36 +4774,6 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
return value;
}
-Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
- Handle<Context> native_context,
- Handle<Map> initial_map) {
- // Replace all of the cached initial array maps in the native context with
- // the appropriate transitioned elements kind maps.
- Handle<Map> current_map = initial_map;
- ElementsKind kind = current_map->elements_kind();
- DCHECK_EQ(GetInitialFastElementsKind(), kind);
- native_context->set(Context::ArrayMapIndex(kind), *current_map,
- UPDATE_WRITE_BARRIER, kReleaseStore);
- for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
- i < kFastElementsKindCount; ++i) {
- Handle<Map> new_map;
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- Map maybe_elements_transition = current_map->ElementsTransitionMap(
- isolate, ConcurrencyMode::kNotConcurrent);
- if (!maybe_elements_transition.is_null()) {
- new_map = handle(maybe_elements_transition, isolate);
- } else {
- new_map = Map::CopyAsElementsKind(isolate, current_map, next_kind,
- INSERT_TRANSITION);
- }
- DCHECK_EQ(next_kind, new_map->elements_kind());
- native_context->set(Context::ArrayMapIndex(next_kind), *new_map,
- UPDATE_WRITE_BARRIER, kReleaseStore);
- current_map = new_map;
- }
- return initial_map;
-}
-
STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
Oddball::kToNumberRawOffset);
@@ -5180,8 +5162,6 @@ void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
}
Maybe<bool> JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
- // We should never end in here with a pixel or external array.
- DCHECK(array->AllowsSetLength());
if (array->SetLengthWouldNormalize(new_length)) {
JSObject::NormalizeElements(array);
}
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index d57ad0a847..316f870e31 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -183,6 +183,7 @@
// - CallSiteInfo
// - CodeCache
// - PropertyDescriptorObject
+// - PromiseOnStack
// - PrototypeInfo
// - Microtask
// - CallbackTask
@@ -276,6 +277,16 @@ enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
+// Currently DefineOwnPropertyIgnoreAttributes invokes the setter
+// interceptor and user-defined setters during define operations,
+// even in places where it makes more sense to invoke the definer
+// interceptor and not invoke the setter: e.g. both the definer and
+// the setter interceptors are called in Object.defineProperty().
+// kDefine allows us to implement the define semantics correctly
+// in selected locations.
+// TODO(joyee): see if we can deprecate the old behavior.
+enum class EnforceDefineSemantics { kSet, kDefine };
+
// TODO(mythria): Move this to a better place.
ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw);
@@ -539,7 +550,13 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
LookupIterator* it, Handle<Object> value);
V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ Maybe<ShouldThrow> should_throw, StoreOrigin store_origin,
+ EnforceDefineSemantics semantics = EnforceDefineSemantics::kSet);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> TransitionAndWriteDataProperty(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
Maybe<ShouldThrow> should_throw, StoreOrigin store_origin);
+
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Isolate* isolate, Handle<Object> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index c5a38dbb38..ec05b7dae3 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -17,18 +17,30 @@
namespace v8 {
namespace internal {
-#include "torque-generated/src/objects/oddball-tq-inl.inc"
+TQ_CPP_OBJECT_DEFINITION_ASSERTS(Oddball, PrimitiveHeapObject)
-TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
+OBJECT_CONSTRUCTORS_IMPL(Oddball, PrimitiveHeapObject)
+
+CAST_ACCESSOR(Oddball)
+
+DEF_PRIMITIVE_ACCESSORS(Oddball, to_number_raw, kToNumberRawOffset, double)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
// Bug(v8:8875): HeapNumber's double may be unaligned.
base::WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
}
-byte Oddball::kind() const { return TorqueGeneratedOddball::kind(); }
+ACCESSORS(Oddball, to_string, String, kToStringOffset)
+ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
-void Oddball::set_kind(byte value) { TorqueGeneratedOddball::set_kind(value); }
+byte Oddball::kind() const {
+ return Smi::ToInt(TaggedField<Smi>::load(*this, kKindOffset));
+}
+
+void Oddball::set_kind(byte value) {
+ WRITE_FIELD(*this, kKindOffset, Smi::FromInt(value));
+}
// static
Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index eb7b72c7e2..71253aea6b 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -16,28 +16,48 @@ namespace internal {
#include "torque-generated/src/objects/oddball-tq.inc"
// The Oddball describes objects null, undefined, true, and false.
-class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
+class Oddball : public PrimitiveHeapObject {
public:
// [to_number_raw]: Cached raw to_number computed at startup.
+ DECL_PRIMITIVE_ACCESSORS(to_number_raw, double)
inline void set_to_number_raw_as_bits(uint64_t bits);
+ // [to_string]: Cached to_string computed at startup.
+ DECL_ACCESSORS(to_string, String)
+
+ // [to_number]: Cached to_number computed at startup.
+ DECL_ACCESSORS(to_number, Object)
+
+ // [typeof]: Cached type_of computed at startup.
+ DECL_ACCESSORS(type_of, String)
+
inline byte kind() const;
inline void set_kind(byte kind);
- // Oddball has a custom verifier.
- void OddballVerify(Isolate* isolate);
-
// ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
Isolate* isolate, Handle<Oddball> input);
V8_INLINE bool ToBool(Isolate* isolate) const;
+ DECL_CAST(Oddball)
+
+ // Dispatched behavior.
+ DECL_VERIFIER(Oddball)
+
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
const char* type_of, byte kind);
+ // Layout description.
+ DECL_FIELD_OFFSET_TQ(ToNumberRaw, HeapObject::kHeaderSize, "float64")
+ DECL_FIELD_OFFSET_TQ(ToString, kToNumberRawOffset + kDoubleSize, "String")
+ DECL_FIELD_OFFSET_TQ(ToNumber, kToStringOffset + kTaggedSize, "Number")
+ DECL_FIELD_OFFSET_TQ(TypeOf, kToNumberOffset + kTaggedSize, "String")
+ DECL_FIELD_OFFSET_TQ(Kind, kTypeOfOffset + kTaggedSize, "Smi")
+ static const int kSize = kKindOffset + kTaggedSize;
+
static const byte kFalse = 0;
static const byte kTrue = 1;
static const byte kNotBooleanMask = static_cast<byte>(~1);
@@ -53,7 +73,8 @@ class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
static const byte kSelfReferenceMarker = 10;
static const byte kBasicBlockCountersMarker = 11;
- class BodyDescriptor;
+ using BodyDescriptor =
+ FixedBodyDescriptor<kToStringOffset, kKindOffset, kSize>;
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
STATIC_ASSERT(kNull == Internals::kNullOddballKind);
@@ -61,7 +82,7 @@ class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
DECL_PRINTER(Oddball)
- TQ_OBJECT_CONSTRUCTORS(Oddball)
+ OBJECT_CONSTRUCTORS(Oddball, PrimitiveHeapObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/oddball.tq b/deps/v8/src/objects/oddball.tq
index 3edee2dbb9..749c156ddd 100644
--- a/deps/v8/src/objects/oddball.tq
+++ b/deps/v8/src/objects/oddball.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateBodyDescriptor
+@cppObjectDefinition
@apiExposedInstanceTypeValue(0x83)
@highestInstanceTypeWithinParentClassRange
extern class Oddball extends PrimitiveHeapObject {
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.cc b/deps/v8/src/objects/osr-optimized-code-cache.cc
index 4ffefd59a8..cd19f57197 100644
--- a/deps/v8/src/objects/osr-optimized-code-cache.cc
+++ b/deps/v8/src/objects/osr-optimized-code-cache.cc
@@ -12,22 +12,32 @@
namespace v8 {
namespace internal {
-const int OSROptimizedCodeCache::kInitialLength;
-const int OSROptimizedCodeCache::kMaxLength;
+// static
+Handle<OSROptimizedCodeCache> OSROptimizedCodeCache::Empty(Isolate* isolate) {
+ return Handle<OSROptimizedCodeCache>::cast(
+ isolate->factory()->empty_weak_fixed_array());
+}
-void OSROptimizedCodeCache::AddOptimizedCode(
- Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared,
- Handle<CodeT> code, BytecodeOffset osr_offset) {
+// static
+void OSROptimizedCodeCache::Insert(Isolate* isolate,
+ Handle<NativeContext> native_context,
+ Handle<SharedFunctionInfo> shared,
+ Handle<CodeT> code,
+ BytecodeOffset osr_offset) {
DCHECK(!osr_offset.IsNone());
- DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- STATIC_ASSERT(kEntryLength == 3);
- Isolate* isolate = native_context->GetIsolate();
DCHECK(!isolate->serializer_enabled());
+ DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
- DCHECK_EQ(osr_cache->FindEntry(shared, osr_offset), -1);
+ if (shared->osr_code_cache_state() == kNotCached) {
+ DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1);
+ } else if (osr_cache->FindEntry(*shared, osr_offset) != -1) {
+ return; // Already cached for a different JSFunction.
+ }
+
+ STATIC_ASSERT(kEntryLength == 3);
int entry = -1;
for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
@@ -37,28 +47,32 @@ void OSROptimizedCodeCache::AddOptimizedCode(
}
}
- if (entry == -1 && osr_cache->length() + kEntryLength <= kMaxLength) {
- entry = GrowOSRCache(native_context, &osr_cache);
- } else if (entry == -1) {
- // We reached max capacity and cannot grow further. Reuse an existing entry.
- // TODO(mythria): We could use better mechanisms (like lru) to replace
- // existing entries. Though we don't expect this to be a common case, so
- // for now choosing to replace the first entry.
- entry = 0;
+ if (entry == -1) {
+ if (osr_cache->length() + kEntryLength <= kMaxLength) {
+ entry = GrowOSRCache(isolate, native_context, &osr_cache);
+ } else {
+ // We reached max capacity and cannot grow further. Reuse an existing
+ // entry.
+ // TODO(mythria): We could use better mechanisms (like lru) to replace
+ // existing entries. Though we don't expect this to be a common case, so
+ // for now choosing to replace the first entry.
+ osr_cache->ClearEntry(0, isolate);
+ entry = 0;
+ }
}
osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
}
-void OSROptimizedCodeCache::Clear(NativeContext native_context) {
- native_context.set_osr_code_cache(
- *native_context.GetIsolate()->factory()->empty_weak_fixed_array());
+void OSROptimizedCodeCache::Clear(Isolate* isolate,
+ NativeContext native_context) {
+ native_context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate));
}
-void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), native_context->GetIsolate());
- Isolate* isolate = native_context->GetIsolate();
+void OSROptimizedCodeCache::Compact(Isolate* isolate,
+ Handle<NativeContext> native_context) {
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
// Re-adjust the cache so all the valid entries are on one side. This will
// enable us to compress the cache if needed.
@@ -83,29 +97,31 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
DCHECK_LT(new_osr_cache->length(), osr_cache->length());
{
DisallowGarbageCollection no_gc;
- new_osr_cache->CopyElements(native_context->GetIsolate(), 0, *osr_cache, 0,
+ new_osr_cache->CopyElements(isolate, 0, *osr_cache, 0,
new_osr_cache->length(),
new_osr_cache->GetWriteBarrierMode(no_gc));
}
native_context->set_osr_code_cache(*new_osr_cache);
}
-CodeT OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
- BytecodeOffset osr_offset,
- Isolate* isolate) {
+CodeT OSROptimizedCodeCache::TryGet(SharedFunctionInfo shared,
+ BytecodeOffset osr_offset,
+ Isolate* isolate) {
DisallowGarbageCollection no_gc;
int index = FindEntry(shared, osr_offset);
- if (index == -1) return CodeT();
+ if (index == -1) return {};
+
CodeT code = GetCodeFromEntry(index);
if (code.is_null()) {
ClearEntry(index, isolate);
- return CodeT();
+ return {};
}
+
DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
return code;
}
-void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
+void OSROptimizedCodeCache::EvictDeoptimizedCode(Isolate* isolate) {
// This is called from DeoptimizeMarkedCodeForContext that uses raw pointers
// and hence the DisallowGarbageCollection scope here.
DisallowGarbageCollection no_gc;
@@ -122,22 +138,41 @@ void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
}
}
-std::vector<int> OSROptimizedCodeCache::GetBytecodeOffsetsFromSFI(
+std::vector<BytecodeOffset> OSROptimizedCodeCache::OsrOffsetsFor(
SharedFunctionInfo shared) {
- std::vector<int> bytecode_offsets;
DisallowGarbageCollection gc;
+
+ const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
+ if (state == kNotCached) return {};
+
+ std::vector<BytecodeOffset> offsets;
for (int index = 0; index < length(); index += kEntryLength) {
- if (GetSFIFromEntry(index) == shared) {
- bytecode_offsets.push_back(GetBytecodeOffsetFromEntry(index).ToInt());
- }
+ if (GetSFIFromEntry(index) != shared) continue;
+ offsets.emplace_back(GetBytecodeOffsetFromEntry(index));
+ if (state == kCachedOnce) return offsets;
}
- return bytecode_offsets;
+
+ return offsets;
+}
+
+base::Optional<BytecodeOffset> OSROptimizedCodeCache::FirstOsrOffsetFor(
+ SharedFunctionInfo shared) {
+ DisallowGarbageCollection gc;
+
+ const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
+ if (state == kNotCached) return {};
+
+ for (int index = 0; index < length(); index += kEntryLength) {
+ if (GetSFIFromEntry(index) != shared) continue;
+ return GetBytecodeOffsetFromEntry(index);
+ }
+
+ return {};
}
int OSROptimizedCodeCache::GrowOSRCache(
- Handle<NativeContext> native_context,
+ Isolate* isolate, Handle<NativeContext> native_context,
Handle<OSROptimizedCodeCache>* osr_cache) {
- Isolate* isolate = native_context->GetIsolate();
int old_length = (*osr_cache)->length();
int grow_by = CapacityForLength(old_length) - old_length;
DCHECK_GT(grow_by, kEntryLength);
@@ -178,12 +213,12 @@ BytecodeOffset OSROptimizedCodeCache::GetBytecodeOffsetFromEntry(int index) {
return BytecodeOffset(osr_offset_entry.value());
}
-int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared,
+int OSROptimizedCodeCache::FindEntry(SharedFunctionInfo shared,
BytecodeOffset osr_offset) {
DisallowGarbageCollection no_gc;
DCHECK(!osr_offset.IsNone());
for (int index = 0; index < length(); index += kEntryLength) {
- if (GetSFIFromEntry(index) != *shared) continue;
+ if (GetSFIFromEntry(index) != shared) continue;
if (GetBytecodeOffsetFromEntry(index) != osr_offset) continue;
return index;
}
@@ -256,5 +291,13 @@ bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
}
+MaybeObject OSROptimizedCodeCache::RawGetForTesting(int index) const {
+ return WeakFixedArray::Get(index);
+}
+
+void OSROptimizedCodeCache::RawSetForTesting(int index, MaybeObject value) {
+ WeakFixedArray::Set(index, value);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.h b/deps/v8/src/objects/osr-optimized-code-cache.h
index 7b28ba0001..ef48456a3a 100644
--- a/deps/v8/src/objects/osr-optimized-code-cache.h
+++ b/deps/v8/src/objects/osr-optimized-code-cache.h
@@ -6,68 +6,94 @@
#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
#include "src/objects/fixed-array.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-// This enum are states that how many OSR code caches belong to a SFI. Without
-// this enum, need to check all OSR code cache entries to know whether a
-// JSFunction's SFI has OSR code cache. The enum value kCachedMultiple is for
-// doing time-consuming loop check only when the very unlikely state change
-// kCachedMultiple -> { kCachedOnce | kCachedMultiple }.
+// This enum is a performance optimization for accessing the OSR code cache -
+// we can skip cache iteration in many cases unless there are multiple entries
+// for a particular SharedFunctionInfo.
enum OSRCodeCacheStateOfSFI : uint8_t {
- kNotCached, // Likely state, no OSR code cache
- kCachedOnce, // Unlikely state, one OSR code cache
- kCachedMultiple, // Very unlikely state, multiple OSR code caches
+ kNotCached, // Likely state.
+ kCachedOnce, // Unlikely state, one entry.
+ kCachedMultiple, // Very unlikely state, multiple entries.
};
+// TODO(jgruber): There are a few issues with the current implementation:
+//
+// - The cache is a flat list, thus any search operation is O(N). This resulted
+// in optimization attempts, see OSRCodeCacheStateOfSFI.
+// - We always iterate up to `length` (== capacity).
+// - We essentially reimplement WeakArrayList, i.e. growth and shrink logic.
+// - On overflow, new entries always pick slot 0.
+//
+// There are a few alternatives:
+//
+// 1) we could reuse WeakArrayList logic (but then we'd still have to
+// implement custom compaction due to our entry tuple structure).
+// 2) we could reuse CompilationCacheTable (but then we lose weakness and have
+// to deal with aging).
+// 3) we could try to base on a weak HashTable variant (EphemeronHashTable?).
class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
public:
DECL_CAST(OSROptimizedCodeCache)
- enum OSRCodeCacheConstants {
- kSharedOffset,
- kCachedCodeOffset,
- kOsrIdOffset,
- kEntryLength
- };
-
- static const int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
- static const int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;
+ static Handle<OSROptimizedCodeCache> Empty(Isolate* isolate);
// Caches the optimized code |code| corresponding to the shared function
// |shared| and bailout id |osr_offset| in the OSROptimized code cache.
// If the OSR code cache wasn't created before it creates a code cache with
// kOSRCodeCacheInitialLength entries.
- static void AddOptimizedCode(Handle<NativeContext> context,
- Handle<SharedFunctionInfo> shared,
- Handle<CodeT> code, BytecodeOffset osr_offset);
- // Reduces the size of the OSR code cache if the number of valid entries are
- // less than the current capacity of the cache.
- static void Compact(Handle<NativeContext> context);
- // Sets the OSR optimized code cache to an empty array.
- static void Clear(NativeContext context);
+ static void Insert(Isolate* isolate, Handle<NativeContext> context,
+ Handle<SharedFunctionInfo> shared, Handle<CodeT> code,
+ BytecodeOffset osr_offset);
// Returns the code corresponding to the shared function |shared| and
// BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
// object otherwise.
- CodeT GetOptimizedCode(Handle<SharedFunctionInfo> shared,
- BytecodeOffset osr_offset, Isolate* isolate);
+ CodeT TryGet(SharedFunctionInfo shared, BytecodeOffset osr_offset,
+ Isolate* isolate);
+
+ std::vector<BytecodeOffset> OsrOffsetsFor(SharedFunctionInfo shared);
+ base::Optional<BytecodeOffset> FirstOsrOffsetFor(SharedFunctionInfo shared);
// Remove all code objects marked for deoptimization from OSR code cache.
- void EvictMarkedCode(Isolate* isolate);
+ void EvictDeoptimizedCode(Isolate* isolate);
+
+ // Reduces the size of the OSR code cache if the number of valid entries are
+ // less than the current capacity of the cache.
+ static void Compact(Isolate* isolate, Handle<NativeContext> context);
- // Returns vector of bytecode offsets corresponding to the shared function
- // |shared|
- std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared);
+ // Sets the OSR optimized code cache to an empty array.
+ static void Clear(Isolate* isolate, NativeContext context);
+
+ enum OSRCodeCacheConstants {
+ kSharedOffset,
+ kCachedCodeOffset,
+ kOsrIdOffset,
+ kEntryLength
+ };
+
+ static constexpr int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
+ static constexpr int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;
+
+ // For osr-code-cache-unittest.cc.
+ MaybeObject RawGetForTesting(int index) const;
+ void RawSetForTesting(int index, MaybeObject value);
private:
+ // Hide raw accessors to avoid terminology confusion.
+ using WeakFixedArray::Get;
+ using WeakFixedArray::Set;
+
// Functions that implement heuristics on when to grow / shrink the cache.
static int CapacityForLength(int curr_capacity);
static bool NeedsTrimming(int num_valid_entries, int curr_capacity);
- static int GrowOSRCache(Handle<NativeContext> native_context,
+ static int GrowOSRCache(Isolate* isolate,
+ Handle<NativeContext> native_context,
Handle<OSROptimizedCodeCache>* osr_cache);
// Helper functions to get individual items from an entry in the cache.
@@ -75,8 +101,7 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
SharedFunctionInfo GetSFIFromEntry(int index);
BytecodeOffset GetBytecodeOffsetFromEntry(int index);
- inline int FindEntry(Handle<SharedFunctionInfo> shared,
- BytecodeOffset osr_offset);
+ inline int FindEntry(SharedFunctionInfo shared, BytecodeOffset osr_offset);
inline void ClearEntry(int src, Isolate* isolate);
inline void InitializeEntry(int entry, SharedFunctionInfo shared, CodeT code,
BytecodeOffset osr_offset);
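// A minimal illustrative sketch (editorial, not part of the patch above): the
// flat slot layout behind OSROptimizedCodeCache. Each logical entry occupies
// kEntryLength consecutive WeakFixedArray slots, which is why lookups such as
// FindEntry() scan linearly with stride kEntryLength, as noted in the TODO in
// osr-optimized-code-cache.h.
#include <cstdio>

int main() {
  enum { kSharedOffset, kCachedCodeOffset, kOsrIdOffset, kEntryLength };
  const int kInitialLength = kEntryLength * 4;  // mirrors the header constant
  for (int index = 0; index < kInitialLength; index += kEntryLength) {
    std::printf("entry %d -> slots [%d, %d, %d]\n", index / kEntryLength,
                index + kSharedOffset, index + kCachedCodeOffset,
                index + kOsrIdOffset);
  }
  return 0;
}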
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index 3bc39847e6..8295637d9e 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -81,6 +81,25 @@ void PropertyArray::set(int index, Object value, SeqCstAccessTag tag) {
// space, so the generational write barrier is also not needed.
}
+Object PropertyArray::Swap(int index, Object value, SeqCstAccessTag tag) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return Swap(cage_base, index, value, tag);
+}
+
+Object PropertyArray::Swap(PtrComprCageBase cage_base, int index, Object value,
+ SeqCstAccessTag tag) {
+ DCHECK(IsPropertyArray());
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length(kAcquireLoad)));
+ DCHECK(value.IsShared());
+ return TaggedField<Object>::SeqCst_Swap(cage_base, *this,
+ OffsetOfElementAt(index), value);
+ // JSSharedStructs are allocated in the shared old space, which is currently
+ // collected by stopping the world, so the incremental write barrier is not
+ // needed. They can only store Smis and other HeapObjects in the shared old
+ // space, so the generational write barrier is also not needed.
+}
+
ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
int PropertyArray::length() const {
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 9599ca4d1b..9a5c5f8508 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -40,6 +40,10 @@ class PropertyArray
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
+ inline Object Swap(int index, Object value, SeqCstAccessTag tag);
+ inline Object Swap(PtrComprCageBase cage_base, int index, Object value,
+ SeqCstAccessTag tag);
+
// Signature must be in sync with FixedArray::CopyElements().
inline void CopyElements(Isolate* isolate, int dst_index, PropertyArray src,
int src_index, int len, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index c8783c362e..6fffdf6b19 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -261,6 +261,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, maglev_compilation_failed,
+ SharedFunctionInfo::MaglevCompilationFailedBit)
+
BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index dd33a8ce12..ed7ceca422 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -15,7 +15,6 @@
#include "src/objects/function-kind.h"
#include "src/objects/function-syntax-kind.h"
#include "src/objects/objects.h"
-#include "src/objects/osr-optimized-code-cache.h"
#include "src/objects/script.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
@@ -42,6 +41,8 @@ class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
+enum OSRCodeCacheStateOfSFI : uint8_t;
+
namespace wasm {
struct WasmModule;
class ValueType;
@@ -446,6 +447,8 @@ class SharedFunctionInfo
DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
+ DECL_BOOLEAN_ACCESSORS(maglev_compilation_failed)
+
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 048e871e7e..3908eeeac7 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -44,6 +44,7 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
bitfield struct SharedFunctionInfoFlags2 extends uint8 {
class_scope_has_private_brand: bool: 1 bit;
has_static_private_methods_or_accessors: bool: 1 bit;
+ maglev_compilation_failed: bool: 1 bit;
}
@generateBodyDescriptor
diff --git a/deps/v8/src/objects/swiss-hash-table-helpers.h b/deps/v8/src/objects/swiss-hash-table-helpers.h
index 792742b243..ff2189be3a 100644
--- a/deps/v8/src/objects/swiss-hash-table-helpers.h
+++ b/deps/v8/src/objects/swiss-hash-table-helpers.h
@@ -17,51 +17,50 @@
#ifndef V8_OBJECTS_SWISS_HASH_TABLE_HELPERS_H_
#define V8_OBJECTS_SWISS_HASH_TABLE_HELPERS_H_
-// The following #defines are taken from Abseil's have_sse.h (but renamed). They
-// are only defined within this file. However, we also take cross platform
-// snapshot creation into account, by only using SSE if the target supports it,
-// too. The SSE implementation uses a group width of 16, whereas the non-SSE
-// version uses 8. We therefore have to avoid building a snapshot that contains
-// Swiss Tables with one group size and use it in code that excepts a different
-// group size.
-#ifndef SWISS_TABLE_HAVE_SSE2
-#if (defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))) && \
- (defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64))
-#define SWISS_TABLE_HAVE_SSE2 1
+// The following #defines are taken from Abseil's have_sse.h (but renamed).
+#ifndef V8_SWISS_TABLE_HAVE_SSE2_HOST
+#if (defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2))))
+#define V8_SWISS_TABLE_HAVE_SSE2_HOST 1
#else
-#define SWISS_TABLE_HAVE_SSE2 0
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-// TODO(v8:11388) Currently, building on a non-SSE platform for a SSE target
-// means that we cannot use the (more performant) SSE implementations of Swiss
-// Tables, even if the target would support it, just because the host doesn't.
-// This is due to the difference in group sizes (see comment at the beginning of
-// the file). We can solve this by implementating a new non-SSE Group that
-// behaves like GroupSse2Impl (and uses group size 16) in the future.
-#warning "You should avoid building on a non-SSE platform for a SSE target!"
-#endif
+#define V8_SWISS_TABLE_HAVE_SSE2_HOST 0
#endif
#endif
-#ifndef SWISS_TABLE_HAVE_SSSE3
-#if defined(__SSSE3__) && \
- (defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64))
-#define SWISS_TABLE_HAVE_SSSE3 1
+#ifndef V8_SWISS_TABLE_HAVE_SSSE3_HOST
+#if defined(__SSSE3__)
+#define V8_SWISS_TABLE_HAVE_SSSE3_HOST 1
#else
-#define SWISS_TABLE_HAVE_SSSE3 0
+#define V8_SWISS_TABLE_HAVE_SSSE3_HOST 0
#endif
#endif
-#if SWISS_TABLE_HAVE_SSSE3 && !SWISS_TABLE_HAVE_SSE2
+#if V8_SWISS_TABLE_HAVE_SSSE3_HOST && !V8_SWISS_TABLE_HAVE_SSE2_HOST
#error "Bad configuration!"
#endif
-#if SWISS_TABLE_HAVE_SSE2
+// Unlike Abseil, we cannot select SSE purely by host capabilities. When
+// creating a snapshot, the group width must be compatible. The SSE
+// implementation uses a group width of 16, whereas the non-SSE version uses 8.
+// Thus we select the group size based on target capabilities and, if the host
+// does not match, select a polyfill implementation. This means, in supported
+// cross-compiling configurations, we must be able to determine matching target
+// capabilities from the host.
+#ifndef V8_SWISS_TABLE_HAVE_SSE2_TARGET
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+// x64 always has SSE2, and ia32 without SSE2 is not supported by V8.
+#define V8_SWISS_TABLE_HAVE_SSE2_TARGET 1
+#else
+#define V8_SWISS_TABLE_HAVE_SSE2_TARGET 0
+#endif
+#endif
+
+#if V8_SWISS_TABLE_HAVE_SSE2_HOST
#include <emmintrin.h>
#endif
-#if SWISS_TABLE_HAVE_SSSE3
+#if V8_SWISS_TABLE_HAVE_SSSE3_HOST
#include <tmmintrin.h>
#endif
@@ -126,9 +125,9 @@ class BitMask {
public:
// These are useful for unit tests (gunit).
- // using value_type = int;
- // using iterator = BitMask;
- // using const_iterator = BitMask;
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
explicit BitMask(T mask) : mask_(mask) {}
BitMask& operator++() {
@@ -219,7 +218,7 @@ inline static swiss_table::ctrl_t H2(uint32_t hash) {
return hash & ((1 << kH2Bits) - 1);
}
-#if SWISS_TABLE_HAVE_SSE2
+#if V8_SWISS_TABLE_HAVE_SSE2_HOST
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
@@ -252,7 +251,7 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if SWISS_TABLE_HAVE_SSSE3
+#if V8_SWISS_TABLE_HAVE_SSSE3_HOST
// This only works because kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
@@ -278,7 +277,7 @@ struct GroupSse2Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if SWISS_TABLE_HAVE_SSSE3
+#if V8_SWISS_TABLE_HAVE_SSSE3_HOST
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -290,7 +289,64 @@ struct GroupSse2Impl {
__m128i ctrl;
};
-#endif // SWISS_TABLE_HAVE_SSE2
+#endif // V8_SWISS_TABLE_HAVE_SSE2_HOST
+
+// A portable, inefficient version of GroupSse2Impl. This exists so SSE2-less
+// hosts can generate snapshots for SSE2-capable targets.
+struct GroupSse2Polyfill {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+
+ explicit GroupSse2Polyfill(const ctrl_t* pos) { memcpy(ctrl_, pos, kWidth); }
+
+ // Returns a bitmask representing the positions of slots that match |hash|.
+ BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ uint32_t mask = 0;
+ for (size_t i = 0; i < kWidth; i++) {
+ if (static_cast<h2_t>(ctrl_[i]) == hash) {
+ mask |= 1u << i;
+ }
+ }
+ return BitMask<uint32_t, kWidth>(mask);
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint32_t, kWidth> MatchEmpty() const {
+ return Match(static_cast<h2_t>(kEmpty));
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+ return BitMask<uint32_t, kWidth>(MatchEmptyOrDeletedMask());
+ }
+
+ // Returns the number of trailing empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ return base::bits::CountTrailingZerosNonZero(MatchEmptyOrDeletedMask() + 1);
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ for (size_t i = 0; i < kWidth; i++) {
+ if (ctrl_[i] < 0) {
+ dst[i] = kEmpty;
+ } else {
+ dst[i] = kDeleted;
+ }
+ }
+ }
+
+ private:
+ uint32_t MatchEmptyOrDeletedMask() const {
+ uint32_t mask = 0;
+ for (size_t i = 0; i < kWidth; i++) {
+ if (ctrl_[i] < kSentinel) {
+ mask |= 1u << i;
+ }
+ }
+ return mask;
+ }
+
+ ctrl_t ctrl_[kWidth];
+};
struct GroupPortableImpl {
static constexpr size_t kWidth = 8; // the number of slots per group
@@ -366,16 +422,23 @@ struct GroupPortableImpl {
// backend should only use SSE2 when compiling the SIMD version of
// SwissNameDictionary into the builtin.
using Group = GroupPortableImpl;
-#else
-#if SWISS_TABLE_HAVE_SSE2
+#elif V8_SWISS_TABLE_HAVE_SSE2_TARGET
+// Use a matching group size between host and target.
+#if V8_SWISS_TABLE_HAVE_SSE2_HOST
using Group = GroupSse2Impl;
#else
-using Group = GroupPortableImpl;
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+// If we do not detect SSE2 when building for the ia32/x64 target, the
+// V8_SWISS_TABLE_HAVE_SSE2_TARGET logic will incorrectly cause the final output
+// to use the inefficient polyfill implementation. Detect this case and warn if
+// it happens.
+#warning "Did not detect required SSE2 support on ia32/x64."
#endif
+using Group = GroupSse2Polyfill;
+#endif
+#else
+using Group = GroupPortableImpl;
#endif
-
-#undef SWISS_TABLE_HAVE_SSE2
-#undef SWISS_TABLE_HAVE_SSE3
} // namespace swiss_table
} // namespace internal
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
index e1b73bd2bb..3ed08a95c9 100644
--- a/deps/v8/src/objects/tagged-field-inl.h
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -236,6 +236,29 @@ void TaggedField<T, kFieldOffset>::SeqCst_Store(HeapObject host, int offset,
AsAtomicTagged::SeqCst_Store(location(host, offset), full_to_tagged(ptr));
}
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::SeqCst_Swap(HeapObject host, int offset,
+ T value) {
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ AtomicTagged_t old_value =
+ AsAtomicTagged::SeqCst_Swap(location(host, offset), full_to_tagged(ptr));
+ return T(tagged_to_full(host.ptr(), old_value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::SeqCst_Swap(PtrComprCageBase cage_base,
+ HeapObject host, int offset,
+ T value) {
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ AtomicTagged_t old_value =
+ AsAtomicTagged::SeqCst_Swap(location(host, offset), full_to_tagged(ptr));
+ return T(tagged_to_full(cage_base, old_value));
+}
+
} // namespace internal
} // namespace v8
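
The SeqCst_Swap overloads added above behave like a sequentially consistent atomic exchange on the field's slot: the new value is stored and the previous contents are returned. A rough standalone analogue, using std::atomic and a plain uintptr_t instead of V8's AsAtomicTagged helpers and compressed tagged values:

#include <atomic>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // Stand-in for a tagged field slot; V8 swaps AtomicTagged_t values instead.
  std::atomic<uintptr_t> slot{0x1234};
  uintptr_t new_value = 0x5678;
  // exchange() with the default memory_order_seq_cst returns the previous
  // value, just as TaggedField::SeqCst_Swap returns the old field contents.
  uintptr_t old_value = slot.exchange(new_value);
  printf("old=0x%" PRIxPTR " new=0x%" PRIxPTR "\n", old_value, slot.load());
  return 0;
}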
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 2076ca96c3..7eb6a5a9cf 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
@@ -57,16 +57,20 @@ class TaggedField : public AllStatic {
static inline T Acquire_Load(PtrComprCageBase cage_base, HeapObject host,
int offset = 0);
- static inline void Release_Store(HeapObject host, T value);
- static inline void Release_Store(HeapObject host, int offset, T value);
-
static inline T SeqCst_Load(HeapObject host, int offset = 0);
static inline T SeqCst_Load(PtrComprCageBase cage_base, HeapObject host,
int offset = 0);
+ static inline void Release_Store(HeapObject host, T value);
+ static inline void Release_Store(HeapObject host, int offset, T value);
+
static inline void SeqCst_Store(HeapObject host, T value);
static inline void SeqCst_Store(HeapObject host, int offset, T value);
+ static inline T SeqCst_Swap(HeapObject host, int offset, T value);
+ static inline T SeqCst_Swap(PtrComprCageBase cage_base, HeapObject host,
+ int offset, T value);
+
static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
T value);
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 7e7a93999e..911ee9a7c6 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -907,10 +907,6 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
WriteVarint(index.FromJust());
return ThrowIfOutOfMemory();
}
- if (!array_buffer->is_detachable()) {
- return ThrowDataCloneError(
- MessageTemplate::kDataCloneErrorNonDetachableArrayBuffer);
- }
uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
if (transfer_entry) {
@@ -1375,6 +1371,32 @@ void ValueDeserializer::TransferArrayBuffer(
}
}
+MaybeHandle<Object> ValueDeserializer::ReadObjectWrapper() {
+ // We had a bug which produced invalid version 13 data (see
+ // crbug.com/1284506). This compatibility mode tries to first read the data
+ // normally, and if it fails, and the version is 13, tries to read the broken
+ // format.
+ const uint8_t* original_position = position_;
+ suppress_deserialization_errors_ = true;
+ MaybeHandle<Object> result = ReadObject();
+
+ // The deserialization code doesn't throw errors for invalid data. It throws
+ // errors for stack overflows, though, and in that case we won't retry.
+ if (result.is_null() && version_ == 13 &&
+ !isolate_->has_pending_exception()) {
+ version_13_broken_data_mode_ = true;
+ position_ = original_position;
+ result = ReadObject();
+ }
+
+ if (result.is_null() && !isolate_->has_pending_exception()) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ }
+
+ return result;
+}
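
The compatibility path in ReadObjectWrapper() boils down to: remember the read position, attempt a normal parse with error throwing suppressed, and only for version 13 data rewind and re-parse in the broken-data mode. A condensed, hypothetical illustration of that control flow (TinyDeserializer and its byte format are made up for this example; the real code operates on V8 heap objects and isolate exception state):

#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical stand-in illustrating the rewind-and-retry pattern; the real
// ValueDeserializer reads V8 heap objects, not ints.
struct TinyDeserializer {
  const uint8_t* position;
  const uint8_t* end;
  uint32_t version;
  bool broken_v13_mode = false;

  // "Normal" format: a 0x01 tag byte followed by one payload byte. The broken
  // v13 format omitted the tag, so a normal read fails on such data.
  std::optional<int> ReadValue() {
    if (broken_v13_mode) {
      if (position >= end) return std::nullopt;
      return *position++;
    }
    if (end - position < 2 || position[0] != 0x01) return std::nullopt;
    position += 2;
    return position[-1];
  }

  std::optional<int> ReadValueWrapper() {
    const uint8_t* original_position = position;
    std::optional<int> result = ReadValue();
    if (!result && version == 13) {
      broken_v13_mode = true;       // rewind and retry in compatibility mode
      position = original_position;
      result = ReadValue();
    }
    return result;  // still empty -> the caller would report a DataCloneError
  }
};

int main() {
  const uint8_t broken_payload[] = {0x2a};  // payload without the 0x01 tag
  TinyDeserializer d{broken_payload, broken_payload + sizeof(broken_payload), 13};
  if (auto v = d.ReadValueWrapper()) printf("recovered value: %d\n", *v);
  return 0;
}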
+
MaybeHandle<Object> ValueDeserializer::ReadObject() {
DisallowJavascriptExecution no_js(isolate_);
// If we are at the end of the stack, abort. This function may recurse.
@@ -1392,7 +1414,8 @@ MaybeHandle<Object> ValueDeserializer::ReadObject() {
result = ReadJSArrayBufferView(Handle<JSArrayBuffer>::cast(object));
}
- if (result.is_null() && !isolate_->has_pending_exception()) {
+ if (result.is_null() && !suppress_deserialization_errors_ &&
+ !isolate_->has_pending_exception()) {
isolate_->Throw(*isolate_->factory()->NewError(
MessageTemplate::kDataCloneDeserializationError));
}
@@ -1968,7 +1991,8 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
byte_length > buffer_byte_length - byte_offset) {
return MaybeHandle<JSArrayBufferView>();
}
- if (version_ >= 14 && !ReadVarint<uint32_t>().To(&flags)) {
+ const bool should_read_flags = version_ >= 14 || version_13_broken_data_mode_;
+ if (should_read_flags && !ReadVarint<uint32_t>().To(&flags)) {
return MaybeHandle<JSArrayBufferView>();
}
uint32_t id = next_id_++;
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index 7dc8842a09..d6804e04a7 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -212,7 +212,7 @@ class ValueDeserializer {
/*
* Deserializes a V8 object from the buffer.
*/
- MaybeHandle<Object> ReadObject() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<Object> ReadObjectWrapper() V8_WARN_UNUSED_RESULT;
/*
* Reads an object, consuming the entire buffer.
@@ -256,6 +256,7 @@ class ValueDeserializer {
Maybe<double> ReadDouble() V8_WARN_UNUSED_RESULT;
Maybe<base::Vector<const uint8_t>> ReadRawBytes(size_t size)
V8_WARN_UNUSED_RESULT;
+ MaybeHandle<Object> ReadObject() V8_WARN_UNUSED_RESULT;
// Reads a string if it matches the one provided.
// Returns true if this was the case. Otherwise, nothing is consumed.
@@ -322,6 +323,8 @@ class ValueDeserializer {
const bool supports_shared_values_;
uint32_t version_ = 0;
uint32_t next_id_ = 0;
+ bool version_13_broken_data_mode_ = false;
+ bool suppress_deserialization_errors_ = false;
// Always global handles.
Handle<FixedArray> id_map_;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 6fba987818..c3a28660f9 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -850,13 +850,11 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
} else if (object.IsContext()) {
return AddEntry(object, HeapEntry::kObject, "system / Context");
- } else if (object.IsFixedArray() || object.IsFixedDoubleArray() ||
- object.IsByteArray()) {
- return AddEntry(object, HeapEntry::kArray, "");
} else if (object.IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "heap number");
}
- return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
+ return AddEntry(object, GetSystemEntryType(object),
+ GetSystemEntryName(object));
}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object, HeapEntry::Type type,
@@ -894,7 +892,17 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
}
}
- switch (object.map().instance_type()) {
+ InstanceType type = object.map().instance_type();
+
+ // Empty string names are special: TagObject can overwrite them, and devtools
+ // will report them as "(internal array)".
+ if (InstanceTypeChecker::IsFixedArray(type) ||
+ InstanceTypeChecker::IsFixedDoubleArray(type) ||
+ InstanceTypeChecker::IsByteArray(type)) {
+ return "";
+ }
+
+ switch (type) {
#define MAKE_TORQUE_CASE(Name, TYPE) \
case TYPE: \
return "system / " #Name;
@@ -916,6 +924,40 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
}
}
+HeapEntry::Type V8HeapExplorer::GetSystemEntryType(HeapObject object) {
+ InstanceType type = object.map().instance_type();
+ if (InstanceTypeChecker::IsAllocationSite(type) ||
+ InstanceTypeChecker::IsArrayBoilerplateDescription(type) ||
+ InstanceTypeChecker::IsBytecodeArray(type) ||
+ InstanceTypeChecker::IsClosureFeedbackCellArray(type) ||
+ InstanceTypeChecker::IsCodeDataContainer(type) ||
+ InstanceTypeChecker::IsFeedbackCell(type) ||
+ InstanceTypeChecker::IsFeedbackMetadata(type) ||
+ InstanceTypeChecker::IsFeedbackVector(type) ||
+ InstanceTypeChecker::IsInterpreterData(type) ||
+ InstanceTypeChecker::IsLoadHandler(type) ||
+ InstanceTypeChecker::IsObjectBoilerplateDescription(type) ||
+ InstanceTypeChecker::IsPreparseData(type) ||
+ InstanceTypeChecker::IsRegExpBoilerplateDescription(type) ||
+ InstanceTypeChecker::IsScopeInfo(type) ||
+ InstanceTypeChecker::IsStoreHandler(type) ||
+ InstanceTypeChecker::IsTemplateObjectDescription(type) ||
+ InstanceTypeChecker::IsTurbofanType(type) ||
+ InstanceTypeChecker::IsUncompiledData(type)) {
+ return HeapEntry::kCode;
+ }
+
+ // This check must come second, because some subtypes of FixedArray are
+ // determined above to represent code content.
+ if (InstanceTypeChecker::IsFixedArray(type) ||
+ InstanceTypeChecker::IsFixedDoubleArray(type) ||
+ InstanceTypeChecker::IsByteArray(type)) {
+ return HeapEntry::kArray;
+ }
+
+ return HeapEntry::kHidden;
+}
+
uint32_t V8HeapExplorer::EstimateObjectsCount() {
CombinedHeapObjectIterator it(heap_, HeapObjectIterator::kFilterUnreachable);
uint32_t objects_count = 0;
@@ -1061,6 +1103,9 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
} else if (obj.IsArrayBoilerplateDescription()) {
ExtractArrayBoilerplateDescriptionReferences(
entry, ArrayBoilerplateDescription::cast(obj));
+ } else if (obj.IsRegExpBoilerplateDescription()) {
+ ExtractRegExpBoilerplateDescriptionReferences(
+ entry, RegExpBoilerplateDescription::cast(obj));
} else if (obj.IsFeedbackVector()) {
ExtractFeedbackVectorReferences(entry, FeedbackVector::cast(obj));
} else if (obj.IsDescriptorArray()) {
@@ -1083,6 +1128,10 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
if (snapshot_->capture_numeric_value()) {
ExtractNumberReference(entry, obj);
}
+ } else if (obj.IsBytecodeArray()) {
+ ExtractBytecodeArrayReferences(entry, BytecodeArray::cast(obj));
+ } else if (obj.IsScopeInfo()) {
+ ExtractScopeInfoReferences(entry, ScopeInfo::cast(obj));
}
}
@@ -1400,9 +1449,13 @@ void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script script) {
SetInternalReference(entry, "name", script.name(), Script::kNameOffset);
SetInternalReference(entry, "context_data", script.context_data(),
Script::kContextDataOffset);
- TagObject(script.line_ends(), "(script line ends)");
+ TagObject(script.line_ends(), "(script line ends)", HeapEntry::kCode);
SetInternalReference(entry, "line_ends", script.line_ends(),
Script::kLineEndsOffset);
+ TagObject(script.shared_function_infos(), "(shared function infos)",
+ HeapEntry::kCode);
+ TagObject(script.host_defined_options(), "(host-defined options)",
+ HeapEntry::kCode);
}
void V8HeapExplorer::ExtractAccessorInfoReferences(HeapEntry* entry,
@@ -1447,7 +1500,7 @@ void V8HeapExplorer::TagBuiltinCodeObject(CodeT code, const char* name) {
}
void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
- TagObject(code.relocation_info(), "(code relocation info)");
+ TagObject(code.relocation_info(), "(code relocation info)", HeapEntry::kCode);
SetInternalReference(entry, "relocation_info", code.relocation_info(),
Code::kRelocationInfoOffset);
@@ -1456,16 +1509,27 @@ void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
SetInternalReference(entry, "interpreter_data",
code.bytecode_or_interpreter_data(),
Code::kDeoptimizationDataOrInterpreterDataOffset);
- TagObject(code.bytecode_offset_table(), "(bytecode offset table)");
+ TagObject(code.bytecode_offset_table(), "(bytecode offset table)",
+ HeapEntry::kCode);
SetInternalReference(entry, "bytecode_offset_table",
code.bytecode_offset_table(),
Code::kPositionTableOffset);
} else {
- TagObject(code.deoptimization_data(), "(code deopt data)");
- SetInternalReference(entry, "deoptimization_data",
- code.deoptimization_data(),
+ DeoptimizationData deoptimization_data =
+ DeoptimizationData::cast(code.deoptimization_data());
+ TagObject(deoptimization_data, "(code deopt data)", HeapEntry::kCode);
+ SetInternalReference(entry, "deoptimization_data", deoptimization_data,
Code::kDeoptimizationDataOrInterpreterDataOffset);
- TagObject(code.source_position_table(), "(source position table)");
+ if (deoptimization_data.length() > 0) {
+ TagObject(deoptimization_data.TranslationByteArray(), "(code deopt data)",
+ HeapEntry::kCode);
+ TagObject(deoptimization_data.LiteralArray(), "(code deopt data)",
+ HeapEntry::kCode);
+ TagObject(deoptimization_data.InliningPositions(), "(code deopt data)",
+ HeapEntry::kCode);
+ }
+ TagObject(code.source_position_table(), "(source position table)",
+ HeapEntry::kCode);
SetInternalReference(entry, "source_position_table",
code.source_position_table(),
Code::kPositionTableOffset);
@@ -1499,15 +1563,22 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(HeapEntry* entry,
AllocationSite::kTransitionInfoOrBoilerplateOffset);
SetInternalReference(entry, "nested_site", site.nested_site(),
AllocationSite::kNestedSiteOffset);
- TagObject(site.dependent_code(), "(dependent code)");
+ TagObject(site.dependent_code(), "(dependent code)", HeapEntry::kCode);
SetInternalReference(entry, "dependent_code", site.dependent_code(),
AllocationSite::kDependentCodeOffset);
}
void V8HeapExplorer::ExtractArrayBoilerplateDescriptionReferences(
HeapEntry* entry, ArrayBoilerplateDescription value) {
- SetInternalReference(entry, "constant_elements", value.constant_elements(),
+ FixedArrayBase constant_elements = value.constant_elements();
+ SetInternalReference(entry, "constant_elements", constant_elements,
ArrayBoilerplateDescription::kConstantElementsOffset);
+ TagObject(constant_elements, "(constant elements)", HeapEntry::kCode);
+}
+
+void V8HeapExplorer::ExtractRegExpBoilerplateDescriptionReferences(
+ HeapEntry* entry, RegExpBoilerplateDescription value) {
+ TagObject(value.data(), "(RegExp data)", HeapEntry::kCode);
}
class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
@@ -1596,6 +1667,23 @@ void V8HeapExplorer::ExtractNumberReference(HeapEntry* entry, Object number) {
generator_);
}
+void V8HeapExplorer::ExtractBytecodeArrayReferences(HeapEntry* entry,
+ BytecodeArray bytecode) {
+ RecursivelyTagConstantPool(bytecode.constant_pool(), "(constant pool)",
+ HeapEntry::kCode, 3);
+ TagObject(bytecode.handler_table(), "(handler table)", HeapEntry::kCode);
+ TagObject(bytecode.source_position_table(kAcquireLoad),
+ "(source position table)", HeapEntry::kCode);
+}
+
+void V8HeapExplorer::ExtractScopeInfoReferences(HeapEntry* entry,
+ ScopeInfo info) {
+ if (!info.HasInlinedLocalNames()) {
+ TagObject(info.context_local_names_hashtable(), "(context local names)",
+ HeapEntry::kCode);
+ }
+}
+
void V8HeapExplorer::ExtractFeedbackVectorReferences(
HeapEntry* entry, FeedbackVector feedback_vector) {
MaybeObject code = feedback_vector.maybe_optimized_code();
@@ -1604,6 +1692,15 @@ void V8HeapExplorer::ExtractFeedbackVectorReferences(
SetWeakReference(entry, "optimized code", code_heap_object,
FeedbackVector::kMaybeOptimizedCodeOffset);
}
+ for (int i = 0; i < feedback_vector.length(); ++i) {
+ MaybeObject maybe_entry = *(feedback_vector.slots_start() + i);
+ HeapObject entry;
+ if (maybe_entry.GetHeapObjectIfStrong(&entry) &&
+ (entry.map(isolate()).instance_type() == WEAK_FIXED_ARRAY_TYPE ||
+ entry.IsFixedArrayExact())) {
+ TagObject(entry, "(feedback)", HeapEntry::kCode);
+ }
+ }
}
void V8HeapExplorer::ExtractDescriptorArrayReferences(HeapEntry* entry,
@@ -2180,12 +2277,33 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object object) {
return it != strong_gc_subroot_names_.end() ? it->second : nullptr;
}
-void V8HeapExplorer::TagObject(Object obj, const char* tag) {
+void V8HeapExplorer::TagObject(Object obj, const char* tag,
+ base::Optional<HeapEntry::Type> type) {
if (IsEssentialObject(obj)) {
HeapEntry* entry = GetEntry(obj);
if (entry->name()[0] == '\0') {
entry->set_name(tag);
}
+ if (type.has_value()) {
+ entry->set_type(*type);
+ }
+ }
+}
+
+void V8HeapExplorer::RecursivelyTagConstantPool(Object obj, const char* tag,
+ HeapEntry::Type type,
+ int recursion_limit) {
+ --recursion_limit;
+ if (obj.IsFixedArrayExact(isolate())) {
+ FixedArray arr = FixedArray::cast(obj);
+ TagObject(arr, tag, type);
+ if (recursion_limit <= 0) return;
+ for (int i = 0; i < arr.length(); ++i) {
+ RecursivelyTagConstantPool(arr.get(i), tag, type, recursion_limit);
+ }
+ } else if (obj.IsNameDictionary(isolate()) ||
+ obj.IsNumberDictionary(isolate())) {
+ TagObject(obj, tag, type);
}
}
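
The base::Optional<HeapEntry::Type> default argument on TagObject keeps existing call sites source-compatible while letting new callers retype an entry as they tag it. A small standalone illustration of the same optional-argument pattern, using std::optional and hypothetical names rather than the real heap-snapshot types:

#include <cstdio>
#include <optional>
#include <string>

enum class EntryType { kHidden, kArray, kCode };

// Mirrors the TagObject signature change: the type is only applied when the
// caller supplies one.
void Tag(std::string* name, EntryType* type, const char* tag,
         std::optional<EntryType> new_type = {}) {
  if (name->empty()) *name = tag;
  if (new_type.has_value()) *type = *new_type;
}

int main() {
  std::string name;
  EntryType type = EntryType::kHidden;
  Tag(&name, &type, "(script line ends)", EntryType::kCode);  // tag + retype
  Tag(&name, &type, "(other tag)");                           // tag only
  printf("%s, type=%d\n", name.c_str(), static_cast<int>(type));
  return 0;
}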
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 3c3918ea2a..56e64944e5 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -402,6 +402,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
const char* name);
const char* GetSystemEntryName(HeapObject object);
+ HeapEntry::Type GetSystemEntryType(HeapObject object);
void ExtractLocation(HeapEntry* entry, HeapObject object);
void ExtractLocationForJSFunction(HeapEntry* entry, JSFunction func);
@@ -433,12 +434,16 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAllocationSiteReferences(HeapEntry* entry, AllocationSite site);
void ExtractArrayBoilerplateDescriptionReferences(
HeapEntry* entry, ArrayBoilerplateDescription value);
+ void ExtractRegExpBoilerplateDescriptionReferences(
+ HeapEntry* entry, RegExpBoilerplateDescription value);
void ExtractJSArrayBufferReferences(HeapEntry* entry, JSArrayBuffer buffer);
void ExtractJSPromiseReferences(HeapEntry* entry, JSPromise promise);
void ExtractJSGeneratorObjectReferences(HeapEntry* entry,
JSGeneratorObject generator);
void ExtractFixedArrayReferences(HeapEntry* entry, FixedArray array);
void ExtractNumberReference(HeapEntry* entry, Object number);
+ void ExtractBytecodeArrayReferences(HeapEntry* entry, BytecodeArray bytecode);
+ void ExtractScopeInfoReferences(HeapEntry* entry, ScopeInfo info);
void ExtractFeedbackVectorReferences(HeapEntry* entry,
FeedbackVector feedback_vector);
void ExtractDescriptorArrayReferences(HeapEntry* entry,
@@ -486,7 +491,10 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void SetGcSubrootReference(Root root, const char* description, bool is_weak,
Object child);
const char* GetStrongGcSubrootName(Object object);
- void TagObject(Object obj, const char* tag);
+ void TagObject(Object obj, const char* tag,
+ base::Optional<HeapEntry::Type> type = {});
+ void RecursivelyTagConstantPool(Object obj, const char* tag,
+ HeapEntry::Type type, int recursion_limit);
HeapEntry* GetEntry(Object obj);
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 4ef7d9f010..afbe860cb2 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -899,9 +899,20 @@ CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
USE(isolate_);
}
+CpuProfilingResult CpuProfilesCollection::StartProfilingForTesting(
+ ProfilerId id) {
+ return StartProfiling(id);
+}
+
CpuProfilingResult CpuProfilesCollection::StartProfiling(
const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
+ return StartProfiling(++last_id_, title, options, std::move(delegate));
+}
+
+CpuProfilingResult CpuProfilesCollection::StartProfiling(
+ ProfilerId id, const char* title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
current_profiles_semaphore_.Wait();
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
@@ -912,22 +923,22 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
};
}
- if (title != nullptr) {
- for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
- if (profile->title() != nullptr && strcmp(profile->title(), title) == 0) {
- // Ignore attempts to start profile with the same title...
- current_profiles_semaphore_.Signal();
- // ... though return kAlreadyStarted to force it collect a sample.
- return {
- profile->id(),
- CpuProfilingStatus::kAlreadyStarted,
- };
- }
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
+ if ((profile->title() != nullptr && title != nullptr &&
+ strcmp(profile->title(), title) == 0) ||
+ profile->id() == id) {
+ // Ignore attempts to start a profile with the same title or id...
+ current_profiles_semaphore_.Signal();
+ // ... though return kAlreadyStarted to force it to collect a sample.
+ return {
+ profile->id(),
+ CpuProfilingStatus::kAlreadyStarted,
+ };
}
}
- CpuProfile* profile = new CpuProfile(profiler_, ++last_id_, title, options,
- std::move(delegate));
+ CpuProfile* profile =
+ new CpuProfile(profiler_, id, title, options, std::move(delegate));
current_profiles_.emplace_back(profile);
current_profiles_semaphore_.Signal();
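
With the rewritten check, StartProfiling deduplicates on either the title or the explicit id, so re-starting an existing profile returns its id with kAlreadyStarted rather than creating a second profile. A self-contained analogue of that logic with plain stand-in structs (not the real CpuProfile/CpuProfilingResult types):

#include <cstdio>
#include <cstring>
#include <memory>
#include <vector>

// Hypothetical stand-ins for CpuProfile / CpuProfilingResult.
struct Profile {
  unsigned id;
  const char* title;  // may be nullptr for anonymous profiles
};
enum class Status { kStarted, kAlreadyStarted };
struct Result { unsigned id; Status status; };

struct Collection {
  std::vector<std::unique_ptr<Profile>> current;
  unsigned last_id = 0;

  Result Start(unsigned id, const char* title) {
    for (const auto& p : current) {
      // Same title (when both are non-null) or same id: reuse the profile.
      if ((p->title != nullptr && title != nullptr &&
           strcmp(p->title, title) == 0) ||
          p->id == id) {
        return {p->id, Status::kAlreadyStarted};
      }
    }
    current.push_back(std::make_unique<Profile>(Profile{id, title}));
    return {id, Status::kStarted};
  }
  Result Start(const char* title) { return Start(++last_id, title); }
};

int main() {
  Collection c;
  Result a = c.Start("render");
  Result b = c.Start("render");  // duplicate title -> kAlreadyStarted, same id
  printf("a.id=%u b.id=%u already=%d\n", a.id, b.id,
         b.status == Status::kAlreadyStarted);
  return 0;
}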
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index d8d38ce034..2c3b44e6b0 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -544,6 +544,8 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
const char* title = nullptr, CpuProfilingOptions options = {},
std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+ // This method is only visible for testing.
+ CpuProfilingResult StartProfilingForTesting(ProfilerId id);
CpuProfile* StopProfiling(ProfilerId id);
bool IsLastProfileLeft(ProfilerId id);
CpuProfile* Lookup(const char* title);
@@ -574,6 +576,10 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
static const int kMaxSimultaneousProfiles = 100;
private:
+ CpuProfilingResult StartProfiling(
+ ProfilerId id, const char* title = nullptr,
+ CpuProfilingOptions options = {},
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
StringsStorage resource_names_;
std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
CpuProfiler* profiler_;
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 78be35552e..b4c9471131 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -12,7 +12,7 @@
#include "src/logging/log.h"
#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 812f4f98fd..e1330b0255 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -813,7 +813,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CPURegList registers_to_retain = kCalleeSaved;
DCHECK_EQ(registers_to_retain.Count(), kNumCalleeSavedRegisters);
- __ PushCPURegList<TurboAssembler::kDontStoreLR>(registers_to_retain);
+ __ PushCPURegList(registers_to_retain);
__ Push<TurboAssembler::kSignLR>(lr, fp);
__ PushCPURegList(argument_registers);
@@ -1128,7 +1128,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
// Restore registers.
- __ PopCPURegList<TurboAssembler::kDontLoadLR>(registers_to_retain);
+ __ PopCPURegList(registers_to_retain);
__ Ret();
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
index c60a714339..f347f8c189 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -11,7 +11,7 @@
#include "src/logging/log.h"
#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index dafc657f81..8904201d02 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -11,7 +11,7 @@
#include "src/logging/log.h"
#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 17546ed52d..b5c54848f4 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -12,7 +12,7 @@
#include "src/logging/log.h"
#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index fb9425f008..da81f89320 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -12,7 +12,7 @@
#include "src/logging/log.h"
#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
index 8f6b5e278d..bea3a7cb3d 100644
--- a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
@@ -12,7 +12,7 @@
#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/strings/unicode.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index bf22b69222..f4fa2a9ae6 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -13,7 +13,7 @@
#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 75eb6b7700..cdace8ed7b 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -323,7 +323,8 @@ class Symbol;
V(ArrayList, basic_block_profiling_data, BasicBlockProfilingData) \
V(WeakArrayList, shared_wasm_memories, SharedWasmMemories) \
IF_WASM(V, HeapObject, active_continuation, ActiveContinuation) \
- IF_WASM(V, HeapObject, active_suspender, ActiveSuspender)
+ IF_WASM(V, HeapObject, active_suspender, ActiveSuspender) \
+ IF_WASM(V, WeakArrayList, wasm_canonical_rtts, WasmCanonicalRtts)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index fc03476f2a..9f8c1005b8 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -157,7 +157,7 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSObject> array = args.at<JSObject>(0);
- CHECK(!array->HasTypedArrayElements());
+ CHECK(!array->HasTypedArrayOrRabGsabTypedArrayElements());
CHECK(!array->IsJSGlobalProxy());
JSObject::NormalizeElements(array);
return *array;
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 31f1c8f743..e372c2a1c9 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -640,9 +640,34 @@ RUNTIME_FUNCTION(Runtime_AtomicsStoreSharedStructField) {
}
// Shared structs are non-extensible. Instead of duplicating logic, call
// Object::AddDataProperty to handle the error case.
- CHECK(Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
- StoreOrigin::kMaybeKeyed)
- .IsNothing());
+ Maybe<bool> result =
+ Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
+ StoreOrigin::kMaybeKeyed);
+ DCHECK(result.IsNothing());
+ USE(result);
+ return ReadOnlyRoots(isolate).exception();
+}
+
+RUNTIME_FUNCTION(Runtime_AtomicsExchangeSharedStructField) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
+ Handle<Name> field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
+ Object::ToName(isolate, args.at(1)));
+ Handle<Object> shared_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
+ // Shared structs are prototypeless.
+ LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
+ if (it.IsFound()) return *it.SwapDataValue(shared_value, kSeqCstAccess);
+ // Shared structs are non-extensible. Instead of duplicating logic, call
+ // Object::AddDataProperty to handle the error case.
+ Maybe<bool> result =
+ Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
+ StoreOrigin::kMaybeKeyed);
+ DCHECK(result.IsNothing());
+ USE(result);
return ReadOnlyRoots(isolate).exception();
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 9c7686a14a..2565be1c32 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -31,17 +31,19 @@ namespace {
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
CodeKind target_kind, ConcurrencyMode mode) {
+ // As a pre- and post-condition of CompileOptimized, the function *must* be
+ // compiled, i.e. the installed Code object must not be CompileLazy.
+ IsCompiledScope is_compiled_scope(function->shared(), isolate);
+ DCHECK(is_compiled_scope.is_compiled());
+
StackLimitCheck check(isolate);
// Concurrent optimization runs on another thread, thus no additional gap.
- const int gap = mode == ConcurrencyMode::kConcurrent
- ? 0
- : kStackSpaceRequiredForCompilation * KB;
+ const int gap =
+ IsConcurrent(mode) ? 0 : kStackSpaceRequiredForCompilation * KB;
if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();
Compiler::CompileOptimized(isolate, function, mode, target_kind);
- // As a post-condition of CompileOptimized, the function *must* be compiled,
- // i.e. the installed Code object must not be the CompileLazy builtin.
DCHECK(function->is_compiled());
return function->code();
}
@@ -57,9 +59,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
#ifdef DEBUG
if (FLAG_trace_lazy && !sfi->is_compiled()) {
- PrintF("[unoptimized: ");
- function->PrintName();
- PrintF("]\n");
+ PrintF("[unoptimized: %s]\n", function->DebugNameCStr().get());
}
#endif
@@ -84,7 +84,6 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
DCHECK(sfi->HasBaselineCode());
IsCompiledScope is_compiled_scope(*sfi, isolate);
DCHECK(!function->HasAvailableOptimizedCode());
- DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->has_feedback_vector());
JSFunction::CreateAndAttachFeedbackVector(isolate, function,
&is_compiled_scope);
@@ -101,12 +100,12 @@ RUNTIME_FUNCTION(Runtime_CompileMaglev_Concurrent) {
ConcurrencyMode::kConcurrent);
}
-RUNTIME_FUNCTION(Runtime_CompileMaglev_NotConcurrent) {
+RUNTIME_FUNCTION(Runtime_CompileMaglev_Synchronous) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
return CompileOptimized(isolate, function, CodeKind::MAGLEV,
- ConcurrencyMode::kNotConcurrent);
+ ConcurrencyMode::kSynchronous);
}
RUNTIME_FUNCTION(Runtime_CompileTurbofan_Concurrent) {
@@ -117,12 +116,12 @@ RUNTIME_FUNCTION(Runtime_CompileTurbofan_Concurrent) {
ConcurrencyMode::kConcurrent);
}
-RUNTIME_FUNCTION(Runtime_CompileTurbofan_NotConcurrent) {
+RUNTIME_FUNCTION(Runtime_CompileTurbofan_Synchronous) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
- ConcurrencyMode::kNotConcurrent);
+ ConcurrencyMode::kSynchronous);
}
RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
@@ -178,7 +177,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
DCHECK_EQ(0, args.length());
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
DCHECK(CodeKindCanDeoptimize(deoptimizer->compiled_code()->kind()));
- DCHECK(deoptimizer->compiled_code()->is_turbofanned());
DCHECK(AllowGarbageCollection::IsAllowed());
DCHECK(isolate->context().is_null());
@@ -203,8 +201,8 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
- // Invalidate the underlying optimized code on eager and soft deopts.
- if (type == DeoptimizeKind::kEager || type == DeoptimizeKind::kSoft) {
+ // Invalidate the underlying optimized code on eager deopts.
+ if (type == DeoptimizeKind::kEager) {
Deoptimizer::DeoptimizeFunction(*function, *optimized_code);
}
@@ -228,157 +226,121 @@ RUNTIME_FUNCTION(Runtime_VerifyType) {
return *obj;
}
-static bool IsSuitableForOnStackReplacement(Isolate* isolate,
- Handle<JSFunction> function) {
- // Don't OSR during serialization.
- if (isolate->serializer_enabled()) return false;
- // Keep track of whether we've succeeded in optimizing.
- if (function->shared().optimization_disabled()) return false;
- // TODO(chromium:1031479): Currently, OSR triggering mechanism is tied to the
- // bytecode array. So, it might be possible to mark closure in one native
- // context and optimize a closure from a different native context. So check if
- // there is a feedback vector before OSRing. We don't expect this to happen
- // often.
- if (!function->has_feedback_vector()) return false;
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == *function) return false;
- }
-
- return true;
-}
-
-namespace {
-
-BytecodeOffset DetermineEntryAndDisarmOSRForUnoptimized(
- JavaScriptFrame* js_frame) {
- UnoptimizedFrame* frame = reinterpret_cast<UnoptimizedFrame*>(js_frame);
+RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(0, args.length());
+ DCHECK(FLAG_use_osr);
- // Note that the bytecode array active on the stack might be different from
- // the one installed on the function (e.g. patched by debugger). This however
- // is fine because we guarantee the layout to be in sync, hence any
- // BytecodeOffset representing the entry point will be valid for any copy of
- // the bytecode.
- Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), frame->isolate());
+ // Determine the frame that triggered the OSR request.
+ JavaScriptFrameIterator it(isolate);
+ UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
DCHECK_IMPLIES(frame->is_interpreted(),
frame->LookupCode().is_interpreter_trampoline_builtin());
DCHECK_IMPLIES(frame->is_baseline(),
frame->LookupCode().kind() == CodeKind::BASELINE);
- DCHECK(frame->is_unoptimized());
DCHECK(frame->function().shared().HasBytecodeArray());
- // Reset the OSR loop nesting depth to disarm back edges.
- bytecode->set_osr_loop_nesting_level(0);
-
- // Return a BytecodeOffset representing the bytecode offset of the back
- // branch.
- return BytecodeOffset(frame->GetBytecodeOffset());
-}
-
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
- HandleScope handle_scope(isolate);
- DCHECK_EQ(0, args.length());
-
- // Only reachable when OST is enabled.
- CHECK(FLAG_use_osr);
-
- // Determine frame triggering OSR request.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- DCHECK(frame->is_unoptimized());
-
- // Determine the entry point for which this OSR request has been fired and
- // also disarm all back edges in the calling code to stop new requests.
- BytecodeOffset osr_offset = DetermineEntryAndDisarmOSRForUnoptimized(frame);
+ // Determine the entry point for which this OSR request has been fired.
+ BytecodeOffset osr_offset = BytecodeOffset(frame->GetBytecodeOffset());
DCHECK(!osr_offset.IsNone());
- MaybeHandle<CodeT> maybe_result;
+ ConcurrencyMode mode =
+ V8_LIKELY(isolate->concurrent_recompilation_enabled() &&
+ FLAG_concurrent_osr)
+ ? ConcurrencyMode::kConcurrent
+ : ConcurrencyMode::kSynchronous;
+
Handle<JSFunction> function(frame->function(), isolate);
- if (IsSuitableForOnStackReplacement(isolate, function)) {
- if (FLAG_trace_osr) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[OSR - Compiling: ");
- function->PrintName(scope.file());
- PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
+ if (IsConcurrent(mode)) {
+ // The synchronous fallback mechanism triggers if we've already got OSR'd
+ // code for the current function but at a different OSR offset - that may
+ // indicate we're having trouble hitting the correct JumpLoop for code
+ // installation. In this case, fall back to synchronous OSR.
+ base::Optional<BytecodeOffset> cached_osr_offset =
+ function->native_context().osr_code_cache().FirstOsrOffsetFor(
+ function->shared());
+ if (cached_osr_offset.has_value() &&
+ cached_osr_offset.value() != osr_offset) {
+ if (V8_UNLIKELY(FLAG_trace_osr)) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(
+ scope.file(),
+ "[OSR - falling back to synchronous compilation due to mismatched "
+ "cached entry. function: %s, requested: %d, cached: %d]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt(),
+ cached_osr_offset.value().ToInt());
+ }
+ mode = ConcurrencyMode::kSynchronous;
}
- maybe_result =
- Compiler::GetOptimizedCodeForOSR(isolate, function, osr_offset, frame);
}
- // Check whether we ended up with usable optimized code.
Handle<CodeT> result;
- if (maybe_result.ToHandle(&result) &&
- CodeKindIsOptimizedJSFunction(result->kind())) {
- DeoptimizationData data =
- DeoptimizationData::cast(result->deoptimization_data());
-
- if (data.OsrPcOffset().value() >= 0) {
- DCHECK(BytecodeOffset(data.OsrBytecodeOffset().value()) == osr_offset);
- if (FLAG_trace_osr) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(),
- "[OSR - Entry at OSR bytecode offset %d, offset %d in optimized "
- "code]\n",
- osr_offset.ToInt(), data.OsrPcOffset().value());
- }
-
- DCHECK(result->is_turbofanned());
- if (function->feedback_vector().invocation_count() <= 1 &&
- function->HasOptimizationMarker()) {
- // With lazy feedback allocation we may not have feedback for the
- // initial part of the function that was executed before we allocated a
- // feedback vector. Reset any optimization markers for such functions.
- //
- // TODO(mythria): Instead of resetting the optimization marker here we
- // should only mark a function for optimization if it has sufficient
- // feedback. We cannot do this currently since we OSR only after we mark
- // a function for optimization. We should instead change it to be based
- // based on number of ticks.
- DCHECK(!function->IsInOptimizationQueue());
- function->ClearOptimizationMarker();
- }
- // TODO(mythria): Once we have OSR code cache we may not need to mark
- // the function for non-concurrent compilation. We could arm the loops
- // early so the second execution uses the already compiled OSR code and
- // the optimization occurs concurrently off main thread.
- if (!function->HasAvailableOptimizedCode() &&
- function->feedback_vector().invocation_count() > 1) {
- // If we're not already optimized, set to optimize non-concurrently on
- // the next call, otherwise we'd run unoptimized once more and
- // potentially compile for OSR again.
- if (FLAG_trace_osr) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[OSR - Re-marking ");
- function->PrintName(scope.file());
- PrintF(scope.file(), " for non-concurrent optimization]\n");
- }
- function->SetOptimizationMarker(
- OptimizationMarker::kCompileTurbofan_NotConcurrent);
- }
- return *result;
+ if (!Compiler::CompileOptimizedOSR(isolate, function, osr_offset, frame, mode)
+ .ToHandle(&result)) {
+ // An empty result can mean one of two things:
+ // 1) we've started a concurrent compilation job - everything is fine.
+ // 2) synchronous compilation failed for some reason.
+
+ if (!function->HasAttachedOptimizedCode()) {
+ function->set_code(function->shared().GetCode(), kReleaseStore);
}
+
+ return {};
}
- // Failed.
+ DCHECK(!result.is_null());
+ DCHECK(result->is_turbofanned()); // TODO(v8:7700): Support Maglev.
+ DCHECK(CodeKindIsOptimizedJSFunction(result->kind()));
+
+ DeoptimizationData data =
+ DeoptimizationData::cast(result->deoptimization_data());
+ DCHECK_EQ(BytecodeOffset(data.OsrBytecodeOffset().value()), osr_offset);
+ DCHECK_GE(data.OsrPcOffset().value(), 0);
+
if (FLAG_trace_osr) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[OSR - Failed: ");
- function->PrintName(scope.file());
- PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
+ PrintF(scope.file(),
+ "[OSR - entry. function: %s, osr offset: %d, pc offset: %d]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt(),
+ data.OsrPcOffset().value());
+ }
+
+ if (function->feedback_vector().invocation_count() <= 1 &&
+ !IsNone(function->tiering_state()) &&
+ !IsInProgress(function->tiering_state())) {
+ // With lazy feedback allocation we may not have feedback for the
+ // initial part of the function that was executed before we allocated a
+ // feedback vector. Reset any tiering states for such functions.
+ //
+ // TODO(mythria): Instead of resetting the tiering state here we
+ // should only mark a function for optimization if it has sufficient
+ // feedback. We cannot do this currently since we OSR only after we mark
+ // a function for optimization. We should instead change it to be based
+ // on the number of ticks.
+ function->reset_tiering_state();
}
- if (!function->HasAttachedOptimizedCode()) {
- function->set_code(function->shared().GetCode(), kReleaseStore);
+ // TODO(mythria): Once we have an OSR code cache we may not need to mark
+ // the function for non-concurrent compilation. We could arm the loops
+ // early so the second execution uses the already compiled OSR code and
+ // the optimization occurs concurrently off main thread.
+ if (!function->HasAvailableOptimizedCode() &&
+ function->feedback_vector().invocation_count() > 1) {
+ // If we're not already optimized, set to optimize non-concurrently on the
+ // next call, otherwise we'd run unoptimized once more and potentially
+ // compile for OSR again.
+ if (FLAG_trace_osr) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - forcing synchronous optimization on next entry. function: "
+ "%s]\n",
+ function->DebugNameCStr().get());
+ }
+ function->set_tiering_state(TieringState::kRequestTurbofan_Synchronous);
}
- return Object();
+
+ return *result;
}
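
Stripped of the compilation and tracing details, the rewritten Runtime_CompileOptimizedOSR chooses its mode as follows: concurrent when concurrent recompilation and --concurrent-osr are enabled, downgraded to synchronous when the OSR cache already holds code for this function at a different bytecode offset. A condensed, hypothetical sketch of just that selection, with plain bools and ints standing in for isolate state and BytecodeOffset:

#include <cstdio>
#include <optional>

enum class Mode { kConcurrent, kSynchronous };

// Hypothetical condensation of the mode selection in Runtime_CompileOptimizedOSR.
Mode ChooseOsrMode(bool concurrent_recompilation_enabled, bool concurrent_osr_flag,
                   std::optional<int> cached_osr_offset, int requested_osr_offset) {
  Mode mode = (concurrent_recompilation_enabled && concurrent_osr_flag)
                  ? Mode::kConcurrent
                  : Mode::kSynchronous;
  // Fallback: cached OSR code exists, but for a different JumpLoop. Compiling
  // synchronously guarantees this request installs usable code.
  if (mode == Mode::kConcurrent && cached_osr_offset.has_value() &&
      *cached_osr_offset != requested_osr_offset) {
    mode = Mode::kSynchronous;
  }
  return mode;
}

int main() {
  printf("%d\n", static_cast<int>(ChooseOsrMode(true, true, 120, 48)));  // 1: synchronous
  printf("%d\n", static_cast<int>(ChooseOsrMode(true, true, {}, 48)));   // 0: concurrent
  return 0;
}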
static Object CompileGlobalEval(Isolate* isolate,
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index b1c1b1c5a8..40a4426863 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -4,6 +4,7 @@
#include <memory>
+#include "src/api/api-inl.h"
#include "src/api/api.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
@@ -296,6 +297,19 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
}
+RUNTIME_FUNCTION(Runtime_ThrowNoAccess) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+
+ // TODO(verwaest): We would like to throw using the calling context instead
+ // of the entered context but we don't currently have access to that.
+ HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ SaveAndSwitchContext save(
+ isolate, impl->LastEnteredOrMicrotaskContext()->native_context());
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(MessageTemplate::kNoAccess));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 9d7cf09241..e4da1da95c 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -606,20 +606,23 @@ MaybeHandle<Object> Runtime::DefineObjectOwnProperty(
if (!success) return MaybeHandle<Object>();
LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN);
- if (it.IsFound() && key->IsSymbol() && Symbol::cast(*key).is_private_name()) {
+ if (key->IsSymbol() && Symbol::cast(*key).is_private_name()) {
Handle<Symbol> private_symbol = Handle<Symbol>::cast(key);
- Handle<Object> name_string(private_symbol->description(), isolate);
- DCHECK(name_string->IsString());
- MessageTemplate message =
- private_symbol->is_private_brand()
- ? MessageTemplate::kInvalidPrivateBrandReinitialization
- : MessageTemplate::kInvalidPrivateFieldReinitialization;
- THROW_NEW_ERROR(isolate, NewTypeError(message, name_string), Object);
+ if (it.IsFound()) {
+ Handle<Object> name_string(private_symbol->description(), isolate);
+ DCHECK(name_string->IsString());
+ MessageTemplate message =
+ private_symbol->is_private_brand()
+ ? MessageTemplate::kInvalidPrivateBrandReinitialization
+ : MessageTemplate::kInvalidPrivateFieldReinitialization;
+ THROW_NEW_ERROR(isolate, NewTypeError(message, name_string), Object);
+ } else {
+ MAYBE_RETURN_NULL(JSReceiver::AddPrivateField(&it, value, should_throw));
+ }
+ } else {
+ MAYBE_RETURN_NULL(JSReceiver::CreateDataProperty(&it, value, should_throw));
}
- MAYBE_RETURN_NULL(
- Object::SetProperty(&it, value, store_origin, should_throw));
-
return value;
}
@@ -1334,7 +1337,7 @@ void CheckExcludedPropertiesAreOnCallerStack(Isolate* isolate, Address base,
RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedPropertiesOnStack) {
HandleScope scope(isolate);
- DCHECK_LE(3, args.length());
+ DCHECK_EQ(3, args.length());
Handle<Object> source = args.at(0);
int excluded_property_count = args.smi_value_at(1);
// The excluded_property_base is passed as a raw stack pointer. This is safe
diff --git a/deps/v8/src/runtime/runtime-shadow-realm.cc b/deps/v8/src/runtime/runtime-shadow-realm.cc
new file mode 100644
index 0000000000..a515eb0102
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-shadow-realm.cc
@@ -0,0 +1,22 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/arguments-inl.h"
+#include "src/objects/js-function.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_ShadowRealmWrappedFunctionCreate) {
+ DCHECK_EQ(2, args.length());
+ HandleScope scope(isolate);
+ Handle<NativeContext> native_context = args.at<NativeContext>(0);
+ Handle<JSReceiver> value = args.at<JSReceiver>(1);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSWrappedFunction::Create(isolate, native_context, value));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index a351e85e93..fcaada5711 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -262,7 +262,7 @@ bool CanOptimizeFunction<CodeKind::TURBOFAN>(
if (function->HasAvailableOptimizedCode() ||
function->HasAvailableCodeKind(kind)) {
DCHECK(function->HasAttachedOptimizedCode() ||
- function->ChecksOptimizationMarker());
+ function->ChecksTieringState());
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
@@ -305,7 +305,7 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate) {
return ReadOnlyRoots(isolate).undefined_value();
}
- ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+ ConcurrencyMode concurrency_mode = ConcurrencyMode::kSynchronous;
if (args.length() == 2) {
Handle<Object> type = args.at(1);
if (!type->IsString()) return CrashUnlessFuzzing(isolate);
@@ -450,7 +450,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeMaglevOnNextCall) {
DCHECK(function->is_compiled());
// TODO(v8:7700): Support concurrent compiles.
- const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+ const ConcurrencyMode concurrency_mode = ConcurrencyMode::kSynchronous;
TraceManualRecompile(*function, kCodeKind, concurrency_mode);
JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
@@ -521,6 +521,47 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return ReadOnlyRoots(isolate).undefined_value();
}
+namespace {
+
+void FinalizeOptimization(Isolate* isolate) {
+ DCHECK(isolate->concurrent_recompilation_enabled());
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->optimizing_compile_dispatcher()->set_finalize(true);
+}
+
+BytecodeOffset OffsetOfNextJumpLoop(Isolate* isolate, UnoptimizedFrame* frame) {
+ Handle<BytecodeArray> bytecode_array(frame->GetBytecodeArray(), isolate);
+ const int current_offset = frame->GetBytecodeOffset();
+
+ interpreter::BytecodeArrayIterator it(bytecode_array, current_offset);
+
+ // First, look for a loop that contains the current bytecode offset.
+ for (; !it.done(); it.Advance()) {
+ if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) {
+ continue;
+ }
+ if (!base::IsInRange(current_offset, it.GetJumpTargetOffset(),
+ it.current_offset())) {
+ continue;
+ }
+
+ return BytecodeOffset(it.current_offset());
+ }
+
+ // Fall back to any loop after the current offset.
+ it.SetOffset(current_offset);
+ for (; !it.done(); it.Advance()) {
+ if (it.current_bytecode() == interpreter::Bytecode::kJumpLoop) {
+ return BytecodeOffset(it.current_offset());
+ }
+ }
+
+ return BytecodeOffset::None();
+}
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 0 || args.length() == 1);
@@ -540,7 +581,9 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return CrashUnlessFuzzing(isolate);
- if (!FLAG_opt) return ReadOnlyRoots(isolate).undefined_value();
+ if (V8_UNLIKELY(!FLAG_opt) || V8_UNLIKELY(!FLAG_use_osr)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
if (!function->shared().allows_lazy_compilation()) {
return CrashUnlessFuzzing(isolate);
@@ -558,7 +601,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
if (function->HasAvailableOptimizedCode()) {
DCHECK(function->HasAttachedOptimizedCode() ||
- function->ChecksOptimizationMarker());
+ function->ChecksTieringState());
// If function is already optimized, remove the bytecode array from the
// pending optimize for test table and return.
if (FLAG_testing_d8_test_runner) {
@@ -567,6 +610,11 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
return ReadOnlyRoots(isolate).undefined_value();
}
+ if (!it.frame()->is_unoptimized()) {
+ // Nothing to be done.
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
// Ensure that the function is marked for non-concurrent optimization, so that
// subsequent runs don't also optimize.
if (FLAG_trace_osr) {
@@ -579,13 +627,42 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
function->shared().is_compiled_scope(isolate));
JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
function->MarkForOptimization(isolate, CodeKind::TURBOFAN,
- ConcurrencyMode::kNotConcurrent);
+ ConcurrencyMode::kSynchronous);
+
+ isolate->tiering_manager()->RequestOsrAtNextOpportunity(*function);
+
+ // If concurrent OSR is enabled, the testing workflow is a bit tricky. We
+ // must guarantee that the next JumpLoop installs the finished OSR'd code
+ // object, but we still want to exercise concurrent code paths. To do so,
+ // we attempt to find the next JumpLoop, start an OSR job for it now, and
+ // immediately force finalization.
+ // If this succeeds and we correctly match up the next JumpLoop, once we
+ // reach the JumpLoop we'll hit the OSR cache and install the generated code.
+ // If not (e.g. because we enter a nested loop first), the next JumpLoop will
+ // see the cached OSR code with a mismatched offset, and trigger
+ // non-concurrent OSR compilation and installation.
+ if (isolate->concurrent_recompilation_enabled() && FLAG_concurrent_osr) {
+ const BytecodeOffset osr_offset =
+ OffsetOfNextJumpLoop(isolate, UnoptimizedFrame::cast(it.frame()));
+ if (osr_offset.IsNone()) {
+ // The loop may have been elided by bytecode generation (e.g. for
+ // patterns such as `do { ... } while (false);`).
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // Finalize first to ensure all pending tasks are done (since we can't
+ // queue more than one OSR job for each function).
+ FinalizeOptimization(isolate);
+
+ // Queue the job.
+ auto unused_result = Compiler::CompileOptimizedOSR(
+ isolate, function, osr_offset, UnoptimizedFrame::cast(it.frame()),
+ ConcurrencyMode::kConcurrent);
+ USE(unused_result);
- // Make the profiler arm all back edges in unoptimized code.
- if (it.frame()->is_unoptimized()) {
- isolate->tiering_manager()->AttemptOnStackReplacement(
- UnoptimizedFrame::cast(it.frame()),
- AbstractCode::kMaxLoopNestingMarker);
+ // Finalize again to finish the queued job. The next call into
+ // Runtime::kCompileOptimizedOSR will pick up the cached Code object.
+ FinalizeOptimization(isolate);
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -640,6 +717,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
+
int status = 0;
if (FLAG_lite_mode || FLAG_jitless) {
// Both jitless and lite modes cannot optimize. Unit tests should handle
@@ -659,16 +737,26 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
Handle<Object> function_object = args.at(0);
if (function_object->IsUndefined()) return Smi::FromInt(status);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
+
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
status |= static_cast<int>(OptimizationStatus::kIsFunction);
- if (function->IsMarkedForOptimization()) {
- status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
- } else if (function->IsMarkedForConcurrentOptimization()) {
- status |=
- static_cast<int>(OptimizationStatus::kMarkedForConcurrentOptimization);
- } else if (function->IsInOptimizationQueue()) {
- status |= static_cast<int>(OptimizationStatus::kOptimizingConcurrently);
+ switch (function->tiering_state()) {
+ case TieringState::kRequestTurbofan_Synchronous:
+ status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
+ break;
+ case TieringState::kRequestTurbofan_Concurrent:
+ status |= static_cast<int>(
+ OptimizationStatus::kMarkedForConcurrentOptimization);
+ break;
+ case TieringState::kInProgress:
+ status |= static_cast<int>(OptimizationStatus::kOptimizingConcurrently);
+ break;
+ case TieringState::kNone:
+ case TieringState::kRequestMaglev_Synchronous:
+ case TieringState::kRequestMaglev_Concurrent:
+ // TODO(v8:7700): Maglev support.
+ break;
}
if (function->HasAttachedOptimizedCode()) {
@@ -678,7 +766,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
} else {
status |= static_cast<int>(OptimizationStatus::kOptimized);
}
- if (code.is_turbofanned()) {
+ if (code.is_maglevved()) {
+ status |= static_cast<int>(OptimizationStatus::kMaglevved);
+ } else if (code.is_turbofanned()) {
status |= static_cast<int>(OptimizationStatus::kTurboFanned);
}
}
@@ -738,9 +828,7 @@ RUNTIME_FUNCTION(Runtime_WaitForBackgroundOptimization) {
RUNTIME_FUNCTION(Runtime_FinalizeOptimization) {
DCHECK_EQ(0, args.length());
if (isolate->concurrent_recompilation_enabled()) {
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ FinalizeOptimization(isolate);
}
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1106,6 +1194,9 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
// Get the function and make sure it is compiled.
Handle<JSFunction> func = args.at<JSFunction>(0);
IsCompiledScope is_compiled_scope;
+ if (!func->is_compiled() && func->HasAvailableOptimizedCode()) {
+ func->set_code(func->feedback_vector().optimized_code());
+ }
CHECK(func->is_compiled() ||
Compiler::Compile(isolate, func, Compiler::KEEP_EXCEPTION,
&is_compiled_scope));
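
The block added to Runtime_OptimizeOsr above is easier to follow as a timeline; a compressed, illustrative sketch of the two outcomes it describes (not part of the patch):

    // %OptimizeOsr() with concurrent OSR enabled:
    //   1. Mark the function for TURBOFAN and request OSR at the next
    //      opportunity.
    //   2. Find the bytecode offset O of the next JumpLoop; bail out if the
    //      loop was elided.
    //   3. FinalizeOptimization() to drain any previously queued OSR job.
    //   4. Queue a concurrent OSR job for offset O, then FinalizeOptimization()
    //      again so the job finishes immediately.
    // At runtime:
    //   - JumpLoop at O     -> OSR cache hit, the finished code is installed.
    //   - JumpLoop not at O -> mismatched offset in the cache, fall back to
    //     synchronous OSR compilation and installation.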
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index e7c695947e..a6712673c0 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -676,7 +676,7 @@ RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
UPDATE_WRITE_BARRIER);
}
} else {
- int element_size_bytes = element_type.element_size_bytes();
+ int element_size_bytes = element_type.value_kind_size();
void* dst = ArrayElementAddress(dst_array, dst_index, element_size_bytes);
void* src = ArrayElementAddress(src_array, src_index, element_size_bytes);
size_t copy_size = length * element_size_bytes;
@@ -791,25 +791,7 @@ RUNTIME_FUNCTION(Runtime_WasmCreateResumePromise) {
Handle<Object> promise = args.at(0);
Handle<WasmSuspenderObject> suspender = args.at<WasmSuspenderObject>(1);
- // Instantiate onFulfilled callback.
- Handle<WasmOnFulfilledData> function_data =
- isolate->factory()->NewWasmOnFulfilledData(suspender);
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfoForWasmOnFulfilled(
- function_data);
- Handle<WasmInstanceObject> instance(
- GetWasmInstanceOnStackTop(isolate,
- {StackFrame::EXIT, StackFrame::WASM_TO_JS}),
- isolate);
- isolate->set_context(instance->native_context());
- Handle<Context> context(isolate->native_context());
- Handle<Map> function_map = isolate->strict_function_map();
- Handle<JSObject> on_fulfilled =
- Factory::JSFunctionBuilder{isolate, shared, context}
- .set_map(function_map)
- .Build();
-
- i::Handle<i::Object> argv[] = {on_fulfilled};
+ i::Handle<i::Object> argv[] = {handle(suspender->resume(), isolate)};
i::Handle<i::Object> result;
bool has_pending_exception =
!i::Execution::CallBuiltin(isolate, isolate->promise_then(), promise,
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 9554a167d8..a140f9b526 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -67,7 +67,8 @@ namespace internal {
F(AtomicsXor, 3, 1) \
F(SetAllowAtomicsWait, 1, 1) \
F(AtomicsLoadSharedStructField, 2, 1) \
- F(AtomicsStoreSharedStructField, 3, 1)
+ F(AtomicsStoreSharedStructField, 3, 1) \
+ F(AtomicsExchangeSharedStructField, 3, 1)
#define FOR_EACH_INTRINSIC_BIGINT(F, I) \
F(BigIntBinaryOp, 3, 1) \
@@ -106,13 +107,13 @@ namespace internal {
F(WeakCollectionSet, 4, 1)
#define FOR_EACH_INTRINSIC_COMPILER(F, I) \
- F(CompileForOnStackReplacement, 0, 1) \
+ F(CompileOptimizedOSR, 0, 1) \
F(CompileLazy, 1, 1) \
F(CompileBaseline, 1, 1) \
F(CompileMaglev_Concurrent, 1, 1) \
- F(CompileMaglev_NotConcurrent, 1, 1) \
+ F(CompileMaglev_Synchronous, 1, 1) \
F(CompileTurbofan_Concurrent, 1, 1) \
- F(CompileTurbofan_NotConcurrent, 1, 1) \
+ F(CompileTurbofan_Synchronous, 1, 1) \
F(InstallBaselineCode, 1, 1) \
F(HealOptimizedCodeSlot, 1, 1) \
F(InstantiateAsmJs, 4, 1) \
@@ -251,6 +252,7 @@ namespace internal {
F(ThrowIteratorError, 1, 1) \
F(ThrowSpreadArgError, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowNoAccess, 0, 1) \
F(ThrowNotConstructor, 1, 1) \
F(ThrowPatternAssignmentNonCoercible, 1, 1) \
F(ThrowRangeError, -1 /* >= 1 */, 1) \
@@ -437,6 +439,9 @@ namespace internal {
F(StoreLookupSlot_Strict, 2, 1) \
F(ThrowConstAssignError, 0, 1)
+#define FOR_EACH_INTRINSIC_SHADOW_REALM(F, I) \
+ F(ShadowRealmWrappedFunctionCreate, 2, 1)
+
#define FOR_EACH_INTRINSIC_STRINGS(F, I) \
F(FlattenString, 1, 1) \
F(GetSubstitution, 5, 1) \
@@ -694,6 +699,7 @@ namespace internal {
FOR_EACH_INTRINSIC_PROXY(F, I) \
FOR_EACH_INTRINSIC_REGEXP(F, I) \
FOR_EACH_INTRINSIC_SCOPES(F, I) \
+ FOR_EACH_INTRINSIC_SHADOW_REALM(F, I) \
FOR_EACH_INTRINSIC_STRINGS(F, I) \
FOR_EACH_INTRINSIC_SYMBOL(F, I) \
FOR_EACH_INTRINSIC_TEST(F, I) \
@@ -889,18 +895,19 @@ enum class OptimizationStatus {
kAlwaysOptimize = 1 << 2,
kMaybeDeopted = 1 << 3,
kOptimized = 1 << 4,
- kTurboFanned = 1 << 5,
- kInterpreted = 1 << 6,
- kMarkedForOptimization = 1 << 7,
- kMarkedForConcurrentOptimization = 1 << 8,
- kOptimizingConcurrently = 1 << 9,
- kIsExecuting = 1 << 10,
- kTopmostFrameIsTurboFanned = 1 << 11,
- kLiteMode = 1 << 12,
- kMarkedForDeoptimization = 1 << 13,
- kBaseline = 1 << 14,
- kTopmostFrameIsInterpreted = 1 << 15,
- kTopmostFrameIsBaseline = 1 << 16,
+ kMaglevved = 1 << 5,
+ kTurboFanned = 1 << 6,
+ kInterpreted = 1 << 7,
+ kMarkedForOptimization = 1 << 8,
+ kMarkedForConcurrentOptimization = 1 << 9,
+ kOptimizingConcurrently = 1 << 10,
+ kIsExecuting = 1 << 11,
+ kTopmostFrameIsTurboFanned = 1 << 12,
+ kLiteMode = 1 << 13,
+ kMarkedForDeoptimization = 1 << 14,
+ kBaseline = 1 << 15,
+ kTopmostFrameIsInterpreted = 1 << 16,
+ kTopmostFrameIsBaseline = 1 << 17,
};
} // namespace internal
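
Runtime_GetOptimizationStatus (see the runtime-test.cc hunk above) ORs these flags into a single Smi, so callers decode the result with bitwise tests; a minimal sketch, with an illustrative helper name that is not part of the patch:

    // Decode the bitmask returned by %GetOptimizationStatus.
    bool HasOptimizationStatus(int status, OptimizationStatus flag) {
      return (status & static_cast<int>(flag)) != 0;
    }
    // With this change kMaglevved takes bit 5 and kTurboFanned moves to bit 6,
    // so anything that hard-codes bit positions instead of using the enum
    // will observe different values.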
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 0a05c74d67..618cf9e975 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -109,13 +109,14 @@ AlignedCachedData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
-bool CodeSerializer::SerializeReadOnlyObject(Handle<HeapObject> obj) {
- if (!ReadOnlyHeap::Contains(*obj)) return false;
+bool CodeSerializer::SerializeReadOnlyObject(
+ HeapObject obj, const DisallowGarbageCollection& no_gc) {
+ if (!ReadOnlyHeap::Contains(obj)) return false;
// For objects on the read-only heap, never serialize the object, but instead
// create a back reference that encodes the page number as the chunk_index and
// the offset within the page as the chunk_offset.
- Address address = obj->address();
+ Address address = obj.address();
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
@@ -131,77 +132,93 @@ bool CodeSerializer::SerializeReadOnlyObject(Handle<HeapObject> obj) {
}
void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
- if (SerializeHotObject(obj)) return;
-
- if (SerializeRoot(obj)) return;
-
- if (SerializeBackReference(obj)) return;
-
- if (SerializeReadOnlyObject(obj)) return;
-
- CHECK(!obj->IsCode(cage_base()));
-
ReadOnlyRoots roots(isolate());
- if (ElideObject(*obj)) {
- return SerializeObject(roots.undefined_value_handle());
- }
-
- if (obj->IsScript()) {
- Handle<Script> script_obj = Handle<Script>::cast(obj);
- DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
- // We want to differentiate between undefined and uninitialized_symbol for
- // context_data for now. It is hack to allow debugging for scripts that are
- // included as a part of custom snapshot. (see debug::Script::IsEmbedded())
- Object context_data = script_obj->context_data();
- if (context_data != roots.undefined_value() &&
- context_data != roots.uninitialized_symbol()) {
- script_obj->set_context_data(roots.undefined_value());
+ InstanceType instance_type;
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *obj;
+ if (SerializeHotObject(raw)) return;
+ if (SerializeRoot(raw)) return;
+ if (SerializeBackReference(raw)) return;
+ if (SerializeReadOnlyObject(raw, no_gc)) return;
+
+ instance_type = raw.map().instance_type();
+ CHECK(!InstanceTypeChecker::IsCode(instance_type));
+
+ if (ElideObject(raw)) {
+ AllowGarbageCollection allow_gc;
+ return SerializeObject(roots.undefined_value_handle());
}
- // We don't want to serialize host options to avoid serializing unnecessary
- // object graph.
- FixedArray host_options = script_obj->host_defined_options();
- script_obj->set_host_defined_options(roots.empty_fixed_array());
- SerializeGeneric(obj);
- script_obj->set_host_defined_options(host_options);
- script_obj->set_context_data(context_data);
- return;
}
- if (obj->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> sfi = Handle<SharedFunctionInfo>::cast(obj);
- DCHECK(!sfi->IsApiFunction());
+ if (InstanceTypeChecker::IsScript(instance_type)) {
+ Handle<FixedArray> host_options;
+ Handle<Object> context_data;
+ {
+ DisallowGarbageCollection no_gc;
+ Script script_obj = Script::cast(*obj);
+ DCHECK_NE(script_obj.compilation_type(), Script::COMPILATION_TYPE_EVAL);
+ // We want to differentiate between undefined and uninitialized_symbol for
+      // context_data for now. It is a hack to allow debugging for scripts
+      // that are included as part of a custom snapshot. (see
+ // debug::Script::IsEmbedded())
+ Object raw_context_data = script_obj.context_data();
+ if (raw_context_data != roots.undefined_value() &&
+ raw_context_data != roots.uninitialized_symbol()) {
+ script_obj.set_context_data(roots.undefined_value());
+ }
+ context_data = handle(raw_context_data, isolate());
+ // We don't want to serialize host options to avoid serializing
+      // an unnecessary object graph.
+ host_options = handle(script_obj.host_defined_options(), isolate());
+ script_obj.set_host_defined_options(roots.empty_fixed_array());
+ }
+ SerializeGeneric(obj);
+ {
+ DisallowGarbageCollection no_gc;
+ Script script_obj = Script::cast(*obj);
+ script_obj.set_host_defined_options(*host_options);
+ script_obj.set_context_data(*context_data);
+ }
+ return;
+ } else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
+ Handle<DebugInfo> debug_info;
+ bool restore_bytecode = false;
+ {
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(*obj);
+ DCHECK(!sfi.IsApiFunction());
#if V8_ENABLE_WEBASSEMBLY
- // TODO(7110): Enable serializing of Asm modules once the AsmWasmData
- // is context independent.
- DCHECK(!sfi->HasAsmWasmData());
+ // TODO(7110): Enable serializing of Asm modules once the AsmWasmData
+ // is context independent.
+ DCHECK(!sfi.HasAsmWasmData());
#endif // V8_ENABLE_WEBASSEMBLY
- DebugInfo debug_info;
- BytecodeArray debug_bytecode_array;
- if (sfi->HasDebugInfo()) {
- // Clear debug info.
- debug_info = sfi->GetDebugInfo();
- if (debug_info.HasInstrumentedBytecodeArray()) {
- debug_bytecode_array = debug_info.DebugBytecodeArray();
- sfi->SetActiveBytecodeArray(debug_info.OriginalBytecodeArray());
+ if (sfi.HasDebugInfo()) {
+ // Clear debug info.
+ DebugInfo raw_debug_info = sfi.GetDebugInfo();
+ if (raw_debug_info.HasInstrumentedBytecodeArray()) {
+ restore_bytecode = true;
+ sfi.SetActiveBytecodeArray(raw_debug_info.OriginalBytecodeArray());
+ }
+ sfi.set_script_or_debug_info(raw_debug_info.script(), kReleaseStore);
+ debug_info = handle(raw_debug_info, isolate());
}
- sfi->set_script_or_debug_info(debug_info.script(), kReleaseStore);
+ DCHECK(!sfi.HasDebugInfo());
}
- DCHECK(!sfi->HasDebugInfo());
-
SerializeGeneric(obj);
-
// Restore debug info
if (!debug_info.is_null()) {
- sfi->set_script_or_debug_info(debug_info, kReleaseStore);
- if (!debug_bytecode_array.is_null()) {
- sfi->SetActiveBytecodeArray(debug_bytecode_array);
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(*obj);
+ sfi.set_script_or_debug_info(*debug_info, kReleaseStore);
+ if (restore_bytecode) {
+ sfi.SetActiveBytecodeArray(debug_info->DebugBytecodeArray());
}
}
return;
- }
-
- if (obj->IsUncompiledDataWithoutPreparseDataWithJob()) {
+ } else if (InstanceTypeChecker::IsUncompiledDataWithoutPreparseDataWithJob(
+ instance_type)) {
Handle<UncompiledDataWithoutPreparseDataWithJob> data =
Handle<UncompiledDataWithoutPreparseDataWithJob>::cast(obj);
Address job = data->job();
@@ -209,8 +226,8 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
SerializeGeneric(data);
data->set_job(job);
return;
- }
- if (obj->IsUncompiledDataWithPreparseDataAndJob()) {
+ } else if (InstanceTypeChecker::IsUncompiledDataWithPreparseDataAndJob(
+ instance_type)) {
Handle<UncompiledDataWithPreparseDataAndJob> data =
Handle<UncompiledDataWithPreparseDataAndJob>::cast(obj);
Address job = data->job();
@@ -233,14 +250,16 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
#endif // V8_TARGET_ARCH_ARM
// Past this point we should not see any (context-specific) maps anymore.
- CHECK(!obj->IsMap());
+ CHECK(!InstanceTypeChecker::IsMap(instance_type));
// There should be no references to the global object embedded.
- CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
+ CHECK(!InstanceTypeChecker::IsJSGlobalProxy(instance_type) &&
+ !InstanceTypeChecker::IsJSGlobalObject(instance_type));
// Embedded FixedArrays that need rehashing must support rehashing.
CHECK_IMPLIES(obj->NeedsRehashing(cage_base()),
obj->CanBeRehashed(cage_base()));
// We expect no instantiated function objects or contexts.
- CHECK(!obj->IsJSFunction() && !obj->IsContext());
+ CHECK(!InstanceTypeChecker::IsJSFunction(instance_type) &&
+ !InstanceTypeChecker::IsContext(instance_type));
SerializeGeneric(obj);
}
@@ -269,7 +288,9 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
SharedFunctionInfo::ScriptIterator iter(isolate, *script);
for (SharedFunctionInfo shared_info = iter.Next(); !shared_info.is_null();
shared_info = iter.Next()) {
- if (!shared_info.HasBytecodeArray()) continue;
+ IsCompiledScope is_compiled(shared_info, isolate);
+ if (!is_compiled.is_compiled()) continue;
+ DCHECK(shared_info.HasBytecodeArray());
Handle<SharedFunctionInfo> info = handle(shared_info, isolate);
Handle<Code> code = isolate->factory()->CopyCode(Handle<Code>::cast(
isolate->factory()->interpreter_entry_trampoline_for_profiling()));
@@ -280,8 +301,12 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
interpreter_data->set_bytecode_array(info->GetBytecodeArray(isolate));
interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
-
- info->set_interpreter_data(*interpreter_data);
+ if (info->HasBaselineCode()) {
+ FromCodeT(info->baseline_code(kAcquireLoad))
+ .set_bytecode_or_interpreter_data(*interpreter_data);
+ } else {
+ info->set_interpreter_data(*interpreter_data);
+ }
if (!log_code_creation) continue;
Handle<AbstractCode> abstract_code = Handle<AbstractCode>::cast(code);
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 242dee92f9..fa1e85279a 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -104,7 +104,8 @@ class CodeSerializer : public Serializer {
private:
void SerializeObjectImpl(Handle<HeapObject> o) override;
- bool SerializeReadOnlyObject(Handle<HeapObject> obj);
+ bool SerializeReadOnlyObject(HeapObject obj,
+ const DisallowGarbageCollection& no_gc);
DISALLOW_GARBAGE_COLLECTION(no_gc_)
uint32_t source_hash_;
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index 524911893b..98843b641c 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -61,11 +61,13 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
- // TODO(v8:11111): Support RAB / GSAB.
- CHECK(!buffer->is_resizable());
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
- buffer->Setup(shared, ResizableFlag::kNotResizable, bs);
+ DCHECK_IMPLIES(bs, buffer->is_resizable() == bs->is_resizable());
+ ResizableFlag resizable = bs && bs->is_resizable()
+ ? ResizableFlag::kResizable
+ : ResizableFlag::kNotResizable;
+ buffer->Setup(shared, resizable, bs);
}
}
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 4aba58f660..9adaae6eab 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -121,7 +121,7 @@ void ContextSerializer::Serialize(Context* o,
}
void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
- DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
+ DCHECK(!ObjectIsBytecodeHandler(*obj)); // Only referenced in dispatch table.
if (!allow_active_isolate_for_testing()) {
// When serializing a snapshot intended for real use, we should not end up
@@ -132,11 +132,13 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
DCHECK_IMPLIES(obj->IsNativeContext(), *obj == context_);
}
- if (SerializeHotObject(obj)) return;
-
- if (SerializeRoot(obj)) return;
-
- if (SerializeBackReference(obj)) return;
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *obj;
+ if (SerializeHotObject(raw)) return;
+ if (SerializeRoot(raw)) return;
+ if (SerializeBackReference(raw)) return;
+ }
if (startup_serializer_->SerializeUsingReadOnlyObjectCache(&sink_, obj)) {
return;
@@ -161,30 +163,29 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// Function and object templates are not context specific.
DCHECK(!obj->IsTemplateInfo());
- // Clear literal boilerplates and feedback.
- if (obj->IsFeedbackVector()) {
+ InstanceType instance_type = obj->map().instance_type();
+ if (InstanceTypeChecker::IsFeedbackVector(instance_type)) {
+ // Clear literal boilerplates and feedback.
Handle<FeedbackVector>::cast(obj)->ClearSlots(isolate());
- }
-
- // Clear InterruptBudget when serializing FeedbackCell.
- if (obj->IsFeedbackCell()) {
+ } else if (InstanceTypeChecker::IsFeedbackCell(instance_type)) {
+ // Clear InterruptBudget when serializing FeedbackCell.
Handle<FeedbackCell>::cast(obj)->SetInitialInterruptBudget();
- }
-
- if (SerializeJSObjectWithEmbedderFields(obj)) {
- return;
- }
-
- if (obj->IsJSFunction()) {
- // Unconditionally reset the JSFunction to its SFI's code, since we can't
- // serialize optimized code anyway.
- Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
- closure->ResetIfCodeFlushed();
- if (closure->is_compiled()) {
- if (closure->shared().HasBaselineCode()) {
- closure->shared().FlushBaselineCode();
+ } else if (InstanceTypeChecker::IsJSObject(instance_type)) {
+ if (SerializeJSObjectWithEmbedderFields(Handle<JSObject>::cast(obj))) {
+ return;
+ }
+ if (InstanceTypeChecker::IsJSFunction(instance_type)) {
+ DisallowGarbageCollection no_gc;
+ // Unconditionally reset the JSFunction to its SFI's code, since we can't
+ // serialize optimized code anyway.
+ JSFunction closure = JSFunction::cast(*obj);
+ closure.ResetIfCodeFlushed();
+ if (closure.is_compiled()) {
+ if (closure.shared().HasBaselineCode()) {
+ closure.shared().FlushBaselineCode();
+ }
+ closure.set_code(closure.shared().GetCode(), kReleaseStore);
}
- closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
}
@@ -219,19 +220,18 @@ bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
} // anonymous namespace
bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
- Handle<HeapObject> obj) {
- if (!obj->IsJSObject()) return false;
- Handle<JSObject> js_obj = Handle<JSObject>::cast(obj);
- int embedder_fields_count = js_obj->GetEmbedderFieldCount();
+ Handle<JSObject> obj) {
+ DisallowGarbageCollection no_gc;
+ JSObject js_obj = *obj;
+ int embedder_fields_count = js_obj.GetEmbedderFieldCount();
if (embedder_fields_count == 0) return false;
CHECK_GT(embedder_fields_count, 0);
- DCHECK(!js_obj->NeedsRehashing(cage_base()));
+ DCHECK(!js_obj.NeedsRehashing(cage_base()));
- DisallowGarbageCollection no_gc;
DisallowJavascriptExecution no_js(isolate());
DisallowCompilation no_compile(isolate());
- v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(js_obj);
+ v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(obj);
std::vector<EmbedderDataSlot::RawData> original_embedder_values;
std::vector<StartupData> serialized_data;
@@ -241,7 +241,7 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
// serializer. For aligned pointers, call the serialize callback. Hold
// onto the result.
for (int i = 0; i < embedder_fields_count; i++) {
- EmbedderDataSlot embedder_data_slot(*js_obj, i);
+ EmbedderDataSlot embedder_data_slot(js_obj, i);
original_embedder_values.emplace_back(
embedder_data_slot.load_raw(isolate(), no_gc));
Object object = embedder_data_slot.load_tagged();
@@ -270,13 +270,18 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
// with embedder callbacks.
for (int i = 0; i < embedder_fields_count; i++) {
if (!DataIsEmpty(serialized_data[i])) {
- EmbedderDataSlot(*js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
+ EmbedderDataSlot(js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
}
}
// 3) Serialize the object. References from embedder fields to heap objects or
// smis are serialized regularly.
- ObjectSerializer(this, js_obj, &sink_).Serialize();
+ {
+ AllowGarbageCollection allow_gc;
+ ObjectSerializer(this, obj, &sink_).Serialize();
+ // Reload raw pointer.
+ js_obj = *obj;
+ }
// 4) Obtain back reference for the serialized object.
const SerializerReference* reference =
@@ -290,8 +295,8 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
StartupData data = serialized_data[i];
if (DataIsEmpty(data)) continue;
// Restore original values from cleared fields.
- EmbedderDataSlot(*js_obj, i)
- .store_raw(isolate(), original_embedder_values[i], no_gc);
+ EmbedderDataSlot(js_obj, i).store_raw(isolate(),
+ original_embedder_values[i], no_gc);
embedder_fields_sink_.Put(kNewObject, "embedder field holder");
embedder_fields_sink_.PutInt(reference->back_ref_index(), "BackRefIndex");
embedder_fields_sink_.PutInt(i, "embedder field index");
diff --git a/deps/v8/src/snapshot/context-serializer.h b/deps/v8/src/snapshot/context-serializer.h
index 681c49d6ff..8aa0d67660 100644
--- a/deps/v8/src/snapshot/context-serializer.h
+++ b/deps/v8/src/snapshot/context-serializer.h
@@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
void SerializeObjectImpl(Handle<HeapObject> o) override;
bool ShouldBeInTheStartupObjectCache(HeapObject o);
bool ShouldBeInTheSharedObjectCache(HeapObject o);
- bool SerializeJSObjectWithEmbedderFields(Handle<HeapObject> obj);
+ bool SerializeJSObjectWithEmbedderFields(Handle<JSObject> obj);
void CheckRehashability(HeapObject obj);
StartupSerializer* startup_serializer_;
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 40f1cbdefc..e1383ca2b7 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -33,7 +33,7 @@
#include "src/objects/string.h"
#include "src/roots/roots.h"
#include "src/sandbox/external-pointer.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/snapshot/references.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/shared-heap-serializer.h"
@@ -560,7 +560,7 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
// fields in the serializer.
- BytecodeArray::cast(raw_obj).set_osr_loop_nesting_level(0);
+ BytecodeArray::cast(raw_obj).reset_osr_urgency();
} else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
@@ -1160,11 +1160,28 @@ int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
return ReadRepeatedObject(slot_accessor, repeats);
}
- case kOffHeapBackingStore: {
+ case kOffHeapBackingStore:
+ case kOffHeapResizableBackingStore: {
int byte_length = source_.GetInt();
- std::unique_ptr<BackingStore> backing_store = BackingStore::Allocate(
- main_thread_isolate(), byte_length, SharedFlag::kNotShared,
- InitializedFlag::kUninitialized);
+ std::unique_ptr<BackingStore> backing_store;
+ if (data == kOffHeapBackingStore) {
+ backing_store = BackingStore::Allocate(
+ main_thread_isolate(), byte_length, SharedFlag::kNotShared,
+ InitializedFlag::kUninitialized);
+ } else {
+ int max_byte_length = source_.GetInt();
+ size_t page_size, initial_pages, max_pages;
+ Maybe<bool> result =
+ JSArrayBuffer::GetResizableBackingStorePageConfiguration(
+ nullptr, byte_length, max_byte_length, kDontThrow, &page_size,
+ &initial_pages, &max_pages);
+ DCHECK(result.FromJust());
+ USE(result);
+ constexpr bool kIsWasmMemory = false;
+ backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
+ main_thread_isolate(), byte_length, max_byte_length, page_size,
+ initial_pages, max_pages, kIsWasmMemory, SharedFlag::kNotShared);
+ }
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
backing_stores_.push_back(std::move(backing_store));
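
Together with the serializer changes in serializer.cc further down, off-heap backing stores now come in two record shapes in the snapshot stream; a rough sketch of the layout read above (field names are descriptive, not literal):

    //   kOffHeapBackingStore          : byte_length, raw bytes[byte_length]
    //   kOffHeapResizableBackingStore : byte_length, max_byte_length,
    //                                   raw bytes[byte_length]
    // The resizable variant is allocated with
    // BackingStore::TryAllocateAndPartiallyCommitMemory so the buffer can
    // later grow up to max_byte_length; the plain variant still uses
    // BackingStore::Allocate.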
diff --git a/deps/v8/src/snapshot/embedded/embedded-data-inl.h b/deps/v8/src/snapshot/embedded/embedded-data-inl.h
new file mode 100644
index 0000000000..1817ff6287
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/embedded-data-inl.h
@@ -0,0 +1,159 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_EMBEDDED_DATA_INL_H_
+#define V8_SNAPSHOT_EMBEDDED_EMBEDDED_DATA_INL_H_
+
+#include "src/snapshot/embedded/embedded-data.h"
+
+namespace v8 {
+namespace internal {
+
+Address EmbeddedData::InstructionStartOfBuiltin(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawCode() + desc.instruction_offset;
+ DCHECK_LT(result, code_ + code_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::InstructionSizeOfBuiltin(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ return desc.instruction_length;
+}
+
+Address EmbeddedData::MetadataStartOfBuiltin(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawMetadata() + desc.metadata_offset;
+ DCHECK_LE(desc.metadata_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::MetadataSizeOfBuiltin(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ return desc.metadata_length;
+}
+
+Address EmbeddedData::SafepointTableStartOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawMetadata() + desc.metadata_offset;
+ DCHECK_LE(desc.handler_table_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::SafepointTableSizeOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+#if V8_EMBEDDED_CONSTANT_POOL
+ DCHECK_LE(desc.handler_table_offset, desc.constant_pool_offset);
+#else
+ DCHECK_LE(desc.handler_table_offset, desc.code_comments_offset_offset);
+#endif
+ return desc.handler_table_offset;
+}
+
+Address EmbeddedData::HandlerTableStartOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawMetadata() + desc.handler_table_offset;
+ DCHECK_LE(desc.handler_table_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::HandlerTableSizeOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+#if V8_EMBEDDED_CONSTANT_POOL
+ DCHECK_LE(desc.handler_table_offset, desc.constant_pool_offset);
+ return desc.constant_pool_offset - desc.handler_table_offset;
+#else
+ DCHECK_LE(desc.handler_table_offset, desc.code_comments_offset_offset);
+ return desc.code_comments_offset_offset - desc.handler_table_offset;
+#endif
+}
+
+Address EmbeddedData::ConstantPoolStartOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+#if V8_EMBEDDED_CONSTANT_POOL
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawMetadata() + desc.constant_pool_offset;
+ DCHECK_LE(desc.constant_pool_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+#else
+ return kNullAddress;
+#endif
+}
+
+uint32_t EmbeddedData::ConstantPoolSizeOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+#if V8_EMBEDDED_CONSTANT_POOL
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ DCHECK_LE(desc.constant_pool_offset, desc.code_comments_offset_offset);
+ return desc.code_comments_offset_offset - desc.constant_pool_offset;
+#else
+ return 0;
+#endif
+}
+
+Address EmbeddedData::CodeCommentsStartOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawMetadata() + desc.code_comments_offset_offset;
+ DCHECK_LE(desc.code_comments_offset_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::CodeCommentsSizeOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ DCHECK_LE(desc.code_comments_offset_offset,
+ desc.unwinding_info_offset_offset);
+ return desc.unwinding_info_offset_offset - desc.code_comments_offset_offset;
+}
+
+Address EmbeddedData::UnwindingInfoStartOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ const uint8_t* result = RawMetadata() + desc.unwinding_info_offset_offset;
+ DCHECK_LE(desc.unwinding_info_offset_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::UnwindingInfoSizeOf(Builtin builtin) const {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ const struct LayoutDescription& desc = LayoutDescription(builtin);
+ DCHECK_LE(desc.unwinding_info_offset_offset, desc.metadata_length);
+ return desc.metadata_length - desc.unwinding_info_offset_offset;
+}
+
+Address EmbeddedData::InstructionStartOfBytecodeHandlers() const {
+ return InstructionStartOfBuiltin(Builtin::kFirstBytecodeHandler);
+}
+
+Address EmbeddedData::InstructionEndOfBytecodeHandlers() const {
+ STATIC_ASSERT(static_cast<int>(Builtin::kFirstBytecodeHandler) +
+ kNumberOfBytecodeHandlers +
+ 2 * kNumberOfWideBytecodeHandlers ==
+ Builtins::kBuiltinCount);
+ Builtin lastBytecodeHandler = Builtins::FromInt(Builtins::kBuiltinCount - 1);
+ return InstructionStartOfBuiltin(lastBytecodeHandler) +
+ InstructionSizeOfBuiltin(lastBytecodeHandler);
+}
+
+// Padded with kCodeAlignment.
+// TODO(v8:11045): Consider removing code alignment.
+uint32_t EmbeddedData::PaddedInstructionSizeOfBuiltin(Builtin builtin) const {
+ uint32_t size = InstructionSizeOfBuiltin(builtin);
+ CHECK_NE(size, 0);
+ return PadAndAlignCode(size);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_EMBEDDED_DATA_INL_H_
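
The accessors above address each builtin's metadata area through the offsets newly recorded in LayoutDescription; a rough picture, with offsets relative to the start of the embedded data section (constant pool only with V8_EMBEDDED_CONSTANT_POOL):

    //   metadata_offset              -> safepoint table
    //   handler_table_offset         -> handler table
    //   constant_pool_offset         -> constant pool
    //   code_comments_offset_offset  -> code comments
    //   unwinding_info_offset_offset -> unwinding info
    //   (the area spans metadata_length bytes in total)
    // Most table sizes then fall out as differences between neighbouring
    // offsets, as in the SizeOf accessors above.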
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 6d67c9d311..89ce411d3e 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -8,8 +8,10 @@
#include "src/codegen/callable.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/snapshot/snapshot-utils.h"
#include "src/snapshot/snapshot.h"
+#include "v8-internal.h"
namespace v8 {
namespace internal {
@@ -295,12 +297,26 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
uint32_t metadata_size = static_cast<uint32_t>(code.raw_metadata_size());
DCHECK_EQ(0, raw_code_size % kCodeAlignment);
- const int builtin_index = static_cast<int>(builtin);
- layout_descriptions[builtin_index].instruction_offset = raw_code_size;
- layout_descriptions[builtin_index].instruction_length = instruction_size;
- layout_descriptions[builtin_index].metadata_offset = raw_data_size;
- layout_descriptions[builtin_index].metadata_length = metadata_size;
-
+ {
+ const int builtin_index = static_cast<int>(builtin);
+ struct LayoutDescription& layout_desc =
+ layout_descriptions[builtin_index];
+ layout_desc.instruction_offset = raw_code_size;
+ layout_desc.instruction_length = instruction_size;
+ layout_desc.metadata_offset = raw_data_size;
+ layout_desc.metadata_length = metadata_size;
+
+ layout_desc.handler_table_offset =
+ raw_data_size + static_cast<uint32_t>(code.handler_table_offset());
+#if V8_EMBEDDED_CONSTANT_POOL
+ layout_desc.constant_pool_offset =
+ raw_data_size + static_cast<uint32_t>(code.constant_pool_offset());
+#endif
+ layout_desc.code_comments_offset_offset =
+ raw_data_size + static_cast<uint32_t>(code.code_comments_offset());
+ layout_desc.unwinding_info_offset_offset =
+ raw_data_size + static_cast<uint32_t>(code.unwinding_info_offset());
+ }
// Align the start of each section.
raw_code_size += PadAndAlignCode(instruction_size);
raw_data_size += PadAndAlignData(metadata_size);
@@ -396,50 +412,6 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
return d;
}
-Address EmbeddedData::InstructionStartOfBuiltin(Builtin builtin) const {
- DCHECK(Builtins::IsBuiltinId(builtin));
- const struct LayoutDescription* descs = LayoutDescription();
- const uint8_t* result =
- RawCode() + descs[static_cast<int>(builtin)].instruction_offset;
- DCHECK_LT(result, code_ + code_size_);
- return reinterpret_cast<Address>(result);
-}
-
-uint32_t EmbeddedData::InstructionSizeOfBuiltin(Builtin builtin) const {
- DCHECK(Builtins::IsBuiltinId(builtin));
- const struct LayoutDescription* descs = LayoutDescription();
- return descs[static_cast<int>(builtin)].instruction_length;
-}
-
-Address EmbeddedData::MetadataStartOfBuiltin(Builtin builtin) const {
- DCHECK(Builtins::IsBuiltinId(builtin));
- const struct LayoutDescription* descs = LayoutDescription();
- const uint8_t* result =
- RawMetadata() + descs[static_cast<int>(builtin)].metadata_offset;
- DCHECK_LE(descs[static_cast<int>(builtin)].metadata_offset, data_size_);
- return reinterpret_cast<Address>(result);
-}
-
-uint32_t EmbeddedData::MetadataSizeOfBuiltin(Builtin builtin) const {
- DCHECK(Builtins::IsBuiltinId(builtin));
- const struct LayoutDescription* descs = LayoutDescription();
- return descs[static_cast<int>(builtin)].metadata_length;
-}
-
-Address EmbeddedData::InstructionStartOfBytecodeHandlers() const {
- return InstructionStartOfBuiltin(Builtin::kFirstBytecodeHandler);
-}
-
-Address EmbeddedData::InstructionEndOfBytecodeHandlers() const {
- STATIC_ASSERT(static_cast<int>(Builtin::kFirstBytecodeHandler) +
- kNumberOfBytecodeHandlers +
- 2 * kNumberOfWideBytecodeHandlers ==
- Builtins::kBuiltinCount);
- Builtin lastBytecodeHandler = Builtins::FromInt(Builtins::kBuiltinCount - 1);
- return InstructionStartOfBuiltin(lastBytecodeHandler) +
- InstructionSizeOfBuiltin(lastBytecodeHandler);
-}
-
size_t EmbeddedData::CreateEmbeddedBlobDataHash() const {
STATIC_ASSERT(EmbeddedBlobDataHashOffset() == 0);
STATIC_ASSERT(EmbeddedBlobCodeHashOffset() == EmbeddedBlobDataHashSize());
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index ba090062b3..ef43e2089e 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -127,14 +127,30 @@ class EmbeddedData final {
data_ = nullptr;
}
- Address InstructionStartOfBuiltin(Builtin builtin) const;
- uint32_t InstructionSizeOfBuiltin(Builtin builtin) const;
+ // TODO(ishell): rename XyzOfBuiltin() to XyzOf().
+ inline Address InstructionStartOfBuiltin(Builtin builtin) const;
+ inline uint32_t InstructionSizeOfBuiltin(Builtin builtin) const;
- Address InstructionStartOfBytecodeHandlers() const;
- Address InstructionEndOfBytecodeHandlers() const;
+ inline Address InstructionStartOfBytecodeHandlers() const;
+ inline Address InstructionEndOfBytecodeHandlers() const;
- Address MetadataStartOfBuiltin(Builtin builtin) const;
- uint32_t MetadataSizeOfBuiltin(Builtin builtin) const;
+ inline Address MetadataStartOfBuiltin(Builtin builtin) const;
+ inline uint32_t MetadataSizeOfBuiltin(Builtin builtin) const;
+
+ inline Address SafepointTableStartOf(Builtin builtin) const;
+ inline uint32_t SafepointTableSizeOf(Builtin builtin) const;
+
+ inline Address HandlerTableStartOf(Builtin builtin) const;
+ inline uint32_t HandlerTableSizeOf(Builtin builtin) const;
+
+ inline Address ConstantPoolStartOf(Builtin builtin) const;
+ inline uint32_t ConstantPoolSizeOf(Builtin builtin) const;
+
+ inline Address CodeCommentsStartOf(Builtin builtin) const;
+ inline uint32_t CodeCommentsSizeOf(Builtin builtin) const;
+
+ inline Address UnwindingInfoStartOf(Builtin builtin) const;
+ inline uint32_t UnwindingInfoSizeOf(Builtin builtin) const;
uint32_t AddressForHashing(Address addr) {
DCHECK(IsInCodeRange(addr));
@@ -144,11 +160,7 @@ class EmbeddedData final {
// Padded with kCodeAlignment.
// TODO(v8:11045): Consider removing code alignment.
- uint32_t PaddedInstructionSizeOfBuiltin(Builtin builtin) const {
- uint32_t size = InstructionSizeOfBuiltin(builtin);
- CHECK_NE(size, 0);
- return PadAndAlignCode(size);
- }
+ inline uint32_t PaddedInstructionSizeOfBuiltin(Builtin builtin) const;
size_t CreateEmbeddedBlobDataHash() const;
size_t CreateEmbeddedBlobCodeHash() const;
@@ -173,9 +185,18 @@ class EmbeddedData final {
uint32_t instruction_offset;
uint32_t instruction_length;
// The offset and (unpadded) length of this builtin's metadata area
- // from the start of the embedded code section.
+ // from the start of the embedded data section.
uint32_t metadata_offset;
uint32_t metadata_length;
+
+ // The offsets describing inline metadata tables, relative to the start
+ // of the embedded data section.
+ uint32_t handler_table_offset;
+#if V8_EMBEDDED_CONSTANT_POOL
+ uint32_t constant_pool_offset;
+#endif
+ uint32_t code_comments_offset_offset;
+ uint32_t unwinding_info_offset_offset;
};
STATIC_ASSERT(offsetof(LayoutDescription, instruction_offset) ==
0 * kUInt32Size);
@@ -185,7 +206,23 @@ class EmbeddedData final {
2 * kUInt32Size);
STATIC_ASSERT(offsetof(LayoutDescription, metadata_length) ==
3 * kUInt32Size);
- STATIC_ASSERT(sizeof(LayoutDescription) == 4 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, handler_table_offset) ==
+ 4 * kUInt32Size);
+#if V8_EMBEDDED_CONSTANT_POOL
+ STATIC_ASSERT(offsetof(LayoutDescription, constant_pool_offset) ==
+ 5 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, code_comments_offset_offset) ==
+ 6 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, unwinding_info_offset_offset) ==
+ 7 * kUInt32Size);
+ STATIC_ASSERT(sizeof(LayoutDescription) == 8 * kUInt32Size);
+#else
+ STATIC_ASSERT(offsetof(LayoutDescription, code_comments_offset_offset) ==
+ 5 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, unwinding_info_offset_offset) ==
+ 6 * kUInt32Size);
+ STATIC_ASSERT(sizeof(LayoutDescription) == 7 * kUInt32Size);
+#endif
// The layout of the blob is as follows:
//
@@ -240,9 +277,11 @@ class EmbeddedData final {
const uint8_t* RawCode() const { return code_ + RawCodeOffset(); }
- const LayoutDescription* LayoutDescription() const {
- return reinterpret_cast<const struct LayoutDescription*>(
- data_ + LayoutDescriptionTableOffset());
+ const LayoutDescription& LayoutDescription(Builtin builtin) const {
+ const struct LayoutDescription* descs =
+ reinterpret_cast<const struct LayoutDescription*>(
+ data_ + LayoutDescriptionTableOffset());
+ return descs[static_cast<int>(builtin)];
}
const uint8_t* RawMetadata() const { return data_ + RawMetadataOffset(); }
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 1a906c5de7..ff77021baa 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -10,6 +10,7 @@
#include "src/codegen/source-position-table.h"
#include "src/flags/flags.h" // For ENABLE_CONTROL_FLOW_INTEGRITY_BOOL
#include "src/objects/code-inl.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
@@ -163,6 +164,7 @@ void EmbeddedFileWriter::WriteCodeSection(PlatformEmbeddedFileWriterBase* w,
++builtin) {
WriteBuiltin(w, blob, builtin);
}
+ w->PaddingAfterCode();
w->Newline();
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index e858da90b5..5c4d1d3bbc 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -29,7 +29,7 @@ const char* DirectiveAsString(DataDirective directive) {
} // namespace
void PlatformEmbeddedFileWriterAIX::SectionText() {
- fprintf(fp_, ".csect [GL], 5\n");
+ fprintf(fp_, ".csect [GL], 6\n");
}
void PlatformEmbeddedFileWriterAIX::SectionData() {
@@ -69,6 +69,11 @@ void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
// On x64 use 64-bytes code alignment to allow 64-bytes loop header alignment.
STATIC_ASSERT((1 << 6) >= kCodeAlignment);
fprintf(fp_, ".align 6\n");
+#elif V8_TARGET_ARCH_PPC64
+ // 64 byte alignment is needed on ppc64 to make sure p10 prefixed instructions
+ // don't cross 64-byte boundaries.
+ STATIC_ASSERT((1 << 6) >= kCodeAlignment);
+ fprintf(fp_, ".align 6\n");
#else
STATIC_ASSERT((1 << 5) >= kCodeAlignment);
fprintf(fp_, ".align 5\n");
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
index 59eeca63b9..3d50aeba0e 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
@@ -58,6 +58,7 @@ class PlatformEmbeddedFileWriterBase {
virtual void SectionRoData() = 0;
virtual void AlignToCodeAlignment() = 0;
+ virtual void PaddingAfterCode() {}
virtual void AlignToDataAlignment() = 0;
virtual void DeclareUint32(const char* name, uint32_t value) = 0;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index 641d3638f3..9309dbdd35 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -78,6 +78,11 @@ void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
// On x64 use 64-bytes code alignment to allow 64-bytes loop header alignment.
STATIC_ASSERT(64 >= kCodeAlignment);
fprintf(fp_, ".balign 64\n");
+#elif V8_TARGET_ARCH_PPC64
+ // 64 byte alignment is needed on ppc64 to make sure p10 prefixed instructions
+ // don't cross 64-byte boundaries.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
#else
STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index cfe9bbcde1..76a051b84d 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -64,12 +64,30 @@ void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
// On x64 use 64-bytes code alignment to allow 64-bytes loop header alignment.
STATIC_ASSERT(64 >= kCodeAlignment);
fprintf(fp_, ".balign 64\n");
+#elif V8_TARGET_ARCH_PPC64
+ // 64 byte alignment is needed on ppc64 to make sure p10 prefixed instructions
+ // don't cross 64-byte boundaries.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#elif V8_TARGET_ARCH_ARM64
+  // ARM64 macOS has a 16kiB page size. Since we want to remap the builtins
+  // on the heap, the start of the code section needs to be page-aligned.
+ fprintf(fp_, ".balign 16384\n");
#else
STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
#endif
}
+void PlatformEmbeddedFileWriterMac::PaddingAfterCode() {
+#if V8_TARGET_ARCH_ARM64
+ // ARM64 macOS has a 16kiB page size. Since we want to remap builtins on the
+ // heap, make sure that the trailing part of the page doesn't contain anything
+ // dangerous.
+ fprintf(fp_, ".balign 16384\n");
+#endif
+}
+
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
STATIC_ASSERT(8 >= Code::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
index e6d0760033..f66cd41a92 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
@@ -26,6 +26,7 @@ class PlatformEmbeddedFileWriterMac : public PlatformEmbeddedFileWriterBase {
void SectionRoData() override;
void AlignToCodeAlignment() override;
+ void PaddingAfterCode() override;
void AlignToDataAlignment() override;
void DeclareUint32(const char* name, uint32_t value) override;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 83b85c8df9..d4232939af 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -11,7 +11,7 @@
#if defined(V8_OS_WIN64)
#include "src/builtins/builtins.h"
#include "src/diagnostics/unwinding-info-win64.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
#endif // V8_OS_WIN64
@@ -641,6 +641,11 @@ void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
// On x64 use 64-bytes code alignment to allow 64-bytes loop header alignment.
STATIC_ASSERT(64 >= kCodeAlignment);
fprintf(fp_, ".balign 64\n");
+#elif V8_TARGET_ARCH_PPC64
+ // 64 byte alignment is needed on ppc64 to make sure p10 prefixed instructions
+ // don't cross 64-byte boundaries.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
#else
STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 9c2de5caca..a4ecf344ce 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -37,7 +37,7 @@ void ReadOnlyDeserializer::DeserializeIntoIsolate() {
ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
// Deserialize the Read-only Object Cache.
- for (size_t i = 0;; ++i) {
+ for (;;) {
Object* object = ro_heap->ExtendReadOnlyObjectCache();
// During deserialization, the visitor populates the read-only object
// cache and eventually terminates the cache with undefined.
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 35a62aa515..d47ff5d5f2 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -41,13 +41,17 @@ void ReadOnlySerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// in the root table, so don't try to serialize a reference and rely on the
// below CHECK(!did_serialize_not_mapped_symbol_) to make sure it doesn't
// serialize twice.
- if (!IsNotMappedSymbol(*obj)) {
- if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) return;
- if (SerializeBackReference(obj)) return;
- }
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *obj;
+ if (!IsNotMappedSymbol(raw)) {
+ if (SerializeHotObject(raw)) return;
+ if (IsRootAndHasBeenSerialized(raw) && SerializeRoot(raw)) return;
+ if (SerializeBackReference(raw)) return;
+ }
- CheckRehashability(*obj);
+ CheckRehashability(raw);
+ }
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_);
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index 3b9a7b84ba..7d77a17979 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -27,7 +27,7 @@ RootsSerializer::RootsSerializer(Isolate* isolate,
int RootsSerializer::SerializeInObjectCache(Handle<HeapObject> heap_object) {
int index;
- if (!object_cache_index_map_.LookupOrInsert(heap_object, &index)) {
+ if (!object_cache_index_map_.LookupOrInsert(*heap_object, &index)) {
// This object is not part of the object cache yet. Add it to the cache so
// we can refer to it via cache index from the delegating snapshot.
SerializeObject(heap_object);
diff --git a/deps/v8/src/snapshot/serializer-deserializer.h b/deps/v8/src/snapshot/serializer-deserializer.h
index 626a102704..99871677b0 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -35,8 +35,8 @@ class SerializerDeserializer : public RootVisitor {
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
- /* Free range 0x1d..0x1f */ \
- V(0x1d) V(0x1e) V(0x1f) \
+ /* Free range 0x1e..0x1f */ \
+ V(0x1e) V(0x1f) \
/* Free range 0x20..0x2f */ \
V(0x20) V(0x21) V(0x22) V(0x23) V(0x24) V(0x25) V(0x26) V(0x27) \
V(0x28) V(0x29) V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
@@ -83,7 +83,7 @@ class SerializerDeserializer : public RootVisitor {
enum Bytecode : byte {
//
- // ---------- byte code range 0x00..0x1c ----------
+ // ---------- byte code range 0x00..0x1d ----------
//
// 0x00..0x03 Allocate new object, in specified space.
@@ -114,6 +114,7 @@ class SerializerDeserializer : public RootVisitor {
kVariableRepeat,
// Used for embedder-allocated backing stores for TypedArrays.
kOffHeapBackingStore,
+ kOffHeapResizableBackingStore,
// Used for embedder-provided serialization data for embedder fields.
kEmbedderFieldsData,
// Raw data of variable length.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index dbc8be27b1..d5f18693a9 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -171,32 +171,34 @@ void Serializer::PrintStack(std::ostream& out) {
}
#endif // DEBUG
-bool Serializer::SerializeRoot(Handle<HeapObject> obj) {
+bool Serializer::SerializeRoot(HeapObject obj) {
RootIndex root_index;
// Derived serializers are responsible for determining if the root has
// actually been serialized before calling this.
- if (root_index_map()->Lookup(*obj, &root_index)) {
+ if (root_index_map()->Lookup(obj, &root_index)) {
PutRoot(root_index);
return true;
}
return false;
}
-bool Serializer::SerializeHotObject(Handle<HeapObject> obj) {
+bool Serializer::SerializeHotObject(HeapObject obj) {
+ DisallowGarbageCollection no_gc;
// Encode a reference to a hot object by its index in the working set.
- int index = hot_objects_.Find(*obj);
+ int index = hot_objects_.Find(obj);
if (index == HotObjectsList::kNotFound) return false;
DCHECK(index >= 0 && index < kHotObjectCount);
if (FLAG_trace_serializer) {
PrintF(" Encoding hot object %d:", index);
- obj->ShortPrint();
+ obj.ShortPrint();
PrintF("\n");
}
sink_.Put(HotObject::Encode(index), "HotObject");
return true;
}
-bool Serializer::SerializeBackReference(Handle<HeapObject> obj) {
+bool Serializer::SerializeBackReference(HeapObject obj) {
+ DisallowGarbageCollection no_gc;
const SerializerReference* reference = reference_map_.LookupReference(obj);
if (reference == nullptr) return false;
// Encode the location of an already deserialized object in order to write
@@ -213,7 +215,7 @@ bool Serializer::SerializeBackReference(Handle<HeapObject> obj) {
DCHECK(reference->is_back_reference());
if (FLAG_trace_serializer) {
PrintF(" Encoding back reference to: ");
- obj->ShortPrint();
+ obj.ShortPrint();
PrintF("\n");
}
@@ -223,29 +225,28 @@ bool Serializer::SerializeBackReference(Handle<HeapObject> obj) {
return true;
}
-bool Serializer::SerializePendingObject(Handle<HeapObject> obj) {
+bool Serializer::SerializePendingObject(HeapObject obj) {
PendingObjectReferences* refs_to_object =
forward_refs_per_pending_object_.Find(obj);
if (refs_to_object == nullptr) {
return false;
}
-
PutPendingForwardReference(*refs_to_object);
return true;
}
-bool Serializer::ObjectIsBytecodeHandler(Handle<HeapObject> obj) const {
- if (!obj->IsCode()) return false;
- return (Code::cast(*obj).kind() == CodeKind::BYTECODE_HANDLER);
+bool Serializer::ObjectIsBytecodeHandler(HeapObject obj) const {
+ if (!obj.IsCode()) return false;
+ return (Code::cast(obj).kind() == CodeKind::BYTECODE_HANDLER);
}
void Serializer::PutRoot(RootIndex root) {
+ DisallowGarbageCollection no_gc;
int root_index = static_cast<int>(root);
- Handle<HeapObject> object =
- Handle<HeapObject>::cast(isolate()->root_handle(root));
+ HeapObject object = HeapObject::cast(isolate()->root(root));
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
- object->ShortPrint();
+ object.ShortPrint();
PrintF("\n");
}
@@ -256,12 +257,12 @@ void Serializer::PutRoot(RootIndex root) {
// TODO(ulan): Check that it works with young large objects.
if (root_index < kRootArrayConstantsCount &&
- !Heap::InYoungGeneration(*object)) {
+ !Heap::InYoungGeneration(object)) {
sink_.Put(RootArrayConstant::Encode(root), "RootConstant");
} else {
sink_.Put(kRootArray, "RootSerialization");
sink_.PutInt(root_index, "root_index");
- hot_objects_.Add(*object);
+ hot_objects_.Add(object);
}
}
@@ -280,11 +281,11 @@ void Serializer::PutSmiRoot(FullObjectSlot slot) {
sink_.PutRaw(raw_value_as_bytes, bytes_to_output, "Bytes");
}
-void Serializer::PutBackReference(Handle<HeapObject> object,
+void Serializer::PutBackReference(HeapObject object,
SerializerReference reference) {
- DCHECK_EQ(*object, *back_refs_[reference.back_ref_index()]);
+ DCHECK_EQ(object, *back_refs_[reference.back_ref_index()]);
sink_.PutInt(reference.back_ref_index(), "BackRefIndex");
- hot_objects_.Add(*object);
+ hot_objects_.Add(object);
}
void Serializer::PutAttachedReference(SerializerReference reference) {
@@ -346,8 +347,9 @@ ExternalReferenceEncoder::Value Serializer::EncodeExternalReference(
return result.FromJust();
}
-void Serializer::RegisterObjectIsPending(Handle<HeapObject> obj) {
- if (IsNotMappedSymbol(*obj)) return;
+void Serializer::RegisterObjectIsPending(HeapObject obj) {
+ DisallowGarbageCollection no_gc;
+ if (IsNotMappedSymbol(obj)) return;
// Add the given object to the pending objects -> forward refs map.
auto find_result = forward_refs_per_pending_object_.FindOrInsert(obj);
@@ -358,11 +360,12 @@ void Serializer::RegisterObjectIsPending(Handle<HeapObject> obj) {
// deferred objects queue though, since it may be the very object we just
// popped off that queue, so just check that it can be deferred.
DCHECK_IMPLIES(find_result.already_exists, *find_result.entry != nullptr);
- DCHECK_IMPLIES(find_result.already_exists, CanBeDeferred(*obj));
+ DCHECK_IMPLIES(find_result.already_exists, CanBeDeferred(obj));
}
-void Serializer::ResolvePendingObject(Handle<HeapObject> obj) {
- if (IsNotMappedSymbol(*obj)) return;
+void Serializer::ResolvePendingObject(HeapObject obj) {
+ DisallowGarbageCollection no_gc;
+ if (IsNotMappedSymbol(obj)) return;
std::vector<int>* refs;
CHECK(forward_refs_per_pending_object_.Delete(obj, &refs));
@@ -427,7 +430,7 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
// Until the space for the object is allocated, it is considered "pending".
- serializer_->RegisterObjectIsPending(object_);
+ serializer_->RegisterObjectIsPending(*object_);
// Serialize map (first word of the object) before anything else, so that
// the deserializer can access it when allocating. Make sure that the map
@@ -444,7 +447,7 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
// Now that the object is allocated, we can resolve pending references to
// it.
- serializer_->ResolvePendingObject(object_);
+ serializer_->ResolvePendingObject(*object_);
}
if (FLAG_serialization_statistics) {
@@ -474,79 +477,111 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
}
uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
- void* backing_store, int32_t byte_length) {
+ void* backing_store, int32_t byte_length, Maybe<int32_t> max_byte_length) {
+ DisallowGarbageCollection no_gc;
const SerializerReference* reference_ptr =
serializer_->reference_map()->LookupBackingStore(backing_store);
// Serialize the off-heap backing store.
- if (!reference_ptr) {
- sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
- sink_->PutInt(byte_length, "length");
- sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
- "BackingStore");
- DCHECK_NE(0, serializer_->seen_backing_stores_index_);
- SerializerReference reference =
- SerializerReference::OffHeapBackingStoreReference(
- serializer_->seen_backing_stores_index_++);
- // Mark this backing store as already serialized.
- serializer_->reference_map()->AddBackingStore(backing_store, reference);
- return reference.off_heap_backing_store_index();
- } else {
+ if (reference_ptr) {
return reference_ptr->off_heap_backing_store_index();
}
+ if (max_byte_length.IsJust()) {
+ sink_->Put(kOffHeapResizableBackingStore,
+ "Off-heap resizable backing store");
+ } else {
+ sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
+ }
+ sink_->PutInt(byte_length, "length");
+ if (max_byte_length.IsJust()) {
+ sink_->PutInt(max_byte_length.FromJust(), "max length");
+ }
+ sink_->PutRaw(static_cast<byte*>(backing_store), byte_length, "BackingStore");
+ DCHECK_NE(0, serializer_->seen_backing_stores_index_);
+ SerializerReference reference =
+ SerializerReference::OffHeapBackingStoreReference(
+ serializer_->seen_backing_stores_index_++);
+ // Mark this backing store as already serialized.
+ serializer_->reference_map()->AddBackingStore(backing_store, reference);
+ return reference.off_heap_backing_store_index();
}
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object_);
- if (typed_array->is_on_heap()) {
- typed_array->RemoveExternalPointerCompensationForSerialization(isolate());
- } else {
- if (!typed_array->WasDetached()) {
- // Explicitly serialize the backing store now.
- JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array->buffer());
- // We cannot store byte_length larger than int32 range in the snapshot.
- CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
- int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
- size_t byte_offset = typed_array->byte_offset();
-
- // We need to calculate the backing store from the data pointer
- // because the ArrayBuffer may already have been serialized.
- void* backing_store = reinterpret_cast<void*>(
- reinterpret_cast<Address>(typed_array->DataPtr()) - byte_offset);
-
- uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- typed_array->SetExternalBackingStoreRefForSerialization(ref);
+ {
+ DisallowGarbageCollection no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(*object_);
+ if (typed_array.is_on_heap()) {
+ typed_array.RemoveExternalPointerCompensationForSerialization(isolate());
} else {
- typed_array->SetExternalBackingStoreRefForSerialization(0);
+ if (!typed_array.WasDetached()) {
+ // Explicitly serialize the backing store now.
+ JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array.buffer());
+ // We cannot store byte_length or max_byte_length larger than int32
+ // range in the snapshot.
+ CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
+ int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
+ Maybe<int32_t> max_byte_length = Nothing<int32_t>();
+ if (buffer.is_resizable()) {
+ CHECK_LE(buffer.max_byte_length(),
+ std::numeric_limits<int32_t>::max());
+ max_byte_length =
+ Just(static_cast<int32_t>(buffer.max_byte_length()));
+ }
+ size_t byte_offset = typed_array.byte_offset();
+
+ // We need to calculate the backing store from the data pointer
+ // because the ArrayBuffer may already have been serialized.
+ void* backing_store = reinterpret_cast<void*>(
+ reinterpret_cast<Address>(typed_array.DataPtr()) - byte_offset);
+
+ uint32_t ref =
+ SerializeBackingStore(backing_store, byte_length, max_byte_length);
+ typed_array.SetExternalBackingStoreRefForSerialization(ref);
+ } else {
+ typed_array.SetExternalBackingStoreRefForSerialization(0);
+ }
}
}
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
- Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(object_);
- void* backing_store = buffer->backing_store();
- // We cannot store byte_length larger than int32 range in the snapshot.
- CHECK_LE(buffer->byte_length(), std::numeric_limits<int32_t>::max());
- int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
- ArrayBufferExtension* extension = buffer->extension();
-
- // Only serialize non-empty backing stores.
- if (buffer->IsEmpty()) {
- buffer->SetBackingStoreRefForSerialization(kEmptyBackingStoreRefSentinel);
- } else {
- uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- buffer->SetBackingStoreRefForSerialization(ref);
+ ArrayBufferExtension* extension;
+ void* backing_store;
+ {
+ DisallowGarbageCollection no_gc;
+ JSArrayBuffer buffer = JSArrayBuffer::cast(*object_);
+ backing_store = buffer.backing_store();
+ // We cannot store byte_length or max_byte_length larger than int32 range in
+ // the snapshot.
+ CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
+ int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
+ Maybe<int32_t> max_byte_length = Nothing<int32_t>();
+ if (buffer.is_resizable()) {
+ CHECK_LE(buffer.max_byte_length(), std::numeric_limits<int32_t>::max());
+ max_byte_length = Just(static_cast<int32_t>(buffer.max_byte_length()));
+ }
+ extension = buffer.extension();
- // Ensure deterministic output by setting extension to null during
- // serialization.
- buffer->set_extension(nullptr);
- }
+ // Only serialize non-empty backing stores.
+ if (buffer.IsEmpty()) {
+ buffer.SetBackingStoreRefForSerialization(kEmptyBackingStoreRefSentinel);
+ } else {
+ uint32_t ref =
+ SerializeBackingStore(backing_store, byte_length, max_byte_length);
+ buffer.SetBackingStoreRefForSerialization(ref);
+ // Ensure deterministic output by setting extension to null during
+ // serialization.
+ buffer.set_extension(nullptr);
+ }
+ }
SerializeObject();
-
- buffer->set_backing_store(isolate(), backing_store);
- buffer->set_extension(extension);
+ {
+ JSArrayBuffer buffer = JSArrayBuffer::cast(*object_);
+ buffer.set_backing_store(isolate(), backing_store);
+ buffer.set_extension(extension);
+ }
}
void Serializer::ObjectSerializer::SerializeExternalString() {
@@ -629,67 +664,72 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
  // may be left-over bytes that need to be padded.
int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
- for (int i = 0; i < padding_size; i++)
+ for (int i = 0; i < padding_size; i++) {
sink_->Put(static_cast<byte>(0), "StringPadding");
+ }
}
// Clear and later restore the next link in the weak cell or allocation site.
// TODO(all): replace this with proper iteration of weak slots in serializer.
class V8_NODISCARD UnlinkWeakNextScope {
public:
- explicit UnlinkWeakNextScope(Heap* heap, Handle<HeapObject> object) {
+ explicit UnlinkWeakNextScope(Heap* heap, HeapObject object) {
Isolate* isolate = heap->isolate();
- if (object->IsAllocationSite(isolate) &&
- Handle<AllocationSite>::cast(object)->HasWeakNext()) {
+ if (object.IsAllocationSite(isolate) &&
+ AllocationSite::cast(object).HasWeakNext()) {
object_ = object;
- next_ = handle(AllocationSite::cast(*object).weak_next(), isolate);
- Handle<AllocationSite>::cast(object)->set_weak_next(
+ next_ = AllocationSite::cast(object).weak_next();
+ AllocationSite::cast(object).set_weak_next(
ReadOnlyRoots(isolate).undefined_value());
}
}
~UnlinkWeakNextScope() {
- if (!object_.is_null()) {
- Handle<AllocationSite>::cast(object_)->set_weak_next(
- *next_, UPDATE_WEAK_WRITE_BARRIER);
- }
+ if (next_ == Smi::zero()) return;
+ AllocationSite::cast(object_).set_weak_next(next_,
+ UPDATE_WEAK_WRITE_BARRIER);
}
private:
- Handle<HeapObject> object_;
- Handle<Object> next_;
+ HeapObject object_;
+ Object next_ = Smi::zero();
DISALLOW_GARBAGE_COLLECTION(no_gc_)
};
void Serializer::ObjectSerializer::Serialize() {
RecursionScope recursion(serializer_);
- // Defer objects as "pending" if they cannot be serialized now, or if we
- // exceed a certain recursion depth. Some objects cannot be deferred.
- if ((recursion.ExceedsMaximum() && CanBeDeferred(*object_)) ||
- serializer_->MustBeDeferred(*object_)) {
- DCHECK(CanBeDeferred(*object_));
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *object_;
+ // Defer objects as "pending" if they cannot be serialized now, or if we
+ // exceed a certain recursion depth. Some objects cannot be deferred.
+ if ((recursion.ExceedsMaximum() && CanBeDeferred(raw)) ||
+ serializer_->MustBeDeferred(raw)) {
+ DCHECK(CanBeDeferred(raw));
+ if (FLAG_trace_serializer) {
+ PrintF(" Deferring heap object: ");
+ object_->ShortPrint();
+ PrintF("\n");
+ }
+ // Deferred objects are considered "pending".
+ serializer_->RegisterObjectIsPending(raw);
+ serializer_->PutPendingForwardReference(
+ *serializer_->forward_refs_per_pending_object_.Find(raw));
+ serializer_->QueueDeferredObject(raw);
+ return;
+ }
+
if (FLAG_trace_serializer) {
- PrintF(" Deferring heap object: ");
+ PrintF(" Encoding heap object: ");
object_->ShortPrint();
PrintF("\n");
}
- // Deferred objects are considered "pending".
- serializer_->RegisterObjectIsPending(object_);
- serializer_->PutPendingForwardReference(
- *serializer_->forward_refs_per_pending_object_.Find(object_));
- serializer_->QueueDeferredObject(object_);
- return;
- }
-
- if (FLAG_trace_serializer) {
- PrintF(" Encoding heap object: ");
- object_->ShortPrint();
- PrintF("\n");
}
PtrComprCageBase cage_base(isolate());
- if (object_->IsExternalString(cage_base)) {
+ InstanceType instance_type = object_->map(cage_base).instance_type();
+ if (InstanceTypeChecker::IsExternalString(instance_type)) {
SerializeExternalString();
return;
} else if (!ReadOnlyHeap::Contains(*object_)) {
@@ -703,43 +743,41 @@ void Serializer::ObjectSerializer::Serialize() {
Handle<SeqTwoByteString>::cast(object_)->clear_padding();
}
}
- if (object_->IsJSTypedArray(cage_base)) {
+ if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
SerializeJSTypedArray();
return;
- } else if (object_->IsJSArrayBuffer(cage_base)) {
+ } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
SerializeJSArrayBuffer();
return;
- }
-
- // We don't expect fillers.
- DCHECK(!object_->IsFreeSpaceOrFiller(cage_base));
-
- if (object_->IsScript(cage_base)) {
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
// Clear cached line ends.
Oddball undefined = ReadOnlyRoots(isolate()).undefined_value();
Handle<Script>::cast(object_)->set_line_ends(undefined);
}
+ // We don't expect fillers.
+ DCHECK(!object_->IsFreeSpaceOrFiller(cage_base));
+
SerializeObject();
}
namespace {
-SnapshotSpace GetSnapshotSpace(Handle<HeapObject> object) {
+SnapshotSpace GetSnapshotSpace(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- if (object->IsCode()) {
+ if (object.IsCode()) {
return SnapshotSpace::kCode;
- } else if (ReadOnlyHeap::Contains(*object)) {
+ } else if (ReadOnlyHeap::Contains(object)) {
return SnapshotSpace::kReadOnlyHeap;
- } else if (object->IsMap()) {
+ } else if (object.IsMap()) {
return SnapshotSpace::kMap;
} else {
return SnapshotSpace::kOld;
}
- } else if (ReadOnlyHeap::Contains(*object)) {
+ } else if (ReadOnlyHeap::Contains(object)) {
return SnapshotSpace::kReadOnlyHeap;
} else {
AllocationSpace heap_space =
- MemoryChunk::FromHeapObject(*object)->owner_identity();
+ MemoryChunk::FromHeapObject(object)->owner_identity();
// Large code objects are not supported and cannot be expressed by
// SnapshotSpace.
DCHECK_NE(heap_space, CODE_LO_SPACE);
@@ -781,7 +819,7 @@ void Serializer::ObjectSerializer::SerializeObject() {
if (map == ReadOnlyRoots(isolate()).descriptor_array_map()) {
map = ReadOnlyRoots(isolate()).strong_descriptor_array_map();
}
- SnapshotSpace space = GetSnapshotSpace(object_);
+ SnapshotSpace space = GetSnapshotSpace(*object_);
SerializePrologue(space, size, map);
// Serialize the rest of the object.
@@ -811,15 +849,16 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
}
void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
- UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), object_);
- if (object_->IsCode()) {
+ HeapObject raw = *object_;
+ UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), raw);
+ if (raw.IsCode()) {
// For code objects, perform a custom serialization.
SerializeCode(map, size);
} else {
// For other objects, iterate references first.
- object_->IterateBody(map, size, this);
+ raw.IterateBody(map, size, this);
// Then output data payload, if any.
- OutputRawData(object_->address() + size);
+ OutputRawData(raw.address() + size);
}
}
@@ -862,7 +901,7 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
}
Handle<HeapObject> obj = handle(current_contents, isolate());
- if (serializer_->SerializePendingObject(obj)) {
+ if (serializer_->SerializePendingObject(*obj)) {
bytes_processed_so_far_ += kTaggedSize;
++current;
continue;
@@ -913,7 +952,7 @@ void Serializer::ObjectSerializer::VisitCodePointer(HeapObject host,
DCHECK(contents.IsCode());
Handle<HeapObject> obj = handle(HeapObject::cast(contents), isolate());
- if (!serializer_->SerializePendingObject(obj)) {
+ if (!serializer_->SerializePendingObject(*obj)) {
serializer_->SerializeObject(obj);
}
bytes_processed_so_far_ += kTaggedSize;
@@ -1150,10 +1189,12 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
PtrComprCageBase cage_base(isolate_);
if (object_->IsBytecodeArray(cage_base)) {
// The bytecode age field can be changed by GC concurrently.
- byte field_value = BytecodeArray::kNoAgeBytecodeAge;
+ static_assert(BytecodeArray::kBytecodeAgeSize == kUInt16Size);
+ uint16_t field_value = BytecodeArray::kNoAgeBytecodeAge;
OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
BytecodeArray::kBytecodeAgeOffset,
- sizeof(field_value), &field_value);
+ sizeof(field_value),
+ reinterpret_cast<byte*>(&field_value));
} else if (object_->IsDescriptorArray(cage_base)) {
// The number of marked descriptors field can be changed by GC
// concurrently.
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 8aab2028cc..fc300b21f0 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -233,8 +233,7 @@ class Serializer : public SerializerDeserializer {
void PutRoot(RootIndex root_index);
void PutSmiRoot(FullObjectSlot slot);
- void PutBackReference(Handle<HeapObject> object,
- SerializerReference reference);
+ void PutBackReference(HeapObject object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference);
void PutNextChunk(SnapshotSpace space);
void PutRepeat(int repeat_count);
@@ -247,19 +246,19 @@ class Serializer : public SerializerDeserializer {
void ResolvePendingForwardReference(int obj);
// Returns true if the object was successfully serialized as a root.
- bool SerializeRoot(Handle<HeapObject> obj);
+ bool SerializeRoot(HeapObject obj);
// Returns true if the object was successfully serialized as hot object.
- bool SerializeHotObject(Handle<HeapObject> obj);
+ bool SerializeHotObject(HeapObject obj);
// Returns true if the object was successfully serialized as back reference.
- bool SerializeBackReference(Handle<HeapObject> obj);
+ bool SerializeBackReference(HeapObject obj);
// Returns true if the object was successfully serialized as pending object.
- bool SerializePendingObject(Handle<HeapObject> obj);
+ bool SerializePendingObject(HeapObject obj);
// Returns true if the given heap object is a bytecode handler code object.
- bool ObjectIsBytecodeHandler(Handle<HeapObject> obj) const;
+ bool ObjectIsBytecodeHandler(HeapObject obj) const;
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr);
@@ -278,18 +277,18 @@ class Serializer : public SerializerDeserializer {
Code CopyCode(Code code);
- void QueueDeferredObject(Handle<HeapObject> obj) {
+ void QueueDeferredObject(HeapObject obj) {
DCHECK_NULL(reference_map_.LookupReference(obj));
- deferred_objects_.Push(*obj);
+ deferred_objects_.Push(obj);
}
  // Register that the given object shouldn't be immediately serialized, but
// will be serialized later and any references to it should be pending forward
// references.
- void RegisterObjectIsPending(Handle<HeapObject> obj);
+ void RegisterObjectIsPending(HeapObject obj);
// Resolve the given pending object reference with the current object.
- void ResolvePendingObject(Handle<HeapObject> obj);
+ void ResolvePendingObject(HeapObject obj);
void OutputStatistics(const char* name);
@@ -470,7 +469,8 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
ExternalPointerTag tag);
void OutputRawData(Address up_to);
void SerializeCode(Map map, int size);
- uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
+ uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length,
+ Maybe<int32_t> max_byte_length);
void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
void SerializeExternalString();
diff --git a/deps/v8/src/snapshot/shared-heap-serializer.cc b/deps/v8/src/snapshot/shared-heap-serializer.cc
index daacbc5e99..c29554787f 100644
--- a/deps/v8/src/snapshot/shared-heap-serializer.cc
+++ b/deps/v8/src/snapshot/shared-heap-serializer.cc
@@ -169,15 +169,22 @@ void SharedHeapSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// Objects in the shared heap cannot depend on per-Isolate roots but can
// depend on RO roots since sharing objects requires sharing the RO space.
DCHECK(CanBeInSharedOldSpace(*obj) || ReadOnlyHeap::Contains(*obj));
-
- if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) return;
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *obj;
+ if (SerializeHotObject(raw)) return;
+ if (IsRootAndHasBeenSerialized(raw) && SerializeRoot(raw)) return;
+ }
if (SerializeUsingReadOnlyObjectCache(&sink_, obj)) return;
- if (SerializeBackReference(obj)) return;
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *obj;
+ if (SerializeBackReference(raw)) return;
+ CheckRehashability(raw);
- CheckRehashability(*obj);
+ DCHECK(!ReadOnlyHeap::Contains(raw));
+ }
- DCHECK(!ReadOnlyHeap::Contains(*obj));
ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 4350d13777..a8df512d4c 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -313,10 +313,6 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
// Test serialization.
{
GlobalSafepointScope global_safepoint(isolate);
- base::Optional<SafepointScope> shared_isolate_safepoint_scope;
- if (Isolate* shared_isolate = isolate->shared_isolate()) {
- shared_isolate_safepoint_scope.emplace(shared_isolate->heap());
- }
DisallowGarbageCollection no_gc;
Snapshot::SerializerFlags flags(
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index ad80d45fe3..199ac81818 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -144,13 +144,17 @@ void StartupSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
"the isolate snapshot");
}
#endif // DEBUG
- DCHECK(!IsUnexpectedCodeObject(isolate(), *obj));
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject raw = *obj;
+ DCHECK(!IsUnexpectedCodeObject(isolate(), raw));
+ if (SerializeHotObject(raw)) return;
+ if (IsRootAndHasBeenSerialized(raw) && SerializeRoot(raw)) return;
+ }
- if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) return;
if (SerializeUsingReadOnlyObjectCache(&sink_, obj)) return;
if (SerializeUsingSharedHeapObjectCache(&sink_, obj)) return;
- if (SerializeBackReference(obj)) return;
+ if (SerializeBackReference(*obj)) return;
bool use_simulator = false;
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 325c6dea8f..e3ba0f55c0 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -111,6 +111,8 @@ static const char* const ANNOTATION_EXPORT = "@export";
static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
static const char* const ANNOTATION_USE_PARENT_TYPE_CHECKER =
"@useParentTypeChecker";
+static const char* const ANNOTATION_CPP_OBJECT_DEFINITION =
+ "@cppObjectDefinition";
// Generate C++ accessors with relaxed store semantics.
// Weak<T> and MaybeObject fields always use relaxed store.
static const char* const ANNOTATION_CPP_RELAXED_STORE = "@cppRelaxedStore";
@@ -162,6 +164,7 @@ enum class ClassFlag {
kDoNotGenerateCast = 1 << 11,
kGenerateUniqueMap = 1 << 12,
kGenerateFactoryFunction = 1 << 13,
+ kCppObjectDefinition = 1 << 14,
};
using ClassFlags = base::Flags<ClassFlag>;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index e882dd0f2c..14b12ec587 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -3995,6 +3995,7 @@ class CppClassGenerator {
}
void GenerateClass();
+ void GenerateCppObjectDefinitionAsserts();
private:
SourcePosition Position();
@@ -4124,8 +4125,6 @@ void CppClassGenerator::GenerateClass() {
<< "::cast(*this), "
"isolate);\n";
impl_ << "}\n\n";
- }
- if (type_->ShouldGenerateVerify()) {
impl_ << "\n";
}
@@ -4241,6 +4240,36 @@ void CppClassGenerator::GenerateClass() {
}
}
+void CppClassGenerator::GenerateCppObjectDefinitionAsserts() {
+ hdr_ << "// Definition " << Position() << "\n"
+ << template_decl() << "\n"
+ << "class " << gen_name_ << "Asserts {\n";
+
+ ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
+ type_->GetSuperClass());
+ for (auto f : type_->fields()) {
+ CurrentSourcePosition::Scope scope(f.pos);
+ g.RecordOffsetFor(f);
+ }
+ g.Finish();
+ hdr_ << "\n";
+
+ for (auto f : type_->fields()) {
+ std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
+ std::string type = f.name_and_type.type->SimpleName();
+ hdr_ << " static_assert(" << field << " == D::" << field << ",\n"
+ << " \"Values of " << name_ << "::" << field
+ << " defined in Torque and C++ do not match\");\n"
+ << " static_assert(StaticStringsEqual(\"" << type << "\", D::k"
+ << CamelifyString(f.name_and_type.name) << "TqFieldType),\n"
+ << " \"Types of " << name_ << "::" << field
+ << " specified in Torque and C++ do not match\");\n";
+ }
+ hdr_ << " static_assert(kSize == D::kSize);\n";
+
+ hdr_ << "};\n\n";
+}
+
void CppClassGenerator::GenerateClassCasts() {
cpp::Class owner({cpp::TemplateParameter("D"), cpp::TemplateParameter("P")},
gen_name_);
@@ -4704,7 +4733,9 @@ void ImplementationVisitor::GenerateClassDefinitions(
std::string name = type->ShouldGenerateCppClassDefinitions()
? type->name()
: type->GetGeneratedTNodeTypeName();
- header << "class " << name << ";\n";
+ if (type->ShouldGenerateCppClassDefinitions()) {
+ header << "class " << name << ";\n";
+ }
forward_declarations << "class " << name << ";\n";
}
@@ -4718,6 +4749,9 @@ void ImplementationVisitor::GenerateClassDefinitions(
if (type->ShouldGenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
g.GenerateClass();
+ } else if (type->ShouldGenerateCppObjectDefinitionAsserts()) {
+ CppClassGenerator g(type, header, inline_header, implementation);
+ g.GenerateCppObjectDefinitionAsserts();
}
for (const Field& f : type->fields()) {
const Type* field_type = f.name_and_type.type;
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 5cbe07309b..5670619327 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -975,7 +975,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
ANNOTATION_GENERATE_UNIQUE_MAP, ANNOTATION_GENERATE_FACTORY_FUNCTION,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
- ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
+ ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT,
+ ANNOTATION_CPP_OBJECT_DEFINITION},
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
ANNOTATION_INSTANCE_TYPE_VALUE});
ClassFlags flags = ClassFlag::kNone;
@@ -1020,6 +1021,9 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT)) {
flags |= ClassFlag::kLowestInstanceTypeWithinParent;
}
+ if (annotations.Contains(ANNOTATION_CPP_OBJECT_DEFINITION)) {
+ flags |= ClassFlag::kCppObjectDefinition;
+ }
auto is_extern = child_results->NextAs<bool>();
if (is_extern) flags |= ClassFlag::kExtern;
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index c49753be21..a2fff537b6 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -668,16 +668,21 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return !IsExtern() || (ShouldGenerateCppClassDefinitions() &&
- !IsAbstract() && !HasUndefinedLayout());
+ if (flags_ & ClassFlag::kCppObjectDefinition) return false;
+ if (!IsExtern()) return true;
+ if (!ShouldGenerateCppClassDefinitions()) return false;
+ return !IsAbstract() && !HasUndefinedLayout();
}
bool ShouldGenerateVerify() const {
- return !IsExtern() || (ShouldGenerateCppClassDefinitions() &&
- !HasUndefinedLayout() && !IsShape());
+ if (flags_ & ClassFlag::kCppObjectDefinition) return false;
+ if (!IsExtern()) return true;
+ if (!ShouldGenerateCppClassDefinitions()) return false;
+ return !HasUndefinedLayout() && !IsShape();
}
bool ShouldGenerateBodyDescriptor() const {
- return flags_ & ClassFlag::kGenerateBodyDescriptor ||
- (!IsAbstract() && !IsExtern());
+ if (flags_ & ClassFlag::kCppObjectDefinition) return false;
+ if (flags_ & ClassFlag::kGenerateBodyDescriptor) return true;
+ return !IsAbstract() && !IsExtern();
}
bool DoNotGenerateCast() const {
return flags_ & ClassFlag::kDoNotGenerateCast;
@@ -688,8 +693,12 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
}
bool ShouldGenerateCppClassDefinitions() const {
+ if (flags_ & ClassFlag::kCppObjectDefinition) return false;
return (flags_ & ClassFlag::kGenerateCppClassDefinitions) || !IsExtern();
}
+ bool ShouldGenerateCppObjectDefinitionAsserts() const {
+ return flags_ & ClassFlag::kCppObjectDefinition;
+ }
bool ShouldGenerateFullClassDefinition() const { return !IsExtern(); }
bool ShouldGenerateUniqueMap() const {
return (flags_ & ClassFlag::kGenerateUniqueMap) ||
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 128fd3bd5c..aa5a20d8a7 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -89,6 +89,7 @@ void ValidateCodeObjects() {
}
// Check the validity of the free list.
+#ifdef DEBUG
size_t free_count = 0;
for (size_t i = gNextCodeObject; i != gNumCodeObjects;
i = gCodeObjects[i].next_free) {
@@ -106,6 +107,7 @@ void ValidateCodeObjects() {
}
}
TH_DCHECK(free_count == free_count2);
+#endif
}
} // namespace
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 1aa7a305c7..84ffdbd056 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -27,7 +27,7 @@ namespace trap_handler {
#define V8_TRAP_HANDLER_SUPPORTED true
// Arm64 simulator on x64 on Linux, Mac, or Windows.
#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && \
- (V8_OS_LINUX || V8_OS_DARWIN)
+ (V8_OS_LINUX || V8_OS_DARWIN || V8_OS_WIN)
#define V8_TRAP_HANDLER_VIA_SIMULATOR
#define V8_TRAP_HANDLER_SUPPORTED true
// Everything else is unsupported.
diff --git a/deps/v8/src/utils/memcopy.cc b/deps/v8/src/utils/memcopy.cc
index 7e0fdb9a00..6bf529e7f9 100644
--- a/deps/v8/src/utils/memcopy.cc
+++ b/deps/v8/src/utils/memcopy.cc
@@ -4,7 +4,7 @@
#include "src/utils/memcopy.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 005e1c4ad4..6eafcf5466 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -490,10 +490,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot);
class BytecodeOffset {
public:
- explicit BytecodeOffset(int id) : id_(id) {}
- int ToInt() const { return id_; }
+ explicit constexpr BytecodeOffset(int id) : id_(id) {}
+ constexpr int ToInt() const { return id_; }
- static BytecodeOffset None() { return BytecodeOffset(kNoneId); }
+ static constexpr BytecodeOffset None() { return BytecodeOffset(kNoneId); }
// Special bailout id support for deopting into the {JSConstructStub} stub.
// The following hard-coded deoptimization points are supported by the stub:
@@ -506,7 +506,7 @@ class BytecodeOffset {
id_ == ConstructStubInvoke().ToInt();
}
- bool IsNone() const { return id_ == kNoneId; }
+ constexpr bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BytecodeOffset& other) const {
return id_ == other.id_;
}
@@ -659,6 +659,16 @@ V8_INLINE void ZapCode(Address addr, size_t size_in_bytes) {
std::memset(reinterpret_cast<void*>(addr), kZapByte, size_in_bytes);
}
+inline bool RoundUpToPageSize(size_t byte_length, size_t page_size,
+ size_t max_allowed_byte_length, size_t* pages) {
+ size_t bytes_wanted = RoundUp(byte_length, page_size);
+ if (bytes_wanted > max_allowed_byte_length) {
+ return false;
+ }
+ *pages = bytes_wanted / page_size;
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index fd67a671ec..bd28a6f7b6 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -155,9 +155,9 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, LiftoffRegister rhs) {
Register dst_low = dst.low_gp();
if (dst_low == lhs.high_gp() || dst_low == rhs.high_gp()) {
- dst_low = assm->GetUnusedRegister(
- kGpReg, LiftoffRegList::ForRegs(lhs, rhs, dst.high_gp()))
- .gp();
+ dst_low =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs, dst.high_gp()})
+ .gp();
}
(assm->*op)(dst_low, lhs.low_gp(), rhs.low_gp(), SetCC, al);
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(rhs.high_gp()),
@@ -193,7 +193,7 @@ inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
// Left shift writes {dst_high} then {dst_low}, right shifts write {dst_low}
// then {dst_high}.
Register clobbered_dst_reg = is_left_shift ? dst_high : dst_low;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(clobbered_dst_reg, src);
+ LiftoffRegList pinned = {clobbered_dst_reg, src};
Register amount_capped =
pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
assm->and_(amount_capped, amount, Operand(0x3F));
@@ -565,7 +565,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -950,8 +950,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Register dst_addr,
StoreType type,
void (*op)(LiftoffAssembler*, Register, Register,
Register)) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
switch (type.value()) {
case StoreType::kI64Store8:
__ LoadConstant(result.high(), WasmValue(0));
@@ -1002,16 +1001,14 @@ inline void AtomicOp64(LiftoffAssembler* lasm, Register dst_addr,
// Make sure {dst_low} and {dst_high} are not occupied by any other value.
Register value_low = value.low_gp();
Register value_high = value.high_gp();
- LiftoffRegList pinned = LiftoffRegList::ForRegs(
- dst_addr, offset_reg, value_low, value_high, dst_low, dst_high);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value_low,
+ value_high, dst_low, dst_high};
__ ClearRegister(dst_low, {&dst_addr, &offset_reg, &value_low, &value_high},
pinned);
- pinned = pinned |
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value_low, value_high);
+ pinned = pinned | LiftoffRegList{dst_addr, offset_reg, value_low, value_high};
__ ClearRegister(dst_high, {&dst_addr, &offset_reg, &value_low, &value_high},
pinned);
- pinned = pinned |
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value_low, value_high);
+ pinned = pinned | LiftoffRegList{dst_addr, offset_reg, value_low, value_high};
// Make sure that {result}, if it exists, also does not overlap with
// {dst_low} and {dst_high}. We don't have to transfer the value stored in
@@ -1275,7 +1272,7 @@ void LiftoffAssembler::AtomicCompareExchange(
void (Assembler::*load)(Register, Register, Condition) = nullptr;
void (Assembler::*store)(Register, Register, Register, Condition) = nullptr;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg);
+ LiftoffRegList pinned = {dst_addr, offset_reg};
// We need to remember the high word of {result}, so we can set it to zero in
// the end if necessary.
Register result_high = no_reg;
@@ -1614,7 +1611,7 @@ inline void GeneratePopCnt(Assembler* assm, Register dst, Register src,
} // namespace liftoff
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
Register scratch1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
liftoff::GeneratePopCnt(this, dst, src, scratch1, scratch2);
@@ -1855,7 +1852,7 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
// overwrite the second src register before using it.
Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src2);
+ LiftoffRegList pinned = {dst, src2};
Register scratch1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
liftoff::GeneratePopCnt(this, dst.low_gp(), src1, scratch1, scratch2);
@@ -1866,6 +1863,14 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ldr(scratch, MemOperand(dst.gp(), offset));
+ add(scratch, scratch, Operand(Smi::FromInt(1)));
+ str(scratch, MemOperand(dst.gp(), offset));
+}
+
bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(this, ARMv8);
@@ -2924,12 +2929,12 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
QwNeonRegister tmp2 = right;
LiftoffRegList used_plus_dst =
- cache_state()->used_registers | LiftoffRegList::ForRegs(dst);
+ cache_state()->used_registers | LiftoffRegList{dst};
if (used_plus_dst.has(lhs) && used_plus_dst.has(rhs)) {
tmp1 = temps.AcquireQ();
// We only have 1 scratch Q register, so acquire another ourselves.
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
tmp2 = liftoff::GetSimd128Register(unused_pair);
} else if (used_plus_dst.has(lhs)) {
@@ -3055,7 +3060,7 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
if (cache_state()->is_used(src)) {
// We only have 1 scratch Q register, so try and reuse src.
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegList pinned = {src};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
mask = liftoff::GetSimd128Register(unused_pair);
}
@@ -3240,7 +3245,7 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
if (cache_state()->is_used(src)) {
// We only have 1 scratch Q register, so try and reuse src.
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegList pinned = {src};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
mask = liftoff::GetSimd128Register(unused_pair);
}
@@ -3555,7 +3560,7 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
if (cache_state()->is_used(src)) {
// We only have 1 scratch Q register, so try and reuse src.
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegList pinned = {src};
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
mask = liftoff::GetSimd128Register(unused_pair);
}
@@ -4172,7 +4177,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
UNREACHABLE();
}
args++;
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 70ed5de8f6..d883cc345e 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -395,7 +395,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
  // some performance testing to see how big an effect it will have.
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -636,8 +636,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
Register offset_reg, uintptr_t offset_imm,
LiftoffRegister value, LiftoffRegister result,
StoreType type, Binop op) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
// {LiftoffCompiler::AtomicBinop} ensures that {result} is unique.
@@ -823,8 +822,7 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, new_value);
+ LiftoffRegList pinned = {dst_addr, offset_reg, expected, new_value};
Register result_reg = result.gp();
if (pinned.has(result)) {
@@ -1191,6 +1189,23 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(SmiValuesAre31Bits());
+ Register scratch = temps.AcquireW();
+ Ldr(scratch, MemOperand(dst.gp(), offset));
+ Add(scratch, scratch, Operand(Smi::FromInt(1)));
+ Str(scratch, MemOperand(dst.gp(), offset));
+ } else {
+ Register scratch = temps.AcquireX();
+ SmiUntag(scratch, MemOperand(dst.gp(), offset));
+ Add(scratch, scratch, Operand(1));
+ SmiTag(scratch);
+ Str(scratch, MemOperand(dst.gp(), offset));
+ }
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -2511,7 +2526,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
VRegister temp = dst.fp();
if (dst == lhs || dst == rhs) {
// dst overlaps with lhs or rhs, so we need a temporary.
- temp = GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+ temp = GetUnusedRegister(kFpReg, LiftoffRegList{lhs, rhs}).fp();
}
UseScratchRegisterScope scope(this);
@@ -3171,7 +3186,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
Poke(liftoff::GetRegFromType(*args++, param_kind), arg_bytes);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 4ff56c5ec5..25666642b2 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -318,7 +318,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
- return is_reference(kind) ? kSystemPointerSize : element_size_bytes(kind);
+ return value_kind_full_size(kind);
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
@@ -534,7 +534,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
// We know that {src} is not a byte register, so the only pinned byte
// registers (beside the outer {pinned}) are {dst_addr} and potentially
// {offset_reg}.
- LiftoffRegList pinned_byte = pinned | LiftoffRegList::ForRegs(dst_addr);
+ LiftoffRegList pinned_byte = pinned | LiftoffRegList{dst_addr};
if (offset_reg != no_reg) pinned_byte.set(offset_reg);
Register byte_src =
GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned_byte)).gp();
@@ -624,7 +624,7 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
bool is_byte_store = type.size() == 1;
LiftoffRegList src_candidates =
is_byte_store ? liftoff::kByteRegs : kGpCacheRegList;
- pinned = pinned | LiftoffRegList::ForRegs(dst_addr, src, offset_reg);
+ pinned = pinned | LiftoffRegList{dst_addr, src, offset_reg};
// Ensure that {src} is a valid and otherwise unused register.
if (!src_candidates.has(src) || cache_state_.is_used(src)) {
@@ -678,8 +678,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssembler* lasm, Binop binop,
Register result_reg = is_64_bit_op ? result.low_gp() : result.gp();
bool is_byte_store = type.size() == 1;
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, value_reg, offset_reg);
+ LiftoffRegList pinned = {dst_addr, value_reg, offset_reg};
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
@@ -749,7 +748,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Binop op, Register dst_addr,
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
__ ClearRegister(eax, {&dst_addr, &offset_reg, &value_reg},
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg));
+ LiftoffRegList{dst_addr, offset_reg, value_reg});
bool is_byte_store = type.size() == 1;
Register scratch = no_reg;
@@ -763,8 +762,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Binop op, Register dst_addr,
scratch = kRootRegister;
} else {
scratch = __ GetUnusedRegister(
- kGpReg,
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg, eax))
+ kGpReg, LiftoffRegList{dst_addr, offset_reg, value_reg, eax})
.gp();
}
@@ -1038,15 +1036,14 @@ void LiftoffAssembler::AtomicCompareExchange(
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
ClearRegister(eax, {&dst_addr, &value_reg},
- LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg));
+ LiftoffRegList{dst_addr, value_reg, expected_reg});
if (expected_reg != eax) {
mov(eax, expected_reg);
expected_reg = eax;
}
bool is_byte_store = type.size() == 1;
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg);
+ LiftoffRegList pinned = {dst_addr, value_reg, expected_reg};
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
@@ -1161,15 +1158,25 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueKind kind) {
- DCHECK_EQ(0, element_size_bytes(kind) % kSystemPointerSize);
- int words = element_size_bytes(kind) / kSystemPointerSize;
+ DCHECK_EQ(0, SlotSizeForType(kind) % kSystemPointerSize);
+ int words = SlotSizeForType(kind) / kSystemPointerSize;
DCHECK_LE(1, words);
- do {
- liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
- liftoff::GetStackSlot(dst_offset));
- dst_offset -= kSystemPointerSize;
- src_offset -= kSystemPointerSize;
- } while (--words);
+ // Make sure we move the words in the correct order in case there is an
+ // overlap between src and dst.
+ if (src_offset < dst_offset) {
+ do {
+ liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
+ liftoff::GetStackSlot(dst_offset));
+ dst_offset -= kSystemPointerSize;
+ src_offset -= kSystemPointerSize;
+ } while (--words);
+ } else {
+ while (words--) {
+ liftoff::MoveStackValue(
+ this, liftoff::GetStackSlot(src_offset - words * kSystemPointerSize),
+ liftoff::GetStackSlot(dst_offset - words * kSystemPointerSize));
+ }
+ }
}
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
@@ -1361,7 +1368,7 @@ void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
// unconditionally, as the cache state will also be modified unconditionally.
assm->SpillRegisters(eax, edx);
if (rhs == eax || rhs == edx) {
- LiftoffRegList unavailable = LiftoffRegList::ForRegs(eax, edx, lhs);
+ LiftoffRegList unavailable{eax, edx, lhs};
Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
assm->mov(tmp, rhs);
rhs = tmp;
@@ -1462,7 +1469,7 @@ namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
void (Assembler::*emit_shift)(Register)) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src, amount);
+ LiftoffRegList pinned = {dst, src, amount};
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
@@ -1551,7 +1558,7 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
// First, compute the low half of the result, potentially into a temporary dst
// register if {dst.low_gp()} equals {rhs.low_gp()} or any register we need to
// keep alive for computing the upper half.
- LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp(), rhs);
+ LiftoffRegList keep_alive{lhs.high_gp(), rhs};
Register dst_low = keep_alive.has(dst.low_gp())
? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
: dst.low_gp();
@@ -1560,7 +1567,7 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*op)(dst_low, rhs.low_gp());
// Now compute the upper half, while keeping alive the previous result.
- keep_alive = LiftoffRegList::ForRegs(dst_low, rhs.high_gp());
+ keep_alive = LiftoffRegList{dst_low, rhs.high_gp()};
Register dst_high = keep_alive.has(dst.high_gp())
? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
: dst.high_gp();
@@ -1693,7 +1700,7 @@ inline void Emit64BitShiftOperation(
LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
Register amount, void (TurboAssembler::*emit_shift)(Register, Register)) {
// Temporary registers cannot overlap with {dst}.
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
constexpr size_t kMaxRegMoves = 3;
base::SmallVector<LiftoffAssembler::ParallelRegisterMoveTuple, kMaxRegMoves>
@@ -1862,6 +1869,10 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ add(Operand(dst.gp(), offset), Immediate(Smi::FromInt(1)));
+}
+
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2232,7 +2243,7 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
}
CpuFeatureScope feature(assm, SSE4_1);
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
+ LiftoffRegList pinned = {src, dst};
DoubleRegister rounded =
pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
DoubleRegister converted_back =
@@ -2271,7 +2282,7 @@ inline bool EmitSatTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
Label not_nan;
Label src_positive;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
+ LiftoffRegList pinned = {src, dst};
DoubleRegister rounded =
pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
DoubleRegister converted_back =
@@ -2379,7 +2390,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
cvtsi2ss(dst.fp(), src.gp());
return true;
case kExprF32UConvertI32: {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
+ LiftoffRegList pinned = {dst, src};
Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
Cvtui2ss(dst.fp(), src.gp(), scratch);
return true;
@@ -2394,7 +2405,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
Cvtsi2sd(dst.fp(), src.gp());
return true;
case kExprF64UConvertI32: {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
+ LiftoffRegList pinned = {dst, src};
Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
Cvtui2sd(dst.fp(), src.gp(), scratch);
return true;
@@ -2689,8 +2700,7 @@ template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
void EmitSimdShiftOp(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister operand, LiftoffRegister count) {
static constexpr RegClass tmp_rc = reg_class_for(kI32);
- LiftoffRegister tmp =
- assm->GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(count));
+ LiftoffRegister tmp = assm->GetUnusedRegister(tmp_rc, LiftoffRegList{count});
constexpr int mask = (1 << width) - 1;
assm->mov(tmp.gp(), count.gp());
@@ -2722,8 +2732,7 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, LiftoffRegList{dst}).gp();
assm->xor_(tmp, tmp);
assm->mov(dst.gp(), Immediate(1));
assm->Ptest(src.fp(), src.fp());
@@ -2737,8 +2746,7 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
base::Optional<CpuFeatureScope> sse_scope;
if (feature.has_value()) sse_scope.emplace(assm, *feature);
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, LiftoffRegList{dst}).gp();
XMMRegister tmp_simd = liftoff::kScratchDoubleReg;
assm->mov(tmp, Immediate(1));
assm->xor_(dst.gp(), dst.gp());
@@ -2903,8 +2911,7 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
Register scratch = GetUnusedRegister(RegClass::kGpReg, {}).gp();
XMMRegister tmp =
- GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, src))
- .fp();
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList{dst, src}).fp();
I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp, scratch);
}
@@ -3141,8 +3148,8 @@ void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
} else {
// 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
- LiftoffRegister tmp = GetUnusedRegister(
- RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
+ LiftoffRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList{lhs, rhs});
I64x2GtS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
movaps(dst.fp(), tmp.fp());
} else {
@@ -3160,8 +3167,8 @@ void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
} else if (CpuFeatures::IsSupported(SSE4_2)) {
// 2. SSE4_2, dst != lhs.
if (dst == lhs) {
- LiftoffRegister tmp = GetUnusedRegister(RegClass::kFpReg, {rhs},
- LiftoffRegList::ForRegs(lhs));
+ LiftoffRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, {rhs}, LiftoffRegList{lhs});
// macro-assembler uses kScratchDoubleReg, so don't use it.
I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
movaps(dst.fp(), tmp.fp());
@@ -3171,8 +3178,8 @@ void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
} else {
// 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
- LiftoffRegister tmp = GetUnusedRegister(
- RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
+ LiftoffRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList{lhs, rhs});
I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
movaps(dst.fp(), tmp.fp());
} else {
@@ -3315,9 +3322,9 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList{rhs});
LiftoffRegister tmp_simd =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ GetUnusedRegister(kFpReg, LiftoffRegList{dst, lhs});
I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), tmp.gp(), liftoff::kScratchDoubleReg,
tmp_simd.fp());
}
@@ -3331,9 +3338,9 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList{rhs}).gp();
XMMRegister tmp_simd =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ GetUnusedRegister(kFpReg, LiftoffRegList{dst, lhs}).fp();
I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
tmp_simd);
}
@@ -3346,9 +3353,9 @@ void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList{rhs}).gp();
XMMRegister tmp_simd =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ GetUnusedRegister(kFpReg, LiftoffRegList{dst, lhs}).fp();
I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
tmp_simd);
}
@@ -3813,10 +3820,9 @@ void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
XMMRegister tmp =
- GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, lhs))
- .fp();
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList{dst, lhs}).fp();
Register scratch =
- GetUnusedRegister(RegClass::kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ GetUnusedRegister(RegClass::kGpReg, LiftoffRegList{rhs}).gp();
I64x2ShrS(dst.fp(), lhs.fp(), rhs.gp(), liftoff::kScratchDoubleReg, tmp,
scratch);
@@ -3856,9 +3862,9 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp1 =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs});
LiftoffRegister tmp2 =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs, tmp1});
I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
@@ -4126,8 +4132,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
static constexpr RegClass tmp_rc = reg_class_for(kS128);
- DoubleRegister tmp =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, src)).fp();
+ DoubleRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList{dst, src}).fp();
// NAN->0, negative->0.
Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
if (CpuFeatures::IsSupported(AVX)) {
@@ -4537,7 +4542,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, esp, arg_bytes, *args++, param_kind);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 6691c7dd32..191fb71b2f 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -391,6 +391,10 @@ enum MergeAllowConstants : bool {
kConstantsAllowed = true,
kConstantsNotAllowed = false
};
+enum MergeAllowRegisters : bool {
+ kRegistersAllowed = true,
+ kRegistersNotAllowed = false
+};
enum ReuseRegisters : bool {
kReuseRegisters = true,
kNoReuseRegisters = false
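// Illustrative sketch (hypothetical names, simplified) of the bool-backed enum
// idiom used for MergeAllowConstants/MergeAllowRegisters above: call sites get
// a self-documenting name instead of a bare true/false argument.
enum AllowRegisters : bool { kAllowed = true, kNotAllowed = false };

constexpr int UsableRegisterCount(int candidates, AllowRegisters allow) {
  // The enum converts to its underlying bool, so it can be branched on
  // directly, just like the flags passed into InitMergeRegion.
  return allow ? candidates : 0;
}

static_assert(UsableRegisterCount(3, kNotAllowed) == 0,
              "kNotAllowed suppresses register use");
static_assert(UsableRegisterCount(3, kAllowed) == 3,
              "kAllowed keeps all candidates");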
@@ -399,6 +403,7 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
const VarState* source, VarState* target, uint32_t count,
MergeKeepStackSlots keep_stack_slots,
MergeAllowConstants allow_constants,
+ MergeAllowRegisters allow_registers,
ReuseRegisters reuse_registers, LiftoffRegList used_regs) {
RegisterReuseMap register_reuse_map;
for (const VarState* source_end = source + count; source < source_end;
@@ -409,18 +414,21 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
continue;
}
base::Optional<LiftoffRegister> reg;
- // First try: Keep the same register, if it's free.
- if (source->is_reg() && state->is_free(source->reg())) {
- reg = source->reg();
- }
- // Second try: Use the same register we used before (if we reuse registers).
- if (!reg && reuse_registers) {
- reg = register_reuse_map.Lookup(source->reg());
- }
- // Third try: Use any free register.
- RegClass rc = reg_class_for(source->kind());
- if (!reg && state->has_unused_register(rc, used_regs)) {
- reg = state->unused_register(rc, used_regs);
+ if (allow_registers) {
+ // First try: Keep the same register, if it's free.
+ if (source->is_reg() && state->is_free(source->reg())) {
+ reg = source->reg();
+ }
+ // Second try: Use the same register we used before (if we reuse
+ // registers).
+ if (!reg && reuse_registers) {
+ reg = register_reuse_map.Lookup(source->reg());
+ }
+ // Third try: Use any free register.
+ RegClass rc = reg_class_for(source->kind());
+ if (!reg && state->has_unused_register(rc, used_regs)) {
+ reg = state->unused_register(rc, used_regs);
+ }
}
if (!reg) {
// No free register; make this a stack slot.
@@ -469,9 +477,17 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
for (auto& src : base::VectorOf(source_begin, num_locals)) {
if (src.is_reg()) used_regs.set(src.reg());
}
- for (auto& src :
- base::VectorOf(source_begin + stack_base + discarded, arity)) {
- if (src.is_reg()) used_regs.set(src.reg());
+ // If there is more than one operand in the merge region, a stack-to-stack
+ // move can interfere with a register reload, which would not be handled
+ // correctly by the StackTransferRecipe. To avoid this, spill all registers in
+ // this region.
+ MergeAllowRegisters allow_registers =
+ arity <= 1 ? kRegistersAllowed : kRegistersNotAllowed;
+ if (allow_registers) {
+ for (auto& src :
+ base::VectorOf(source_begin + stack_base + discarded, arity)) {
+ if (src.is_reg()) used_regs.set(src.reg());
+ }
}
// Initialize the merge region. If this region moves, try to turn stack slots
@@ -480,7 +496,8 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
discarded == 0 ? kKeepStackSlots : kTurnStackSlotsIntoRegisters;
InitMergeRegion(this, source_begin + stack_base + discarded,
target_begin + stack_base, arity, keep_merge_stack_slots,
- kConstantsNotAllowed, kNoReuseRegisters, used_regs);
+ kConstantsNotAllowed, allow_registers, kNoReuseRegisters,
+ used_regs);
// Shift spill offsets down to keep slots contiguous.
int offset = stack_base == 0 ? StaticStackFrameSize()
: source.stack_state[stack_base - 1].offset();
@@ -493,7 +510,8 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// Initialize the locals region. Here, stack slots stay stack slots (because
// they do not move). Try to keep register in registers, but avoid duplicates.
InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
- kConstantsNotAllowed, kNoReuseRegisters, used_regs);
+ kConstantsNotAllowed, kRegistersAllowed, kNoReuseRegisters,
+ used_regs);
// Consistency check: All the {used_regs} are really in use now.
DCHECK_EQ(used_regs, used_registers & used_regs);
@@ -503,7 +521,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// source region, ensure to use the same register twice in the target region.
InitMergeRegion(this, source_begin + num_locals, target_begin + num_locals,
stack_depth, kKeepStackSlots, kConstantsAllowed,
- kReuseRegisters, used_regs);
+ kRegistersAllowed, kReuseRegisters, used_regs);
}
void LiftoffAssembler::CacheState::Steal(const CacheState& source) {
@@ -721,8 +739,8 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
namespace {
bool SlotInterference(const VarState& a, const VarState& b) {
return a.is_stack() && b.is_stack() &&
- b.offset() > a.offset() - element_size_bytes(a.kind()) &&
- b.offset() - element_size_bytes(b.kind()) < a.offset();
+ b.offset() > a.offset() - value_kind_size(a.kind()) &&
+ b.offset() - value_kind_size(b.kind()) < a.offset();
}
bool SlotInterference(const VarState& a, base::Vector<const VarState> v) {
@@ -785,7 +803,7 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
transfers.TransferStackSlot(target.stack_state[target_stack_base + i],
cache_state_.stack_state[stack_base + i]);
DCHECK(!SlotInterference(
- target.stack_state[i],
+ target.stack_state[target_stack_base + i],
base::VectorOf(cache_state_.stack_state.data() + stack_base + i + 1,
arity - i - 1)));
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index b3cb11a61e..22c7f73acc 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -738,6 +738,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_sari(dst.gp(), dst.gp(), kSmiTagSize);
}
}
+ inline void IncrementSmi(LiftoffRegister dst, int offset);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
@@ -1591,8 +1592,7 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
return;
}
// Otherwise, we need a temporary register.
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
(assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
assm->Move(dst.low_gp(), tmp, kI32);
@@ -1619,8 +1619,7 @@ void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
return;
}
// Otherwise, we need a temporary register.
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, LiftoffRegList{lhs}).gp();
(assm->*op)(tmp, lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
assm->Move(dst.low_gp(), tmp, kI32);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 4c1931cb3f..34b51e37c6 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -293,16 +293,16 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// Decode errors are ok.
if (reason == kDecodeError) return;
- // Missing CPU features are also generally OK for now.
- if (reason == kMissingCPUFeature) return;
-
// --liftoff-only ensures that tests actually exercise the Liftoff path
- // without bailing out. Bailing out due to (simulated) lack of CPU support
- // is okay though (see above).
+ // without bailing out. We also fail for missing CPU support, to avoid
+ // running any TurboFan code under --liftoff-only.
if (FLAG_liftoff_only) {
FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail);
}
+ // Missing CPU features are generally OK, except with --liftoff-only.
+ if (reason == kMissingCPUFeature) return;
+
// If --enable-testing-opcode-in-wasm is set, we are expected to bailout with
// "testing opcode".
if (FLAG_enable_testing_opcode_in_wasm &&
@@ -546,7 +546,7 @@ class LiftoffCompiler {
int GetFeedbackVectorSlots() const {
// The number of instructions is capped by max function size.
STATIC_ASSERT(kV8MaxWasmFunctionSize < std::numeric_limits<int>::max());
- return static_cast<int>(num_call_ref_instructions_) * 2;
+ return static_cast<int>(num_call_instructions_) * 2;
}
void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
@@ -671,9 +671,8 @@ class LiftoffCompiler {
LiftoffRegister reg =
LoadToReg(descriptor_->GetInputLocation(input_idx), {});
if (needs_pair) {
- LiftoffRegister reg2 =
- LoadToReg(descriptor_->GetInputLocation(input_idx + 1),
- LiftoffRegList::ForRegs(reg));
+ LiftoffRegister reg2 = LoadToReg(
+ descriptor_->GetInputLocation(input_idx + 1), LiftoffRegList{reg});
reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
}
__ PushRegister(kind, reg);
@@ -728,6 +727,10 @@ class LiftoffCompiler {
void TierupCheck(FullDecoder* decoder, WasmCodePosition position,
int budget_used) {
+ // We should always decrement the budget, and we don't expect integer
+ // overflows in the budget calculation.
+ DCHECK_LE(1, budget_used);
+
if (for_debugging_ != kNoDebugging) return;
CODE_COMMENT("tierup check");
// We never want to blow the entire budget at once.
@@ -1366,7 +1369,7 @@ class LiftoffCompiler {
}
// Compare two arbitrary values.
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
+ Register lhs = __ PopToRegister(LiftoffRegList{rhs}).gp();
__ emit_cond_jump(cond, false_dst, kI32, lhs, rhs);
}
@@ -1490,10 +1493,10 @@ class LiftoffCompiler {
// Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
- param_bytes += element_size_bytes(param_kind);
+ param_bytes += value_kind_size(param_kind);
}
int out_arg_bytes =
- out_argument_kind == kVoid ? 0 : element_size_bytes(out_argument_kind);
+ out_argument_kind == kVoid ? 0 : value_kind_size(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes);
__ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref);
@@ -1556,7 +1559,7 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
if (V8_UNLIKELY(nondeterminism_)) {
- auto pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
@@ -1602,7 +1605,7 @@ class LiftoffCompiler {
// External references for potentially trapping conversions return int.
auto sig = MakeSig::Returns(kI32).Params(src_kind);
LiftoffRegister ret_reg =
- __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
+ __ GetUnusedRegister(kGpReg, LiftoffRegList{dst});
LiftoffRegister dst_regs[] = {ret_reg, dst};
GenerateCCall(dst_regs, &sig, dst_kind, &src, ext_ref);
__ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
@@ -1781,7 +1784,7 @@ class LiftoffCompiler {
LiftoffRegister lhs = __ PopToRegister();
// Either reuse {lhs} for {dst}, or choose a register (pair) which does
// not overlap, for easier code generation.
- LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
+ LiftoffRegList pinned = {lhs};
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, pinned)
: __ GetUnusedRegister(result_rc, pinned);
@@ -1803,7 +1806,7 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister rhs = __ PopToRegister();
- LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs});
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
: __ GetUnusedRegister(result_rc, {});
@@ -1812,7 +1815,7 @@ class LiftoffCompiler {
CallEmitFn(fn, dst, lhs, rhs);
if (V8_UNLIKELY(nondeterminism_)) {
- auto pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
@@ -1828,10 +1831,9 @@ class LiftoffCompiler {
Label* trap_by_zero,
Label* trap_unrepresentable = nullptr) {
// Cannot emit native instructions, build C call.
- LiftoffRegister ret =
- __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister ret = __ GetUnusedRegister(kGpReg, LiftoffRegList{dst});
LiftoffRegister tmp =
- __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
+ __ GetUnusedRegister(kGpReg, LiftoffRegList{dst, ret});
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
auto sig = MakeSig::Returns(kI32).Params(kI64, kI64);
@@ -2590,7 +2592,11 @@ class LiftoffCompiler {
if (target->is_loop()) {
DCHECK(target->label.get()->is_bound());
int jump_distance = __ pc_offset() - target->label.get()->pos();
- TierupCheck(decoder, decoder->position(), jump_distance);
+ // For now we just add one as the cost for the tier up check. We might
+ // want to revisit this when tuning tiering budgets later.
+ const int kTierUpCheckCost = 1;
+ TierupCheck(decoder, decoder->position(),
+ jump_distance + kTierUpCheckCost);
} else {
// To estimate time spent in this function more accurately, we could
// increment the tiering budget on forward jumps. However, we don't
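// Sketch of the budget arithmetic used for loop back edges above (constant and
// helper names are hypothetical): the jump distance is charged plus a flat 1
// for the check itself, which keeps the DCHECK_LE(1, budget_used) in
// TierupCheck satisfied even for a zero-length jump.
constexpr int kSketchTierUpCheckCost = 1;

constexpr int BudgetUsedForLoopBackEdge(int jump_distance) {
  return jump_distance + kSketchTierUpCheckCost;
}

static_assert(BudgetUsedForLoopBackEdge(0) >= 1,
              "the tiering budget is always decremented");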
@@ -3012,7 +3018,7 @@ class LiftoffCompiler {
if (index == no_reg) return;
CODE_COMMENT("load from memory");
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned = {index};
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -3056,7 +3062,7 @@ class LiftoffCompiler {
if (index == no_reg) return;
uintptr_t offset = imm.offset;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned = {index};
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3196,7 +3202,7 @@ class LiftoffCompiler {
LiftoffRegister result{mem_size};
if (env_->module->is_memory64 && kNeedI64RegPair) {
LiftoffRegister high_word =
- __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(mem_size));
+ __ GetUnusedRegister(kGpReg, LiftoffRegList{mem_size});
// The high word is always 0 on 32-bit systems.
__ LoadConstant(high_word, WasmValue{uint32_t{0}});
result = LiftoffRegister::ForPair(mem_size, high_word.gp());
@@ -3433,24 +3439,22 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src3 = __ PopToRegister();
- LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
- LiftoffRegister src1 =
- __ PopToRegister(LiftoffRegList::ForRegs(src3, src2));
+ LiftoffRegister src2 = __ PopToRegister(LiftoffRegList{src3});
+ LiftoffRegister src1 = __ PopToRegister(LiftoffRegList{src3, src2});
// Reusing src1 and src2 will complicate codegen for select for some
// backend, so we allow only reusing src3 (the mask), and pin src1 and src2.
- LiftoffRegister dst =
- src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {src3},
- LiftoffRegList::ForRegs(src1, src2))
- : __ GetUnusedRegister(result_rc, {});
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {src3},
+ LiftoffRegList{src1, src2})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
if (V8_UNLIKELY(nondeterminism_)) {
- auto pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
(result_lane_kind == kF32 || result_lane_kind == kF64)) {
- CheckS128Nan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
+ CheckS128Nan(dst, LiftoffRegList{src1, src2, src3, dst},
result_lane_kind);
}
}
@@ -3495,7 +3499,7 @@ class LiftoffCompiler {
GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
if (V8_UNLIKELY(nondeterminism_)) {
- auto pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegList pinned = {dst};
CheckS128Nan(dst, pinned, result_lane_kind);
}
__ PushRegister(kS128, dst);
@@ -4039,13 +4043,12 @@ class LiftoffCompiler {
// Does not work for arm
LiftoffRegister src2 = __ PopToRegister();
LiftoffRegister src1 = (src1_rc == src2_rc || pin_src2)
- ? __ PopToRegister(LiftoffRegList::ForRegs(src2))
+ ? __ PopToRegister(LiftoffRegList{src2})
: __
PopToRegister();
LiftoffRegister dst =
(src2_rc == result_rc || pin_src2)
- ? __ GetUnusedRegister(result_rc, {src1},
- LiftoffRegList::ForRegs(src2))
+ ? __ GetUnusedRegister(result_rc, {src1}, LiftoffRegList{src2})
: __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kS128, dst);
@@ -4127,7 +4130,7 @@ class LiftoffCompiler {
}
static constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffRegister rhs = __ PopToRegister();
- LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs});
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
uint8_t shuffle[kSimd128Size];
@@ -4479,7 +4482,7 @@ class LiftoffCompiler {
full_index, {}, kDoForceCheck);
if (index == no_reg) return;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned = {index};
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
CODE_COMMENT("atomic load from memory");
@@ -4548,7 +4551,7 @@ class LiftoffCompiler {
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
if (index == no_reg) return;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned = {index};
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
@@ -4625,11 +4628,11 @@ class LiftoffCompiler {
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index_reg =
- BoundsCheckMem(decoder, element_size_bytes(kind), imm.offset,
- full_index, {}, kDoForceCheck);
+ BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
+ {}, kDoForceCheck);
if (index_reg == no_reg) return;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
- AlignmentCheckMem(decoder, element_size_bytes(kind), imm.offset, index_reg,
+ LiftoffRegList pinned = {index_reg};
+ AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
pinned);
uintptr_t offset = imm.offset;
@@ -4675,7 +4678,7 @@ class LiftoffCompiler {
Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
+ LiftoffRegList pinned = {index_reg};
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
@@ -4932,7 +4935,8 @@ class LiftoffCompiler {
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Scale the seg_index for the array access.
- __ LoadConstant(seg_index, WasmValue(imm.index << element_size_log2(kI32)));
+ __ LoadConstant(seg_index,
+ WasmValue(imm.index << value_kind_size_log2(kI32)));
// Set the length of the segment to '0' to drop it.
LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -5189,7 +5193,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back(1);
LiftoffRegister obj(kReturnRegister0);
- LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
+ LiftoffRegList pinned = {obj};
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
@@ -5267,7 +5271,7 @@ class LiftoffCompiler {
WasmArray::MaxLength(imm.array_type));
}
ValueKind elem_kind = imm.array_type->element_type().kind();
- int elem_size = element_size_bytes(elem_kind);
+ int elem_size = value_kind_size(elem_kind);
// Allocate the array.
{
LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
@@ -5292,7 +5296,7 @@ class LiftoffCompiler {
LiftoffRegister obj(kReturnRegister0);
if (initial_value_on_stack) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
+ LiftoffRegList pinned = {obj};
LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
@@ -5302,9 +5306,9 @@ class LiftoffCompiler {
offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length;
- if (element_size_log2(elem_kind) != 0) {
+ if (value_kind_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(),
- element_size_log2(elem_kind));
+ value_kind_size_log2(elem_kind));
}
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done;
@@ -5347,7 +5351,7 @@ class LiftoffCompiler {
BoundsCheckArray(decoder, array, index, pinned);
ValueKind elem_kind = imm.array_type->element_type().kind();
if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
- int elem_size_shift = element_size_log2(elem_kind);
+ int elem_size_shift = value_kind_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
@@ -5371,7 +5375,7 @@ class LiftoffCompiler {
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheckArray(decoder, array, index, pinned);
ValueKind elem_kind = imm.array_type->element_type().kind();
- int elem_size_shift = element_size_log2(elem_kind);
+ int elem_size_shift = value_kind_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
@@ -5421,7 +5425,7 @@ class LiftoffCompiler {
LiftoffRegister elem_size_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(elem_size_reg, WasmValue(element_size_bytes(elem_kind)));
+ __ LoadConstant(elem_size_reg, WasmValue(value_kind_size(elem_kind)));
LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
LiftoffRegister length_reg =
@@ -5445,11 +5449,12 @@ class LiftoffCompiler {
LiftoffRegister array(kReturnRegister0);
if (!CheckSupportedType(decoder, elem_kind, "array.init")) return;
for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(array);
+ LiftoffRegList pinned = {array};
LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
LiftoffRegister offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(offset_reg, WasmValue(i << element_size_log2(elem_kind)));
+ __ LoadConstant(offset_reg,
+ WasmValue(i << value_kind_size_log2(elem_kind)));
StoreObjectField(array.gp(), offset_reg.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
element, pinned, elem_kind);
@@ -5579,17 +5584,6 @@ class LiftoffCompiler {
__ LoadMap(tmp1.gp(), obj_reg.gp());
// {tmp1} now holds the object's map.
- if (decoder->module_->has_signature(rtt.type.ref_index())) {
- // Function case: currently, the only way for the type check to succeed is
- // that the function's map equals the rtt.
- __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
- rtt_reg.gp());
- __ bind(&match);
- return obj_reg;
- }
-
- // Array/struct case until the rest of the function.
-
// Check for rtt equality, and if not, check if the rtt is a struct/array
// rtt.
__ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp());
@@ -5928,6 +5922,17 @@ class LiftoffCompiler {
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+ // One slot would be enough for call_direct, but would make index
+ // computations much more complicated.
+ uintptr_t vector_slot = num_call_instructions_ * 2;
+ if (FLAG_wasm_speculative_inlining) {
+ base::MutexGuard mutex_guard(&decoder->module_->type_feedback.mutex);
+ decoder->module_->type_feedback.feedback_for_function[func_index_]
+ .positions[decoder->position()] =
+ static_cast<int>(num_call_instructions_);
+ num_call_instructions_++;
+ }
+
if (imm.index < env_->module->num_imported_functions) {
// A direct call to an imported function.
LiftoffRegList pinned;
@@ -5963,6 +5968,15 @@ class LiftoffCompiler {
FinishCall(decoder, &sig, call_descriptor);
}
} else {
+ // Inlining direct calls isn't speculative, but existence of the
+ // feedback vector currently depends on this flag.
+ if (FLAG_wasm_speculative_inlining) {
+ LiftoffRegister vector = __ GetUnusedRegister(kGpReg, {});
+ __ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind);
+ __ IncrementSmi(vector,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
+ static_cast<int>(vector_slot)));
+ }
// A direct call within this module just gets the current instance.
__ PrepareCall(&sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
@@ -5994,7 +6008,7 @@ class LiftoffCompiler {
// Pop the index. We'll modify the register's contents later.
Register index = __ PopToModifiableRegister().gp();
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned = {index};
// Get three temporary registers.
Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -6151,14 +6165,14 @@ class LiftoffCompiler {
__ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind);
LiftoffAssembler::VarState vector_var(kPointerKind, vector, 0);
LiftoffRegister index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- uintptr_t vector_slot = num_call_ref_instructions_ * 2;
+ uintptr_t vector_slot = num_call_instructions_ * 2;
{
base::MutexGuard mutex_guard(&decoder->module_->type_feedback.mutex);
decoder->module_->type_feedback.feedback_for_function[func_index_]
.positions[decoder->position()] =
- static_cast<int>(num_call_ref_instructions_);
+ static_cast<int>(num_call_instructions_);
}
- num_call_ref_instructions_++;
+ num_call_instructions_++;
__ LoadConstant(index, WasmValue::ForUintPtr(vector_slot));
LiftoffAssembler::VarState index_var(kIntPtrKind, index, 0);
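// Hypothetical model (simplified types, illustrative only) of the feedback
// vector layout implied by GetFeedbackVectorSlots(): every feedback-collecting
// call instruction i -- call_ref and, under --wasm-speculative-inlining, also
// call_direct -- owns slots 2*i and 2*i+1, and the IncrementSmi emitted for a
// direct call bumps the counter in the first of its two slots.
#include <cstddef>
#include <cstdint>
#include <vector>

struct FeedbackVectorSketch {
  explicit FeedbackVectorSketch(std::size_t num_call_instructions)
      : slots(num_call_instructions * 2, 0) {}

  void OnDirectCall(std::size_t call_index) { ++slots[2 * call_index]; }

  std::vector<int64_t> slots;
};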
@@ -6468,7 +6482,7 @@ class LiftoffCompiler {
Register instance = __ cache_state()->cached_instance;
if (instance == no_reg) {
instance = __ cache_state()->TrySetCachedInstanceRegister(
- pinned | LiftoffRegList::ForRegs(fallback));
+ pinned | LiftoffRegList{fallback});
if (instance == no_reg) instance = fallback;
__ LoadInstanceFromFrame(instance);
}
@@ -6534,9 +6548,10 @@ class LiftoffCompiler {
// Current number of exception refs on the stack.
int num_exceptions_ = 0;
- // Number of {call_ref} instructions encountered. While compiling, also
- // index of the next {call_ref}. Used for indexing type feedback.
- uintptr_t num_call_ref_instructions_ = 0;
+ // Number of feedback-collecting call instructions encountered. While
+ // compiling, also index of the next such instruction. Used for indexing type
+ // feedback.
+ uintptr_t num_call_instructions_ = 0;
int32_t* max_steps_;
int32_t* nondeterminism_;
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 69a7350fc5..e9e6fd5d70 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -347,6 +347,17 @@ class LiftoffRegList {
constexpr LiftoffRegList() = default;
+ // Allow to construct LiftoffRegList from a number of
+ // {Register|DoubleRegister|LiftoffRegister}.
+ template <
+ typename... Regs,
+ typename = std::enable_if_t<std::conjunction_v<std::disjunction<
+ std::is_same<Register, Regs>, std::is_same<DoubleRegister, Regs>,
+ std::is_same<LiftoffRegister, Regs>>...>>>
+ constexpr LiftoffRegList(Regs... regs) {
+ (..., set(regs));
+ }
+
constexpr Register set(Register reg) {
return set(LiftoffRegister(reg)).gp();
}
@@ -461,13 +472,6 @@ class LiftoffRegList {
return LiftoffRegList(bits);
}
- template <typename... Regs>
- static LiftoffRegList ForRegs(Regs... regs) {
- LiftoffRegList list;
- for (LiftoffRegister reg : {LiftoffRegister(regs)...}) list.set(reg);
- return list;
- }
-
private:
// Unchecked constructor. Only use for valid bits.
explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
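// Standalone sketch of the new construction style, using simplified stand-in
// types (Gp/Fp and RegListSketch are hypothetical): a constrained variadic
// constexpr constructor lets call sites write RegListSketch{dst, lhs} where a
// static helper like ForRegs(dst, lhs) was needed before.
#include <cstdint>
#include <type_traits>

struct Gp { int code; };
struct Fp { int code; };

class RegListSketch {
 public:
  constexpr RegListSketch() = default;

  template <typename... Regs,
            typename = std::enable_if_t<std::conjunction_v<
                std::disjunction<std::is_same<Gp, Regs>,
                                 std::is_same<Fp, Regs>>...>>>
  constexpr RegListSketch(Regs... regs) {
    (..., set(regs));  // fold expression: set every register that was passed
  }

  constexpr void set(Gp reg) { gp_bits_ |= uint64_t{1} << reg.code; }
  constexpr void set(Fp reg) { fp_bits_ |= uint64_t{1} << reg.code; }

 private:
  uint64_t gp_bits_ = 0;
  uint64_t fp_bits_ = 0;
};

// Mirrors the call-site change applied throughout this patch, e.g.
// GetUnusedRegister(kGpReg, LiftoffRegList{dst, lhs}).
constexpr RegListSketch pinned{Gp{3}, Fp{5}};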
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
index b62fee4e04..7a88ff5c03 100644
--- a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -299,7 +299,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -599,8 +599,7 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::Atomic##name( \
Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
LiftoffRegister value, LiftoffRegister result, StoreType type) { \
- LiftoffRegList pinned = \
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result); \
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result}; \
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
@@ -655,8 +654,7 @@ ATOMIC_BINOP_CASE(Xor, Xor, Xor, xor)
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
@@ -714,8 +712,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
@@ -790,8 +787,7 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg,
- expected, new_value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, expected, new_value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
@@ -1000,6 +996,15 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ SmiUntag(scratch, MemOperand(dst.gp(), offset));
+ Add_d(scratch, scratch, Operand(1));
+ SmiTag(scratch);
+ St_d(scratch, MemOperand(dst.gp(), offset));
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul_w(dst, lhs, rhs);
}
@@ -1320,10 +1325,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
return true;
case kExprI32SConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s(rounded.fp(), src.fp());
@@ -1343,10 +1347,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32UConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s(rounded.fp(), src.fp());
@@ -1364,10 +1367,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32SConvertF64: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d(rounded.fp(), src.fp());
@@ -1381,10 +1383,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32UConvertF64: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d(rounded.fp(), src.fp());
@@ -1406,10 +1407,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
return true;
case kExprI64SConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s(rounded.fp(), src.fp());
@@ -1438,10 +1438,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI64SConvertF64: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d(rounded.fp(), src.fp());
@@ -1473,8 +1472,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
movfr2gr_d(dst.gp(), src.fp());
return true;
case kExprF32SConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
movgr2fr_w(scratch.fp(), src.gp());
ffint_s_w(dst.fp(), scratch.fp());
return true;
@@ -1489,8 +1487,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::FmoveLow(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
movgr2fr_w(scratch.fp(), src.gp());
ffint_d_w(dst.fp(), scratch.fp());
return true;
@@ -1635,7 +1632,7 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs || dst == rhs) {
- tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
TurboAssembler::li(tmp, 1);
@@ -1658,7 +1655,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs.gp() || dst == rhs.gp()) {
- tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
TurboAssembler::li(tmp, 1);
@@ -2966,7 +2963,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 9c02cf3697..c76fd2f49e 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -428,7 +428,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -658,7 +658,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) {
- pinned = pinned | LiftoffRegList::ForRegs(dst_op.rm(), src);
+ pinned = pinned | LiftoffRegList{dst_op.rm(), src};
LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
// Save original value.
Move(tmp, src, type.value_type());
@@ -1023,7 +1023,7 @@ I32_SHIFTOP_I(shr, srl)
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
LiftoffRegister imm_reg =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ GetUnusedRegister(kGpRegPair, LiftoffRegList{dst, lhs});
int32_t imm_low_word = static_cast<int32_t>(imm);
int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
TurboAssembler::li(imm_reg.low_gp(), imm_low_word);
@@ -1092,7 +1092,7 @@ inline void Emit64BitShiftOperation(
void (TurboAssembler::*emit_shift)(Register, Register, Register, Register,
Register, Register, Register)) {
Label move, done;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src, amount);
+ LiftoffRegList pinned = {dst, src, amount};
// If some of destination registers are in use, get another, unused pair.
// That way we prevent overwriting some input registers while shifting.
@@ -1235,6 +1235,14 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ lw(scratch, MemOperand(dst.gp(), offset));
+ Addu(scratch, scratch, Operand(Smi::FromInt(1)));
+ sw(scratch, MemOperand(dst.gp(), offset));
+}
+
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src);
}
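// Simplified Smi model, illustrative only: with V8's zero tag, tagging is a
// plain left shift, so adding a tagged 1 to the tagged word (the strategy in
// the mips32 IncrementSmi above) agrees with untag/add/retag (the variant used
// on the other architectures in this patch). The real shift width depends on
// the build's Smi configuration.
#include <cstdint>

constexpr int kSketchSmiShift = 1;  // 31-bit Smi values in a 32-bit word
constexpr int32_t SmiTagSketch(int32_t value) {
  return value << kSketchSmiShift;
}
constexpr int32_t SmiUntagSketch(int32_t tagged) {
  return tagged >> kSketchSmiShift;
}

static_assert(SmiTagSketch(41) + SmiTagSketch(1) ==
                  SmiTagSketch(SmiUntagSketch(SmiTagSketch(41)) + 1),
              "tagged add of 1 and untag/add/retag produce the same Smi");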
@@ -1377,10 +1385,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::Move(dst.gp(), src.low_gp());
return true;
case kExprI32SConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
@@ -1400,10 +1407,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32UConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
@@ -1422,8 +1428,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32SConvertF64: {
- LiftoffRegister scratch =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kGpReg, LiftoffRegList{dst});
// Try a conversion to a signed integer.
trunc_w_d(kScratchDoubleReg, src.fp());
@@ -1442,9 +1447,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
@@ -1501,15 +1506,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::Mfhc1(dst.high_gp(), src.fp());
return true;
case kExprF32SConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
mtc1(src.gp(), scratch.fp());
cvt_s_w(dst.fp(), scratch.fp());
return true;
}
case kExprF32UConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
cvt_s_d(dst.fp(), dst.fp());
return true;
@@ -1521,15 +1524,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::FmoveLow(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
mtc1(src.gp(), scratch.fp());
cvt_d_w(dst.fp(), scratch.fp());
return true;
}
case kExprF64UConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
return true;
}
@@ -1614,7 +1615,7 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs || dst == rhs) {
- tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
TurboAssembler::li(tmp, 1);
@@ -1628,8 +1629,7 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- Register tmp =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(src, dst)).gp();
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList{src, dst}).gp();
sltiu(tmp, src.low_gp(), 1);
sltiu(dst, src.high_gp(), 1);
and_(dst, dst, tmp);
@@ -1666,8 +1666,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
Register tmp = dst;
if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
- tmp =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, lhs, rhs)).gp();
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList{dst, lhs, rhs}).gp();
}
// Write 1 initially in tmp register.
@@ -2989,7 +2988,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index c0f934c656..b4b238421d 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -415,7 +415,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -738,8 +738,7 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::Atomic##name( \
Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
LiftoffRegister value, LiftoffRegister result, StoreType type) { \
- LiftoffRegList pinned = \
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result); \
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result}; \
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
@@ -816,8 +815,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
@@ -893,8 +891,7 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg,
- expected, new_value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, expected, new_value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
@@ -1101,6 +1098,15 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ SmiUntag(scratch, MemOperand(dst.gp(), offset));
+ Daddu(scratch, scratch, Operand(1));
+ SmiTag(scratch);
+ Sd(scratch, MemOperand(dst.gp(), offset));
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -1463,10 +1469,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32);
return true;
case kExprI32SConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
@@ -1486,10 +1491,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32UConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
@@ -1507,10 +1511,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32SConvertF64: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
@@ -1524,10 +1527,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32UConvertF64: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
@@ -1549,10 +1551,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32);
return true;
case kExprI64SConvertF32: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
@@ -1581,10 +1582,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI64SConvertF64: {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+ GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
@@ -1616,8 +1616,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
dmfc1(dst.gp(), src.fp());
return true;
case kExprF32SConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
mtc1(src.gp(), scratch.fp());
cvt_s_w(dst.fp(), scratch.fp());
return true;
@@ -1632,8 +1631,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::FmoveLow(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
- LiftoffRegister scratch =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
mtc1(src.gp(), scratch.fp());
cvt_d_w(dst.fp(), scratch.fp());
return true;
@@ -1847,7 +1845,7 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs || dst == rhs) {
- tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
TurboAssembler::li(tmp, 1);
@@ -1870,7 +1868,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs.gp() || dst == rhs.gp()) {
- tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
TurboAssembler::li(tmp, 1);
@@ -3535,7 +3533,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index cb4efb7dd7..796ddaa4d1 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -228,7 +228,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -1142,7 +1142,7 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// return_val, return_type)
#define BINOP_LIST(V) \
V(f32_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
- , , , ROUND_F64_TO_F32, , void) \
+ , , , USE, , void) \
V(f64_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
, , , USE, , void) \
V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
@@ -1255,6 +1255,23 @@ bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
return false;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(SmiValuesAre31Bits());
+ Register scratch = temps.Acquire();
+ LoadS32(scratch, MemOperand(dst.gp(), offset), r0);
+ AddS64(scratch, scratch, Operand(Smi::FromInt(1)));
+ StoreU32(scratch, MemOperand(dst.gp(), offset), r0);
+ } else {
+ Register scratch = temps.Acquire();
+ SmiUntag(scratch, MemOperand(dst.gp(), offset), LeaveRC, r0);
+ AddS64(scratch, scratch, Operand(1));
+ SmiTag(scratch);
+ StoreU64(scratch, MemOperand(dst.gp(), offset), r0);
+ }
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -2953,7 +2970,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
UNREACHABLE();
}
args++;
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index cb6851d663..642a7d2a33 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -399,7 +399,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -657,8 +657,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
Register offset_reg, uintptr_t offset_imm,
LiftoffRegister value, LiftoffRegister result,
StoreType type, Binop op) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ LiftoffRegList pinned = {dst_addr, offset_reg, value, result};
Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
// Make sure that {result} is unique.
@@ -865,70 +864,81 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
type, liftoff::Binop::kExchange);
}
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ sync(); \
+ bind(&compareExchange); \
+ load_linked(result.gp(), MemOperand(temp0, 0)); \
+ BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
+ mv(temp2, new_value.gp()); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
+ bind(&exit); \
+ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, size, aligned) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ andi(temp1, temp0, aligned); \
+ Sub64(temp0, temp0, Operand(temp1)); \
+ Sll32(temp1, temp1, 3); \
+ sync(); \
+ bind(&compareExchange); \
+ load_linked(temp2, MemOperand(temp0, 0)); \
+ ExtractBits(result.gp(), temp2, temp1, size, false); \
+ ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
+ BranchShort(&exit, ne, temp2, Operand(result.gp())); \
+ InsertBits(temp2, new_value.gp(), temp1, size); \
+ store_conditional(temp2, MemOperand(temp0, 0)); \
+ BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
+ bind(&exit); \
+ sync(); \
+ } while (0)
+
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, new_value);
-
- Register result_reg = result.gp();
- if (pinned.has(result)) {
- result_reg = GetUnusedRegister(kGpReg, pinned).gp();
- }
-
- UseScratchRegisterScope temps(this);
-
- Register actual_addr = liftoff::CalculateActualAddress(
- this, dst_addr, offset_reg, offset_imm, temps.Acquire());
-
- Register store_result = temps.Acquire();
-
- Label retry;
- Label done;
- bind(&retry);
+ LiftoffRegList pinned = {dst_addr, offset_reg, expected, new_value, result};
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Add64(temp0, dst_op.rm(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
+ break;
case StoreType::kI32Store8:
- lbu(result_reg, actual_addr, 0);
- sync();
- Branch(&done, ne, result.gp(), Operand(expected.gp()));
- sync();
- sb(new_value.gp(), actual_addr, 0);
- sync();
- mv(store_result, zero_reg);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
break;
case StoreType::kI64Store16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
+ break;
case StoreType::kI32Store16:
- lhu(result_reg, actual_addr, 0);
- sync();
- Branch(&done, ne, result.gp(), Operand(expected.gp()));
- sync();
- sh(new_value.gp(), actual_addr, 0);
- sync();
- mv(store_result, zero_reg);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
break;
case StoreType::kI64Store32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, 32, 7);
+ break;
case StoreType::kI32Store:
- lr_w(true, true, result_reg, actual_addr);
- Branch(&done, ne, result.gp(), Operand(expected.gp()));
- sc_w(true, true, store_result, new_value.gp(), actual_addr);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
break;
case StoreType::kI64Store:
- lr_d(true, true, result_reg, actual_addr);
- Branch(&done, ne, result.gp(), Operand(expected.gp()));
- sc_d(true, true, store_result, new_value.gp(), actual_addr);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
default:
UNREACHABLE();
}
- bnez(store_result, &retry);
- bind(&done);
-
- if (result_reg != result.gp()) {
- mv(result.gp(), result_reg);
- }
}
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
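The two macros above rewrite AtomicCompareExchange as a load-reserved/store-conditional retry loop; the _EXT variant aligns the address down to the containing word, converts the byte offset into a bit offset, and uses ExtractBits/InsertBits to compare and update only the narrow lane before the conditional store. As a rough portable analogy of the retry-loop shape only (not the emitted instruction sequence), a minimal sketch using std::atomic, which RISC-V toolchains typically lower to a comparable lr/sc loop:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Portable sketch of the retry loop the macros emit; the real code operates
    // on raw load-reserved/store-conditional instructions, not std::atomic.
    uint32_t CompareExchange(std::atomic<uint32_t>& cell, uint32_t expected,
                             uint32_t new_value) {
      uint32_t observed = expected;
      // Retry while the conditional store "fails" (another thread intervened)
      // but the observed value still matches the expected one.
      while (!cell.compare_exchange_weak(observed, new_value,
                                         std::memory_order_seq_cst)) {
        if (observed != expected) break;  // mismatch: give up, keep old value
      }
      return observed;  // value seen before the (attempted) exchange
    }

    int main() {
      std::atomic<uint32_t> cell{7};
      printf("old=%u new=%u\n", CompareExchange(cell, 7, 9), cell.load());
      return 0;
    }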
void LiftoffAssembler::AtomicFence() { sync(); }
@@ -1300,23 +1310,41 @@ I64_BINOP_I(xor, Xor)
LiftoffRegister dst, LiftoffRegister src, Register amount) { \
instruction(dst.gp(), src.gp(), amount); \
}
-#define I64_SHIFTOP_I(name, instruction) \
- void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
- LiftoffRegister src, int amount) { \
- DCHECK(is_uint6(amount)); \
- instruction(dst.gp(), src.gp(), amount); \
- }
I64_SHIFTOP(shl, sll)
I64_SHIFTOP(sar, sra)
I64_SHIFTOP(shr, srl)
+#undef I64_SHIFTOP
-I64_SHIFTOP_I(shl, slli)
-I64_SHIFTOP_I(sar, srai)
-I64_SHIFTOP_I(shr, srli)
+void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
+ int amount) {
+ if (is_uint6(amount)) {
+ slli(dst.gp(), src.gp(), amount);
+ } else {
+ li(kScratchReg, amount);
+ sll(dst.gp(), src.gp(), kScratchReg);
+ }
+}
-#undef I64_SHIFTOP
-#undef I64_SHIFTOP_I
+void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
+ int amount) {
+ if (is_uint6(amount)) {
+ srai(dst.gp(), src.gp(), amount);
+ } else {
+ li(kScratchReg, amount);
+ sra(dst.gp(), src.gp(), kScratchReg);
+ }
+}
+
+void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
+ int amount) {
+ if (is_uint6(amount)) {
+ srli(dst.gp(), src.gp(), amount);
+ } else {
+ li(kScratchReg, amount);
+ srl(dst.gp(), src.gp(), kScratchReg);
+ }
+}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
@@ -1681,6 +1709,23 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
Branch(target, condition, scratch, Operand(zero_reg));
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(SmiValuesAre31Bits());
+ Register scratch = temps.Acquire();
+ Lw(scratch, MemOperand(dst.gp(), offset));
+ Add32(scratch, scratch, Operand(Smi::FromInt(1)));
+ Sw(scratch, MemOperand(dst.gp(), offset));
+ } else {
+ Register scratch = temps.Acquire();
+ SmiUntag(scratch, MemOperand(dst.gp(), offset));
+ Add64(scratch, scratch, Operand(1));
+ SmiTag(scratch);
+ Sd(scratch, MemOperand(dst.gp(), offset));
+ }
+}
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
@@ -1848,7 +1893,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
VU.set(kScratchReg, E8, m1);
VRegister temp =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp().toV();
+ GetUnusedRegister(kFpReg, LiftoffRegList{lhs, rhs}).fp().toV();
if (dst_v == lhs_v) {
vmv_vv(temp, lhs_v);
lhs_v = temp;
@@ -3673,7 +3718,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 9b7abea5f3..65bb3435ad 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -206,7 +206,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
switch (kind) {
case kS128:
- return element_size_bytes(kind);
+ return value_kind_size(kind);
default:
return kStackSlotSize;
}
@@ -409,9 +409,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kS128Load:
if (is_load_mem) {
- LoadV128LE(dst.fp(), src_op, r0, r1);
+ LoadV128LE(dst.fp(), src_op, r1, r0);
} else {
- LoadV128(dst.fp(), src_op, r0);
+ LoadV128(dst.fp(), src_op, r1);
}
break;
default:
@@ -478,7 +478,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kS128Store: {
if (is_store_mem) {
- StoreV128LE(src.fp(), dst_op, r0, r1);
+ StoreV128LE(src.fp(), dst_op, r1, r0);
} else {
StoreV128(src.fp(), dst_op, r1);
}
@@ -561,14 +561,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- Register tmp1 =
- GetUnusedRegister(
- kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
- .gp();
- Register tmp2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
- value, result, tmp1))
- .gp();
+ Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result})
+ .gp();
+ Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result, tmp1})
+ .gp();
if (!is_int20(offset_imm)) {
mov(ip, Operand(offset_imm));
@@ -663,14 +661,12 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- Register tmp1 =
- GetUnusedRegister(
- kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
- .gp();
- Register tmp2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
- value, result, tmp1))
- .gp();
+ Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result})
+ .gp();
+ Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result, tmp1})
+ .gp();
if (!is_int20(offset_imm)) {
mov(ip, Operand(offset_imm));
@@ -765,14 +761,12 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- Register tmp1 =
- GetUnusedRegister(
- kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
- .gp();
- Register tmp2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
- value, result, tmp1))
- .gp();
+ Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result})
+ .gp();
+ Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result, tmp1})
+ .gp();
if (!is_int20(offset_imm)) {
mov(ip, Operand(offset_imm));
@@ -867,14 +861,12 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- Register tmp1 =
- GetUnusedRegister(
- kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
- .gp();
- Register tmp2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
- value, result, tmp1))
- .gp();
+ Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result})
+ .gp();
+ Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result, tmp1})
+ .gp();
if (!is_int20(offset_imm)) {
mov(ip, Operand(offset_imm));
@@ -969,14 +961,12 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- Register tmp1 =
- GetUnusedRegister(
- kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
- .gp();
- Register tmp2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
- value, result, tmp1))
- .gp();
+ Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result})
+ .gp();
+ Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
+ value, result, tmp1})
+ .gp();
if (!is_int20(offset_imm)) {
mov(ip, Operand(offset_imm));
@@ -1707,6 +1697,23 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ UseScratchRegisterScope temps(this);
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(SmiValuesAre31Bits());
+ Register scratch = temps.Acquire();
+ LoadS32(scratch, MemOperand(dst.gp(), offset));
+ AddU32(scratch, Operand(Smi::FromInt(1)));
+ StoreU32(scratch, MemOperand(dst.gp(), offset));
+ } else {
+ Register scratch = temps.Acquire();
+ SmiUntag(scratch, MemOperand(dst.gp(), offset));
+ AddU64(scratch, Operand(1));
+ SmiTag(scratch);
+ StoreU64(scratch, MemOperand(dst.gp(), offset));
+ }
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -2532,12 +2539,10 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
* `EmitBinOp`. */ \
/* Make sure dst and temp are also unique. */ \
if (dest == src1 || dest == src2) { \
- dest = \
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src1, src2)).fp(); \
+ dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1, src2}).fp(); \
} \
Simd128Register temp = \
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, src1, src2)) \
- .fp(); \
+ GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp(); \
op(dest, src1, src2, kScratchDoubleReg, temp); \
/* Original dst register needs to be populated. */ \
if (dest != dst.fp()) { \
@@ -2554,21 +2559,21 @@ SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
-#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
- LiftoffRegister src) { \
- Simd128Register src1 = src.fp(); \
- Simd128Register dest = dst.fp(); \
- /* Make sure dst and temp are unique. */ \
- if (dest == src1) { \
- dest = GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src1)).fp(); \
- } \
- Simd128Register temp = \
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, src1)).fp(); \
- op(dest, src1, kScratchDoubleReg, temp); \
- if (dest != dst.fp()) { \
- vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
- } \
+#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ Simd128Register src1 = src.fp(); \
+ Simd128Register dest = dst.fp(); \
+ /* Make sure dst and temp are unique. */ \
+ if (dest == src1) { \
+ dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1}).fp(); \
+ } \
+ Simd128Register temp = \
+ GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1}).fp(); \
+ op(dest, src1, kScratchDoubleReg, temp); \
+ if (dest != dst.fp()) { \
+ vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
+ } \
}
SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
#undef EMIT_SIMD_EXT_ADD_PAIRWISE
@@ -2716,7 +2721,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
Simd128Register src2 = rhs.fp();
Simd128Register dest = dst.fp();
Simd128Register temp =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, src1, src2)).fp();
+ GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp();
I8x16Swizzle(dest, src1, src2, r0, r1, kScratchDoubleReg, temp);
}
@@ -2764,10 +2769,9 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
Simd128Register dest = dst.fp();
// Make sure temp registers are unique.
Simd128Register temp1 =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, s1, s2)).fp();
+ GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2}).fp();
Simd128Register temp2 =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dest, s1, s2, temp1))
- .fp();
+ GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2, temp1}).fp();
I16x8Q15MulRSatS(dest, s1, s2, kScratchDoubleReg, temp1, temp2);
}
@@ -2908,11 +2912,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
MultiPush(regs.GetGpList());
- MultiPushF64OrV128(regs.GetFpList());
+ MultiPushF64OrV128(regs.GetFpList(), ip);
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- MultiPopF64OrV128(regs.GetFpList());
+ MultiPopF64OrV128(regs.GetFpList(), ip);
MultiPop(regs.GetGpList());
}
@@ -2976,7 +2980,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
UNREACHABLE();
}
args++;
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 960e7ba273..fd3c6bc883 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -293,7 +293,7 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
- return is_reference(kind) ? kSystemPointerSize : element_size_bytes(kind);
+ return value_kind_full_size(kind);
}
bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
@@ -598,8 +598,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- LiftoffRegList dont_overwrite = cache_state()->used_registers |
- LiftoffRegList::ForRegs(dst_addr, offset_reg);
+ LiftoffRegList dont_overwrite =
+ cache_state()->used_registers | LiftoffRegList{dst_addr, offset_reg};
DCHECK(!dont_overwrite.has(result));
if (dont_overwrite.has(value)) {
// We cannot overwrite {value}, but the {value} register is changed in the
@@ -660,8 +660,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
// The cmpxchg instruction uses rax to store the old value of the
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
- LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
+ LiftoffRegList pinned = LiftoffRegList{dst_addr, offset_reg, value_reg};
__ ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
Operand dst_op = liftoff::GetMemOp(lasm, dst_addr, offset_reg, offset_imm);
@@ -798,7 +797,7 @@ void LiftoffAssembler::AtomicCompareExchange(
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
LiftoffRegList pinned =
- LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, value_reg);
+ LiftoffRegList{dst_addr, offset_reg, expected, value_reg};
ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
if (expected.gp() != rax) {
movq(rax, expected.gp());
@@ -872,21 +871,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
- size_t size = element_size_log2(kind);
- if (kind == kRef || kind == kOptRef || kind == kRtt) {
- // Pointers are uncompressed on the stack!
- size = kSystemPointerSizeLog2;
- }
- switch (size) {
- case 2:
+ switch (SlotSizeForType(kind)) {
+ case 4:
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
break;
- case 3:
+ case 8:
movq(kScratchRegister, src);
movq(dst, kScratchRegister);
break;
- case 4:
+ case 16:
Movdqu(kScratchDoubleReg, src);
Movdqu(dst, kScratchDoubleReg);
break;
@@ -1475,6 +1469,10 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
+ SmiAddConstant(Operand(dst.gp(), offset), Smi::FromInt(1));
+}
+
void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
movl(dst, src);
}
@@ -3429,9 +3427,9 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp1 =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs});
LiftoffRegister tmp2 =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs, tmp1});
I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
@@ -4092,7 +4090,7 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
int arg_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_kind);
- arg_bytes += element_size_bytes(param_kind);
+ arg_bytes += value_kind_size(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
diff --git a/deps/v8/src/wasm/canonical-types.cc b/deps/v8/src/wasm/canonical-types.cc
new file mode 100644
index 0000000000..43cac17589
--- /dev/null
+++ b/deps/v8/src/wasm/canonical-types.cc
@@ -0,0 +1,155 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/canonical-types.h"
+
+#include "src/wasm/wasm-engine.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+V8_EXPORT_PRIVATE TypeCanonicalizer* GetTypeCanonicalizer() {
+ return GetWasmEngine()->type_canonicalizer();
+}
+
+void TypeCanonicalizer::AddRecursiveGroup(WasmModule* module, uint32_t size) {
+ // Multiple threads could try to register recursive groups concurrently.
+ // TODO(manoskouk): Investigate if we can fine-grain the synchronization.
+ base::MutexGuard mutex_guard(&mutex_);
+ DCHECK_GE(module->types.size(), size);
+ uint32_t start_index = static_cast<uint32_t>(module->types.size()) - size;
+ CanonicalGroup group;
+ group.types.resize(size);
+ for (uint32_t i = 0; i < size; i++) {
+ group.types[i] = CanonicalizeTypeDef(module, module->types[start_index + i],
+ start_index);
+ }
+ int canonical_index = FindCanonicalGroup(group);
+ if (canonical_index >= 0) {
+    // Identical group found. Map new types to the old types' canonical
+ // representatives.
+ for (uint32_t i = 0; i < size; i++) {
+ module->isorecursive_canonical_type_ids[start_index + i] =
+ canonical_index + i;
+ }
+ } else {
+ // Identical group not found. Add new canonical representatives for the new
+ // types.
+ uint32_t first_canonical_index =
+ static_cast<uint32_t>(canonical_supertypes_.size());
+ canonical_supertypes_.resize(first_canonical_index + size);
+ for (uint32_t i = 0; i < size; i++) {
+ CanonicalType& canonical_type = group.types[i];
+ // Compute the canonical index of the supertype: If it is relative, we
+ // need to add {first_canonical_index}.
+ canonical_supertypes_[first_canonical_index + i] =
+ canonical_type.is_relative_supertype
+ ? canonical_type.type_def.supertype + first_canonical_index
+ : canonical_type.type_def.supertype;
+ module->isorecursive_canonical_type_ids[start_index + i] =
+ first_canonical_index + i;
+ }
+ canonical_groups_.emplace(group, first_canonical_index);
+ }
+}
+
+// An index in a type gets mapped to a relative index if it is inside the new
+// canonical group, or to its canonical representative if it is not.
+ValueType TypeCanonicalizer::CanonicalizeValueType(
+ const WasmModule* module, ValueType type,
+ uint32_t recursive_group_start) const {
+ if (!type.has_index()) return type;
+ return type.ref_index() >= recursive_group_start
+ ? ValueType::CanonicalWithRelativeIndex(
+ type.kind(), type.ref_index() - recursive_group_start)
+ : ValueType::FromIndex(
+ type.kind(),
+ module->isorecursive_canonical_type_ids[type.ref_index()]);
+}
+
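CanonicalizeValueType rewrites every type index so that two structurally identical recursive groups compare and hash equal regardless of where they sit in their modules: indices pointing into the group become group-relative, and indices pointing outside are replaced by their already-computed canonical ids. A minimal sketch of that mapping with plain integers standing in for ValueType and the module's canonical-id table (names here are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Indices at or above the group start become group-relative; everything
    // else is replaced by its already-known canonical id.
    uint32_t Canonicalize(uint32_t type_index, uint32_t group_start,
                          const std::vector<uint32_t>& canonical_ids,
                          bool* is_relative) {
      if (type_index >= group_start) {
        *is_relative = true;
        return type_index - group_start;  // position within the new group
      }
      *is_relative = false;
      return canonical_ids[type_index];  // previously canonicalized type
    }

    int main() {
      std::vector<uint32_t> canonical_ids = {5, 9};  // types 0 and 1 already mapped
      bool relative = false;
      assert(Canonicalize(3, /*group_start=*/2, canonical_ids, &relative) == 1);
      assert(relative);
      assert(Canonicalize(1, /*group_start=*/2, canonical_ids, &relative) == 9);
      assert(!relative);
      return 0;
    }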
+bool TypeCanonicalizer::IsCanonicalSubtype(uint32_t sub_index,
+ uint32_t super_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module) {
+ // Multiple threads could try to register and access recursive groups
+ // concurrently.
+ // TODO(manoskouk): Investigate if we can improve this synchronization.
+ base::MutexGuard mutex_guard(&mutex_);
+ uint32_t canonical_super =
+ super_module->isorecursive_canonical_type_ids[super_index];
+ uint32_t canonical_sub =
+ sub_module->isorecursive_canonical_type_ids[sub_index];
+ while (canonical_sub != kNoSuperType) {
+ if (canonical_sub == canonical_super) return true;
+ canonical_sub = canonical_supertypes_[canonical_sub];
+ }
+ return false;
+}
+
+// Map all type indices (including supertype) inside {type} to indices relative
+// to {recursive_group_start}.
+TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef(
+ const WasmModule* module, TypeDefinition type,
+ uint32_t recursive_group_start) {
+ uint32_t canonical_supertype = kNoSuperType;
+ bool is_relative_supertype = false;
+ if (type.supertype < recursive_group_start) {
+ canonical_supertype =
+ module->isorecursive_canonical_type_ids[type.supertype];
+ } else if (type.supertype != kNoSuperType) {
+ canonical_supertype = type.supertype - recursive_group_start;
+ is_relative_supertype = true;
+ }
+ TypeDefinition result;
+ switch (type.kind) {
+ case TypeDefinition::kFunction: {
+ const FunctionSig* original_sig = type.function_sig;
+ FunctionSig::Builder builder(&zone_, original_sig->return_count(),
+ original_sig->parameter_count());
+ for (ValueType ret : original_sig->returns()) {
+ builder.AddReturn(
+ CanonicalizeValueType(module, ret, recursive_group_start));
+ }
+ for (ValueType param : original_sig->parameters()) {
+ builder.AddParam(
+ CanonicalizeValueType(module, param, recursive_group_start));
+ }
+ result = TypeDefinition(builder.Build(), canonical_supertype);
+ break;
+ }
+ case TypeDefinition::kStruct: {
+ const StructType* original_type = type.struct_type;
+ StructType::Builder builder(&zone_, original_type->field_count());
+ for (uint32_t i = 0; i < original_type->field_count(); i++) {
+ builder.AddField(CanonicalizeValueType(module, original_type->field(i),
+ recursive_group_start),
+ original_type->mutability(i));
+ }
+ result = TypeDefinition(builder.Build(), canonical_supertype);
+ break;
+ }
+ case TypeDefinition::kArray: {
+ ValueType element_type = CanonicalizeValueType(
+ module, type.array_type->element_type(), recursive_group_start);
+ result = TypeDefinition(
+ zone_.New<ArrayType>(element_type, type.array_type->mutability()),
+ canonical_supertype);
+ break;
+ }
+ }
+
+ return {result, is_relative_supertype};
+}
+
+// Returns the index of the canonical representative of the first type in this
+// group, or -1 if an identical group does not exist.
+int TypeCanonicalizer::FindCanonicalGroup(CanonicalGroup& group) const {
+ auto element = canonical_groups_.find(group);
+ return element == canonical_groups_.end() ? -1 : element->second;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/canonical-types.h b/deps/v8/src/wasm/canonical-types.h
new file mode 100644
index 0000000000..88f9e3617e
--- /dev/null
+++ b/deps/v8/src/wasm/canonical-types.h
@@ -0,0 +1,125 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_CANONICAL_TYPES_H_
+#define V8_WASM_CANONICAL_TYPES_H_
+
+#include <unordered_map>
+
+#include "src/base/lazy-instance.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// A singleton class, responsible for isorecursive canonicalization of wasm
+// types.
+// A recursive group is a subsequence of types explicitly marked in the type
+// section of a wasm module. Identical recursive groups have to be canonicalized
+// to a single canonical group and are considered identical. Respective
+// types in two identical groups are considered identical for all purposes.
+// Two groups are considered identical if they have the same shape, and all
+// type indices referenced in the same position in both groups reference:
+// - identical types, if those do not belong to the rec. group,
+// - types in the same relative position in the group, if those belong to the
+// rec. group.
+class TypeCanonicalizer {
+ public:
+ TypeCanonicalizer() = default;
+
+ // Singleton class; no copying or moving allowed.
+ TypeCanonicalizer(const TypeCanonicalizer& other) = delete;
+ TypeCanonicalizer& operator=(const TypeCanonicalizer& other) = delete;
+ TypeCanonicalizer(TypeCanonicalizer&& other) = delete;
+ TypeCanonicalizer& operator=(TypeCanonicalizer&& other) = delete;
+
+ // Registers the last {size} types of {module} as a recursive group, and
+ // possibly canonicalizes it if an identical one has been found.
+ // Modifies {module->isorecursive_canonical_type_ids}.
+ V8_EXPORT_PRIVATE void AddRecursiveGroup(WasmModule* module, uint32_t size);
+
+  // Returns whether the type at {sub_index} in {sub_module} is a subtype of
+  // the type at {super_index} in {super_module} after canonicalization.
+ V8_EXPORT_PRIVATE bool IsCanonicalSubtype(uint32_t sub_index,
+ uint32_t super_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+
+ private:
+ using TypeInModule = std::pair<const WasmModule*, uint32_t>;
+ struct CanonicalType {
+ TypeDefinition type_def;
+ bool is_relative_supertype;
+
+ bool operator==(const CanonicalType& other) const {
+ return type_def == other.type_def &&
+ is_relative_supertype == other.is_relative_supertype;
+ }
+
+ bool operator!=(const CanonicalType& other) const {
+ return type_def != other.type_def ||
+ is_relative_supertype != other.is_relative_supertype;
+ }
+
+ size_t hash_value() const {
+ return base::hash_combine(type_def.kind,
+ base::hash_value(is_relative_supertype));
+ }
+ };
+ struct CanonicalGroup {
+ struct hash {
+ size_t operator()(const CanonicalGroup& group) const {
+ return group.hash_value();
+ }
+ };
+
+ bool operator==(const CanonicalGroup& other) const {
+ return types == other.types;
+ }
+
+ bool operator!=(const CanonicalGroup& other) const {
+ return types != other.types;
+ }
+
+ size_t hash_value() const {
+ size_t result = 0;
+ for (const CanonicalType& type : types) {
+ result = base::hash_combine(result, type.hash_value());
+ }
+ return result;
+ }
+
+ std::vector<CanonicalType> types;
+ };
+
+ int FindCanonicalGroup(CanonicalGroup&) const;
+
+ CanonicalType CanonicalizeTypeDef(const WasmModule* module,
+ TypeDefinition type,
+ uint32_t recursive_group_start);
+ ValueType CanonicalizeValueType(const WasmModule* module, ValueType type,
+ uint32_t recursive_group_start) const;
+
+ std::vector<uint32_t> canonical_supertypes_;
+ // group -> canonical id of first type
+ std::unordered_map<CanonicalGroup, uint32_t, CanonicalGroup::hash>
+ canonical_groups_;
+ AccountingAllocator allocator_;
+ Zone zone_{&allocator_, "canonical type zone"};
+ base::Mutex mutex_;
+};
+
+// Returns the TypeCanonicalizer shared by the entire process.
+V8_EXPORT_PRIVATE TypeCanonicalizer* GetTypeCanonicalizer();
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_CANONICAL_TYPES_H_
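For reference, the subtype query on canonical ids reduces to walking the canonical supertype chain stored in canonical_supertypes_, as implemented in canonical-types.cc above. A self-contained sketch of that walk, with a plain vector standing in for the canonicalizer's state (the sentinel below is a stand-in, not V8's actual kNoSuperType value):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    constexpr uint32_t kNoSuperType = ~0u;  // stand-in sentinel

    // Walk the candidate's canonical supertype chain until it reaches the
    // supertype or runs out.
    bool IsCanonicalSubtype(uint32_t sub, uint32_t super,
                            const std::vector<uint32_t>& canonical_supertypes) {
      while (sub != kNoSuperType) {
        if (sub == super) return true;
        sub = canonical_supertypes[sub];
      }
      return false;
    }

    int main() {
      // Type 2 extends type 1, which extends type 0; type 0 has no supertype.
      std::vector<uint32_t> supertypes = {kNoSuperType, 0, 1};
      assert(IsCanonicalSubtype(2, 0, supertypes));
      assert(!IsCanonicalSubtype(0, 2, supertypes));
      return 0;
    }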
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 5b7201abb6..cbb7b4f630 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -4233,7 +4233,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Byte sequences in data segments are interpreted as little endian for
// the purposes of this instruction. This means that those will have to
// be transformed in big endian architectures. TODO(7748): Implement.
- if (element_type.element_size_bytes() > 1) {
+ if (element_type.value_kind_size() > 1) {
UNIMPLEMENTED();
}
#endif
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 99928ed6e6..5d01a08b85 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -311,6 +311,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
++line_nr;
}
DCHECK(!line_numbers || line_numbers->size() == static_cast<size_t>(line_nr));
+ USE(line_nr);
return decoder.ok();
}
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 5aa2eabd4b..3ca925cf7b 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -109,7 +109,8 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
// them to be compiled for debugging, see documentation.
if (V8_LIKELY(FLAG_wasm_tier_mask_for_testing == 0) ||
func_index_ >= 32 ||
- ((FLAG_wasm_tier_mask_for_testing & (1 << func_index_)) == 0)) {
+ ((FLAG_wasm_tier_mask_for_testing & (1 << func_index_)) == 0) ||
+ FLAG_liftoff_only) {
// We do not use the debug side table, we only (optionally) pass it to
// cover different code paths in Liftoff for testing.
std::unique_ptr<DebugSideTable> unused_debug_sidetable;
@@ -127,6 +128,10 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
if (result.succeeded()) break;
}
+ // If --liftoff-only, do not fall back to turbofan, even if compilation
+ // failed.
+ if (FLAG_liftoff_only) break;
+
// If Liftoff failed, fall back to turbofan.
// TODO(wasm): We could actually stop or remove the tiering unit for this
// function to avoid compiling it twice with TurboFan.
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index ce98a98f0e..a74cbf8650 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -24,7 +24,7 @@ namespace v8 {
namespace internal {
class Counters;
-class OptimizedCompilationJob;
+class TurbofanCompilationJob;
namespace wasm {
@@ -138,7 +138,7 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
bool is_import_;
const FunctionSig* sig_;
bool use_generic_wrapper_;
- std::unique_ptr<OptimizedCompilationJob> job_;
+ std::unique_ptr<TurbofanCompilationJob> job_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index bd3317b3b2..f2d776435d 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -640,12 +640,20 @@ class WasmGraphBuildingInterface {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
+ if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
+ DCHECK_LT(feedback_instruction_index_, type_feedback_.size());
+ feedback_instruction_index_++;
+ }
DoCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args, returns);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
+ if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
+ DCHECK_LT(feedback_instruction_index_, type_feedback_.size());
+ feedback_instruction_index_++;
+ }
DoReturnCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args);
}
@@ -671,8 +679,6 @@ class WasmGraphBuildingInterface {
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
int maybe_feedback = -1;
- // TODO(jkummerow): The way we currently prepare type feedback means that
- // we won't have any for inlined functions. Figure out how to change that.
if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
DCHECK_LT(feedback_instruction_index_, type_feedback_.size());
maybe_feedback =
@@ -1155,10 +1161,6 @@ class WasmGraphBuildingInterface {
result.object_can_be_null = object_type.is_nullable();
DCHECK(object_type.is_object_reference()); // Checked by validation.
// In the bottom case, the result is irrelevant.
- result.reference_kind =
- !rtt_type.is_bottom() && module->has_signature(rtt_type.ref_index())
- ? compiler::WasmGraphBuilder::kFunction
- : compiler::WasmGraphBuilder::kArrayOrStruct;
result.rtt_depth = rtt_type.is_bottom()
? 0 /* unused */
: static_cast<uint8_t>(GetSubtypingDepth(
diff --git a/deps/v8/src/wasm/init-expr-interface.cc b/deps/v8/src/wasm/init-expr-interface.cc
index 685dab463b..bd0d033642 100644
--- a/deps/v8/src/wasm/init-expr-interface.cc
+++ b/deps/v8/src/wasm/init-expr-interface.cc
@@ -198,7 +198,7 @@ void InitExprInterface::ArrayInitFromData(
const WasmDataSegment& data_segment =
module_->data_segments[data_segment_imm.index];
uint32_t length_in_bytes =
- length * array_imm.array_type->element_type().element_size_bytes();
+ length * array_imm.array_type->element_type().value_kind_size();
// Error handling.
if (length >
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 9de513ae4b..688d27b84e 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -1207,8 +1207,8 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
// Allocate feedback vector if needed.
if (result.feedback_vector_slots > 0) {
DCHECK(FLAG_wasm_speculative_inlining);
- Handle<FixedArray> vector =
- isolate->factory()->NewFixedArray(result.feedback_vector_slots);
+ Handle<FixedArray> vector = isolate->factory()->NewFixedArrayWithZeroes(
+ result.feedback_vector_slots);
instance->feedback_vectors().set(
declared_function_index(module, func_index), *vector);
}
@@ -1244,15 +1244,56 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
return true;
}
-std::vector<CallSiteFeedback> ProcessTypeFeedback(
- Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
- int which_vector = declared_function_index(instance->module(), func_index);
- Object maybe_feedback = instance->feedback_vectors().get(which_vector);
- if (!maybe_feedback.IsFixedArray()) return {};
+class TransitiveTypeFeedbackProcessor {
+ public:
+ TransitiveTypeFeedbackProcessor(const WasmModule* module,
+ Handle<WasmInstanceObject> instance,
+ int func_index)
+ : instance_(instance),
+ feedback_for_function_(module->type_feedback.feedback_for_function) {
+ base::MutexGuard mutex_guard(&module->type_feedback.mutex);
+ queue_.insert(func_index);
+ while (!queue_.empty()) {
+ auto next = queue_.cbegin();
+ Process(*next);
+ queue_.erase(next);
+ }
+ }
+
+ private:
+ void Process(int func_index);
+
+ void EnqueueCallees(std::vector<CallSiteFeedback> feedback) {
+ for (size_t i = 0; i < feedback.size(); i++) {
+ int func = feedback[i].function_index;
+ // TODO(jkummerow): Find a way to get the target function ID for
+ // direct calls (which currently requires decoding the function).
+ if (func == -1) continue;
+ // Don't spend time on calls that have never been executed.
+ if (feedback[i].absolute_call_frequency == 0) continue;
+ // Don't recompute feedback that has already been processed.
+ auto existing = feedback_for_function_.find(func);
+ if (existing != feedback_for_function_.end() &&
+ existing->second.feedback_vector.size() > 0) {
+ continue;
+ }
+ queue_.insert(func);
+ }
+ }
+
+ Handle<WasmInstanceObject> instance_;
+ std::map<uint32_t, FunctionTypeFeedback>& feedback_for_function_;
+ std::unordered_set<int> queue_;
+};
+
+void TransitiveTypeFeedbackProcessor::Process(int func_index) {
+ int which_vector = declared_function_index(instance_->module(), func_index);
+ Object maybe_feedback = instance_->feedback_vectors().get(which_vector);
+ if (!maybe_feedback.IsFixedArray()) return;
FixedArray feedback = FixedArray::cast(maybe_feedback);
std::vector<CallSiteFeedback> result(feedback.length() / 2);
int imported_functions =
- static_cast<int>(instance->module()->num_imported_functions);
+ static_cast<int>(instance_->module()->num_imported_functions);
for (int i = 0; i < feedback.length(); i += 2) {
Object value = feedback.get(i);
if (value.IsWasmInternalFunction() &&
@@ -1263,7 +1304,7 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
// if it's defined in the same module.
WasmExportedFunction target = WasmExportedFunction::cast(
WasmInternalFunction::cast(value).external());
- if (target.instance() == *instance &&
+ if (target.instance() == *instance_ &&
target.function_index() >= imported_functions) {
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%d inlineable (monomorphic)]\n",
@@ -1304,7 +1345,7 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
}
WasmExportedFunction target =
WasmExportedFunction::cast(internal.external());
- if (target.instance() != *instance ||
+ if (target.instance() != *instance_ ||
target.function_index() < imported_functions) {
continue;
}
@@ -1324,6 +1365,15 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
PrintF("[Function #%d call_ref #%d: best frequency %f]\n", func_index,
i / 2, best_frequency);
}
+ } else if (value.IsSmi()) {
+ // Direct call, just collecting call count.
+ int count = Smi::cast(value).value();
+ if (FLAG_trace_wasm_speculative_inlining) {
+ PrintF("[Function #%d call_direct #%d: frequency %d]\n", func_index,
+ i / 2, count);
+ }
+ result[i / 2] = {-1, count};
+ continue;
}
// If we fall through to here, then this call isn't eligible for inlining.
// Possible reasons: uninitialized or megamorphic feedback; or monomorphic
@@ -1334,7 +1384,8 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
}
result[i / 2] = {-1, -1};
}
- return result;
+ EnqueueCallees(result);
+ feedback_for_function_[func_index].feedback_vector = std::move(result);
}
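TransitiveTypeFeedbackProcessor replaces the old single-function ProcessTypeFeedback with a worklist: starting from the function being tiered up, it decodes each feedback vector and enqueues callees that have recorded calls but no processed feedback yet, so feedback is also available for functions that may get inlined (presumably why the related TODO is removed in graph-builder-interface.cc below). A minimal sketch of that worklist shape only, with integers and a callback standing in for instances and feedback vectors:

    #include <functional>
    #include <set>
    #include <vector>

    // 'callees' stands in for decoding a function's feedback vector and
    // returning the interesting callees; 'processed' dedupes work.
    void ProcessTransitively(int root,
                             const std::function<std::vector<int>(int)>& callees,
                             std::set<int>& processed) {
      std::set<int> queue{root};
      while (!queue.empty()) {
        int func = *queue.begin();
        queue.erase(queue.begin());
        if (!processed.insert(func).second) continue;  // already handled
        for (int callee : callees(func)) queue.insert(callee);
      }
    }

    int main() {
      std::set<int> processed;
      // 0 calls 1 and 2; 1 calls 2; 2 calls nothing.
      auto callees = [](int f) -> std::vector<int> {
        if (f == 0) return {1, 2};
        if (f == 1) return {2};
        return {};
      };
      ProcessTransitively(0, callees, processed);
      return processed.size() == 3 ? 0 : 1;
    }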
void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
@@ -1363,13 +1414,10 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
priority = saved_priority;
}
if (FLAG_wasm_speculative_inlining) {
- auto feedback = ProcessTypeFeedback(isolate, instance, func_index);
- base::MutexGuard mutex_guard(&module->type_feedback.mutex);
- // TODO(jkummerow): we could have collisions here if two different instances
- // of the same module schedule tier-ups of the same function at the same
- // time. If that ever becomes a problem, figure out a solution.
- module->type_feedback.feedback_for_function[func_index].feedback_vector =
- std::move(feedback);
+ // TODO(jkummerow): we could have collisions here if different instances
+ // of the same module have collected different feedback. If that ever
+ // becomes a problem, figure out a solution.
+ TransitiveTypeFeedbackProcessor process(module, instance, func_index);
}
compilation_state->AddTopTierPriorityCompilationUnit(tiering_unit, priority);
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 1c5aefb4a2..0d37867510 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -13,6 +13,7 @@
#include "src/logging/metrics.h"
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/canonical-types.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/init-expr-interface.h"
@@ -669,17 +670,22 @@ class ModuleDecoderImpl : public Decoder {
}
void DecodeTypeSection() {
+ TypeCanonicalizer* type_canon = GetTypeCanonicalizer();
uint32_t types_count = consume_count("types count", kV8MaxWasmTypes);
// Non wasm-gc type section decoding.
if (!enabled_features_.has_gc()) {
- for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ module_->types.reserve(types_count);
+ for (uint32_t i = 0; i < types_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
expect_u8("signature definition", kWasmFunctionTypeCode);
const FunctionSig* sig = consume_sig(module_->signature_zone.get());
if (!ok()) break;
module_->add_signature(sig, kNoSuperType);
+ if (FLAG_wasm_type_canonicalization) {
+ type_canon->AddRecursiveGroup(module_.get(), 1);
+ }
}
return;
}
@@ -700,6 +706,9 @@ class ModuleDecoderImpl : public Decoder {
TypeDefinition type = consume_nominal_type_definition();
if (ok()) module_->add_type(type);
}
+ if (ok() && FLAG_wasm_type_canonicalization) {
+ type_canon->AddRecursiveGroup(module_.get(), types_count);
+ }
} else {
// wasm-gc isorecursive type section decoding.
for (uint32_t i = 0; ok() && i < types_count; ++i) {
@@ -722,9 +731,17 @@ class ModuleDecoderImpl : public Decoder {
TypeDefinition type = consume_subtype_definition();
if (ok()) module_->add_type(type);
}
+ if (ok() && FLAG_wasm_type_canonicalization) {
+ type_canon->AddRecursiveGroup(module_.get(), group_size);
+ }
} else {
TypeDefinition type = consume_subtype_definition();
- if (ok()) module_->add_type(type);
+ if (ok()) {
+ module_->add_type(type);
+ if (FLAG_wasm_type_canonicalization) {
+ type_canon->AddRecursiveGroup(module_.get(), 1);
+ }
+ }
}
}
}
@@ -1290,14 +1307,31 @@ class ModuleDecoderImpl : public Decoder {
uint8_t hint_byte = decoder.consume_u8("compilation hint");
if (!decoder.ok()) break;
+ // Validate the hint_byte.
+ // For the compilation strategy, all 2-bit values are valid. For the tier,
+ // only 0x0, 0x1, and 0x2 are allowed.
+ static_assert(
+ static_cast<int>(WasmCompilationHintTier::kDefault) == 0 &&
+ static_cast<int>(WasmCompilationHintTier::kBaseline) == 1 &&
+ static_cast<int>(WasmCompilationHintTier::kOptimized) == 2,
+ "The check below assumes that 0x03 is the only invalid 2-bit number "
+ "for a compilation tier");
+ if (((hint_byte >> 2) & 0x03) == 0x03 ||
+ ((hint_byte >> 4) & 0x03) == 0x03) {
+ decoder.errorf(decoder.pc(),
+ "Invalid compilation hint %#04x (invalid tier 0x03)",
+ hint_byte);
+ break;
+ }
+
// Decode compilation hint.
WasmCompilationHint hint;
hint.strategy =
static_cast<WasmCompilationHintStrategy>(hint_byte & 0x03);
hint.baseline_tier =
- static_cast<WasmCompilationHintTier>(hint_byte >> 2 & 0x3);
+ static_cast<WasmCompilationHintTier>((hint_byte >> 2) & 0x03);
hint.top_tier =
- static_cast<WasmCompilationHintTier>(hint_byte >> 4 & 0x3);
+ static_cast<WasmCompilationHintTier>((hint_byte >> 4) & 0x03);
// Ensure that the top tier never downgrades a compilation result. If
// baseline and top tier are the same compilation will be invoked only
@@ -1305,7 +1339,7 @@ class ModuleDecoderImpl : public Decoder {
if (hint.top_tier < hint.baseline_tier &&
hint.top_tier != WasmCompilationHintTier::kDefault) {
decoder.errorf(decoder.pc(),
- "Invalid compilation hint %#x (forbidden downgrade)",
+ "Invalid compilation hint %#04x (forbidden downgrade)",
hint_byte);
}
@@ -1615,7 +1649,7 @@ class ModuleDecoderImpl : public Decoder {
// All entries in the tagged_globals_buffer have size 1.
tagged_offset++;
} else {
- int size = global.type.element_size_bytes();
+ int size = global.type.value_kind_size();
untagged_offset = (untagged_offset + size - 1) & ~(size - 1); // align
global.offset = untagged_offset;
untagged_offset += size;
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 2dc43b69f4..b03903cc57 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -139,9 +139,6 @@ Handle<DescriptorArray> CreateArrayDescriptorArray(
return descriptors;
}
-} // namespace
-
-// TODO(jkummerow): Move these elsewhere.
Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
int struct_index, Handle<Map> opt_rtt_parent,
Handle<WasmInstanceObject> instance) {
@@ -191,7 +188,7 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
- WasmArray::EncodeElementSizeInMap(type->element_type().element_size_bytes(),
+ WasmArray::EncodeElementSizeInMap(type->element_type().value_kind_size(),
*map);
return map;
}
@@ -218,6 +215,24 @@ void CreateMapForType(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> maps) {
// Recursive calls for supertypes may already have created this map.
if (maps->get(type_index).IsMap()) return;
+
+ Handle<WeakArrayList> canonical_rtts;
+ uint32_t canonical_type_index =
+ module->isorecursive_canonical_type_ids[type_index];
+
+ if (FLAG_wasm_type_canonicalization) {
+ // Try to find the canonical map for this type in the isolate store.
+ canonical_rtts = handle(isolate->heap()->wasm_canonical_rtts(), isolate);
+ DCHECK_GT(static_cast<uint32_t>(canonical_rtts->length()),
+ canonical_type_index);
+ MaybeObject maybe_canonical_map = canonical_rtts->Get(canonical_type_index);
+ if (maybe_canonical_map.IsStrongOrWeak() &&
+ maybe_canonical_map.GetHeapObject().IsMap()) {
+ maps->set(type_index, maybe_canonical_map.GetHeapObject());
+ return;
+ }
+ }
+
Handle<Map> rtt_parent;
// If the type with {type_index} has an explicit supertype, make sure the
// map for that supertype is created first, so that the supertypes list
@@ -238,14 +253,17 @@ void CreateMapForType(Isolate* isolate, const WasmModule* module,
map = CreateArrayMap(isolate, module, type_index, rtt_parent, instance);
break;
case TypeDefinition::kFunction:
- // TODO(7748): Create funcref RTTs lazily?
- // TODO(7748): Canonicalize function maps (cross-module)?
map = CreateFuncRefMap(isolate, module, rtt_parent, instance);
break;
}
+ if (FLAG_wasm_type_canonicalization) {
+ canonical_rtts->Set(canonical_type_index, HeapObjectReference::Weak(*map));
+ }
maps->set(type_index, *map);
}
+} // namespace
+
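With --wasm-type-canonicalization, CreateMapForType first consults the per-isolate wasm_canonical_rtts weak list at the type's canonical index: a still-live Map found there is reused across modules, otherwise the freshly built map is stored back weakly. A loose analogy of that cache shape using std::weak_ptr (V8 itself uses GC-managed weak references, not shared_ptr; names below are placeholders):

    #include <cstdint>
    #include <map>
    #include <memory>

    struct Map {};  // stand-in for the real v8 Map

    // Look up a weak entry by canonical index, reuse it if still alive,
    // otherwise create and store a new one.
    std::shared_ptr<Map> GetOrCreateCanonicalMap(
        uint32_t canonical_index,
        std::map<uint32_t, std::weak_ptr<Map>>& cache) {
      if (auto existing = cache[canonical_index].lock()) return existing;
      auto fresh = std::make_shared<Map>();
      cache[canonical_index] = fresh;  // weak store; can still be collected
      return fresh;
    }

    int main() {
      std::map<uint32_t, std::weak_ptr<Map>> cache;
      auto a = GetOrCreateCanonicalMap(7, cache);
      auto b = GetOrCreateCanonicalMap(7, cache);
      return a == b ? 0 : 1;  // the second lookup reuses the cached map
    }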
// A helper class to simplify instantiating a module from a module object.
// It closes over the {Isolate}, the {ErrorThrower}, etc.
class InstanceBuilder {
@@ -643,6 +661,15 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// list.
//--------------------------------------------------------------------------
if (enabled_.has_gc()) {
+ if (FLAG_wasm_type_canonicalization) {
+ uint32_t maximum_canonical_type_index =
+ *std::max_element(module_->isorecursive_canonical_type_ids.begin(),
+ module_->isorecursive_canonical_type_ids.end());
+ // Make sure all canonical indices have been set.
+ DCHECK_NE(maximum_canonical_type_index, kNoSuperType);
+ isolate_->heap()->EnsureWasmCanonicalRttsSize(
+ maximum_canonical_type_index + 1);
+ }
Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
static_cast<int>(module_->types.size()));
for (uint32_t index = 0; index < module_->types.size(); index++) {
@@ -668,7 +695,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
PrintF("[Function %d (declared %d): allocating %d feedback slots]\n",
func_index, i, slots);
}
- Handle<FixedArray> feedback = isolate_->factory()->NewFixedArray(slots);
+ Handle<FixedArray> feedback =
+ isolate_->factory()->NewFixedArrayWithZeroes(slots);
vectors->set(i, *feedback);
}
}
@@ -791,7 +819,7 @@ bool InstanceBuilder::ExecuteStartFunction() {
// v8::Context::Enter() and must happen in addition to the function call
// sequence doing the compiled version of "isolate->set_context(...)".
HandleScopeImplementer* hsi = isolate_->handle_scope_implementer();
- hsi->EnterContext(start_function_->context().native_context());
+ hsi->EnterContext(start_function_->native_context());
// Call the JS function.
Handle<Object> undefined = isolate_->factory()->undefined_value();
@@ -1637,6 +1665,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
return -1;
}
num_imported_tables++;
+ USE(num_imported_tables);
break;
}
case kExternalMemory: {
diff --git a/deps/v8/src/wasm/struct-types.h b/deps/v8/src/wasm/struct-types.h
index eafa90a215..44c492d327 100644
--- a/deps/v8/src/wasm/struct-types.h
+++ b/deps/v8/src/wasm/struct-types.h
@@ -71,9 +71,9 @@ class StructType : public ZoneObject {
void InitializeOffsets() {
if (field_count() == 0) return;
- uint32_t offset = field(0).element_size_bytes();
+ uint32_t offset = field(0).value_kind_size();
for (uint32_t i = 1; i < field_count(); i++) {
- uint32_t field_size = field(i).element_size_bytes();
+ uint32_t field_size = field(i).value_kind_size();
// TODO(jkummerow): Don't round up to more than kTaggedSize-alignment.
offset = RoundUp(offset, field_size);
field_offsets_[i - 1] = offset;
@@ -132,8 +132,12 @@ class ArrayType : public ZoneObject {
ValueType element_type() const { return rep_; }
bool mutability() const { return mutability_; }
- bool operator==(const ArrayType& other) const { return rep_ == other.rep_; }
- bool operator!=(const ArrayType& other) const { return rep_ != other.rep_; }
+ bool operator==(const ArrayType& other) const {
+ return rep_ == other.rep_ && mutability_ == other.mutability_;
+ }
+ bool operator!=(const ArrayType& other) const {
+ return rep_ != other.rep_ || mutability_ != other.mutability_;
+ }
private:
const ValueType rep_;
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 84929b85f7..6c4b56ee5d 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -208,19 +208,19 @@ constexpr bool is_object_reference(ValueKind kind) {
return kind == kRef || kind == kOptRef;
}
-constexpr int element_size_log2(ValueKind kind) {
- constexpr int8_t kElementSizeLog2[] = {
-#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
- FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
-#undef ELEM_SIZE_LOG2
+constexpr int value_kind_size_log2(ValueKind kind) {
+ constexpr int8_t kValueKindSizeLog2[] = {
+#define VALUE_KIND_SIZE_LOG2(kind, log2Size, ...) log2Size,
+ FOREACH_VALUE_TYPE(VALUE_KIND_SIZE_LOG2)
+#undef VALUE_KIND_SIZE_LOG2
};
- int size_log_2 = kElementSizeLog2[kind];
+ int size_log_2 = kValueKindSizeLog2[kind];
DCHECK_LE(0, size_log_2);
return size_log_2;
}
-constexpr int element_size_bytes(ValueKind kind) {
+constexpr int value_kind_size(ValueKind kind) {
constexpr int8_t kElementSize[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
@@ -233,6 +233,14 @@ constexpr int element_size_bytes(ValueKind kind) {
return size;
}
+constexpr int value_kind_full_size(ValueKind kind) {
+ if (is_reference(kind)) {
+ // Uncompressed pointer size.
+ return kSystemPointerSize;
+ }
+ return value_kind_size(kind);
+}
+
constexpr char short_name(ValueKind kind) {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
@@ -279,12 +287,14 @@ constexpr bool is_defaultable(ValueKind kind) {
return kind != kRef && !is_rtt(kind);
}
-// A ValueType is encoded by three components: A ValueKind, a heap
-// representation (for reference types), and an inheritance depth (for rtts
-// only). Those are encoded into 32 bits using base::BitField. The underlying
-// ValueKind enumeration includes four elements which do not strictly correspond
-// to value types: the two packed types i8 and i16, the void type (for control
-// structures), and a bottom value (for internal use).
+// A ValueType is encoded by two components: a ValueKind and a heap
+// representation (for reference types/rtts). Those are encoded into 32 bits
+// using base::BitField. The underlying ValueKind enumeration includes four
+// elements which do not strictly correspond to value types: the two packed
+// types i8 and i16, the void type (for control structures), and a bottom value
+// (for internal use).
+// ValueType encoding includes an additional bit marking the index of a type as
+// relative. This should only be used during type canonicalization.
class ValueType {
public:
/******************************* Constructors *******************************/
@@ -309,6 +319,11 @@ class ValueType {
HeapTypeField::encode(type_index));
}
+ static constexpr ValueType FromIndex(ValueKind kind, uint32_t index) {
+ DCHECK(kind == kOptRef || kind == kRef || kind == kRtt);
+ return ValueType(KindField::encode(kind) | HeapTypeField::encode(index));
+ }
+
// Useful when deserializing a type stored in a runtime object.
static constexpr ValueType FromRawBitField(uint32_t bit_field) {
return ValueType(bit_field);
@@ -388,12 +403,16 @@ class ValueType {
return offsetof(ValueType, bit_field_);
}
- constexpr int element_size_log2() const {
- return wasm::element_size_log2(kind());
+ constexpr int value_kind_size_log2() const {
+ return wasm::value_kind_size_log2(kind());
}
- constexpr int element_size_bytes() const {
- return wasm::element_size_bytes(kind());
+ constexpr int value_kind_size() const {
+ return wasm::value_kind_size(kind());
+ }
+
+ constexpr int value_kind_full_size() const {
+ return wasm::value_kind_full_size(kind());
}
/*************************** Machine-type related ***************************/
@@ -491,8 +510,6 @@ class ValueType {
}
}
- static constexpr int kLastUsedBit = 24;
-
/****************************** Pretty-printing *****************************/
constexpr char short_name() const { return wasm::short_name(kind()); }
@@ -517,23 +534,40 @@ class ValueType {
return buf.str();
}
- // We only use 31 bits so ValueType fits in a Smi. This can be changed if
- // needed.
+ /********************** Type canonicalization utilities *********************/
+ static constexpr ValueType CanonicalWithRelativeIndex(ValueKind kind,
+ uint32_t index) {
+ return ValueType(KindField::encode(kind) | HeapTypeField::encode(index) |
+ CanonicalRelativeField::encode(true));
+ }
+
+ constexpr bool is_canonical_relative() const {
+ return has_index() && CanonicalRelativeField::decode(bit_field_);
+ }
+
+ /**************************** Static constants ******************************/
+ static constexpr int kLastUsedBit = 25;
static constexpr int kKindBits = 5;
static constexpr int kHeapTypeBits = 20;
private:
- STATIC_ASSERT(kV8MaxWasmTypes < (1u << kHeapTypeBits));
-
// {hash_value} directly reads {bit_field_}.
friend size_t hash_value(ValueType type);
using KindField = base::BitField<ValueKind, 0, kKindBits>;
using HeapTypeField = KindField::Next<uint32_t, kHeapTypeBits>;
+ // Marks a type as a canonical type which uses an index relative to its
+ // recursive group start. Used only during type canonicalization.
+ using CanonicalRelativeField = HeapTypeField::Next<bool, 1>;
+ static_assert(kV8MaxWasmTypes < (1u << kHeapTypeBits),
+ "Type indices fit in kHeapTypeBits");
// This is implemented defensively against field order changes.
- STATIC_ASSERT(kLastUsedBit ==
- std::max(KindField::kLastUsedBit, HeapTypeField::kLastUsedBit));
+ static_assert(kLastUsedBit ==
+ std::max(KindField::kLastUsedBit,
+ std::max(HeapTypeField::kLastUsedBit,
+ CanonicalRelativeField::kLastUsedBit)),
+ "kLastUsedBit is consistent");
constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
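
For orientation, the layout described in the comment above (5 kind bits, 20 heap-type bits, plus the new 1-bit canonical-relative flag, giving kLastUsedBit = 25) can be mimicked with plain shifts and masks. The following is a standalone mock-up of that packing, not V8 code; only the field widths come from the constants in the hunk above, and the Encode/Decode helpers are made-up names rather than V8's BitField API.

#include <cstdint>

// Standalone mock-up of the ValueType bit layout:
//   bits [0..4]   kind (5 bits)
//   bits [5..24]  heap type / type index (20 bits)
//   bit  [25]     canonical-relative flag (1 bit)
constexpr uint32_t kKindBits = 5;
constexpr uint32_t kHeapTypeBits = 20;

constexpr uint32_t Encode(uint32_t kind, uint32_t index, bool relative) {
  return (kind & ((1u << kKindBits) - 1)) |
         ((index & ((1u << kHeapTypeBits) - 1)) << kKindBits) |
         (static_cast<uint32_t>(relative) << (kKindBits + kHeapTypeBits));
}

constexpr uint32_t DecodeKind(uint32_t bits) {
  return bits & ((1u << kKindBits) - 1);
}
constexpr uint32_t DecodeIndex(uint32_t bits) {
  return (bits >> kKindBits) & ((1u << kHeapTypeBits) - 1);
}
constexpr bool DecodeRelative(uint32_t bits) {
  return ((bits >> (kKindBits + kHeapTypeBits)) & 1u) != 0;
}

int main() {
  constexpr uint32_t bits = Encode(/*kind=*/9, /*index=*/42, /*relative=*/true);
  static_assert(DecodeKind(bits) == 9, "kind round-trips");
  static_assert(DecodeIndex(bits) == 42, "index round-trips");
  static_assert(DecodeRelative(bits), "flag round-trips");
  return 0;
}
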
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
index 4e59a23264..305d7cc361 100644
--- a/deps/v8/src/wasm/wasm-arguments.h
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -52,11 +52,11 @@ class CWasmArgumentsPacker {
static int TotalSize(const FunctionSig* sig) {
int return_size = 0;
for (ValueType t : sig->returns()) {
- return_size += t.element_size_bytes();
+ return_size += t.value_kind_size();
}
int param_size = 0;
for (ValueType t : sig->parameters()) {
- param_size += t.element_size_bytes();
+ param_size += t.value_kind_size();
}
return std::max(return_size, param_size);
}
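
As a quick sanity check of the arithmetic above (assuming the usual kind sizes of 4 bytes for i32/f32 and 8 bytes for i64/f64): a signature (i32, f64) -> i64 gives param_size = 4 + 8 = 12 and return_size = 8, so TotalSize returns max(12, 8) = 12; the packer buffer is sized for whichever direction needs more space.
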
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index eb5edb877d..08842909a1 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -23,7 +23,7 @@
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
@@ -388,7 +388,6 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "Body (size = " << instructions().size() << " = "
<< unpadded_binary_size_ << " + " << padding << " padding)\n";
-#ifdef ENABLE_DISASSEMBLER
int instruction_size = unpadded_binary_size_;
if (constant_pool_offset_ < instruction_size) {
instruction_size = constant_pool_offset_;
@@ -400,6 +399,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
instruction_size = handler_table_offset_;
}
DCHECK_LT(0, instruction_size);
+
+#ifdef ENABLE_DISASSEMBLER
os << "Instructions (size = " << instruction_size << ")\n";
Disassembler::Decode(nullptr, os, instructions().begin(),
instructions().begin() + instruction_size,
@@ -446,7 +447,11 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
it.rinfo()->Print(nullptr, os);
}
os << "\n";
-#endif // ENABLE_DISASSEMBLER
+#else // !ENABLE_DISASSEMBLER
+ os << "Instructions (size = " << instruction_size << ", "
+ << static_cast<void*>(instructions().begin()) << "-"
+ << static_cast<void*>(instructions().begin() + instruction_size) << ")\n";
+#endif // !ENABLE_DISASSEMBLER
}
const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 6a4d95546f..e44da1bf62 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -19,6 +19,7 @@
#include "src/base/platform/mutex.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/operations-barrier.h"
+#include "src/wasm/canonical-types.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-tier.h"
#include "src/zone/accounting-allocator.h"
@@ -357,6 +358,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
void SampleRethrowEvent(Isolate*);
void SampleCatchEvent(Isolate*);
+ TypeCanonicalizer* type_canonicalizer() { return &type_canonicalizer_; }
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -392,6 +395,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
std::atomic<int> next_compilation_id_{0};
+ TypeCanonicalizer type_canonicalizer_;
+
// This mutex protects all information which is mutated concurrently or
// fields that are initialized lazily on the first access.
base::Mutex mutex_;
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 27e022bdb8..73f25dbcca 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -573,7 +573,7 @@ void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
UPDATE_WRITE_BARRIER);
}
} else {
- int element_size_bytes = element_type.element_size_bytes();
+ int element_size_bytes = element_type.value_kind_size();
void* dst = ArrayElementAddress(dst_array, dst_index, element_size_bytes);
void* src = ArrayElementAddress(src_array, src_index, element_size_bytes);
size_t copy_size = length * element_size_bytes;
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 7d9a5593ae..60da9f5db3 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -2992,23 +2992,25 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
MaybeHandle<Object> maybe_webassembly =
JSObject::GetProperty(isolate, global, "WebAssembly");
Handle<Object> webassembly_obj;
- if (!maybe_webassembly.ToHandle(&webassembly_obj)) {
- // There is not {WebAssembly} object. We just return without adding the
- // {Tag} constructor.
- return;
- }
- if (!webassembly_obj->IsJSObject()) {
- // The {WebAssembly} object is invalid. As we cannot add the {Tag}
- // constructor, we just return.
+ if (!maybe_webassembly.ToHandle(&webassembly_obj) ||
+ !webassembly_obj->IsJSObject()) {
+ // There is no {WebAssembly} object, or it's not what we expect.
+ // Just return without adding the {Tag} constructor.
return;
}
Handle<JSObject> webassembly = Handle<JSObject>::cast(webassembly_obj);
- // Setup Exception
+ // Setup Tag.
Handle<String> tag_name = v8_str(isolate, "Tag");
+ // The {WebAssembly} object may already have been modified. The following
+ // code is designed to:
+ // - check for existing {Tag} properties on the object itself, and avoid
+ // overwriting them or adding duplicate properties
+ // - disregard any setters or read-only properties on the prototype chain
+ // - only make objects accessible to user code after all internal setup
+ // has been completed.
if (JSObject::HasOwnProperty(isolate, webassembly, tag_name)
.FromMaybe(true)) {
- // The {Exception} constructor already exists, there is nothing more to
- // do.
+ // Existing property, or exception.
return;
}
@@ -3017,14 +3019,6 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
CreateFunc(isolate, tag_name, WebAssemblyTag, has_prototype,
SideEffectType::kHasNoSideEffect);
tag_constructor->shared().set_length(1);
- auto result =
- Object::SetProperty(isolate, webassembly, tag_name, tag_constructor,
- StoreOrigin::kNamed, Just(ShouldThrow::kDontThrow));
- if (result.is_null()) {
- // Setting the {Tag} constructor failed. We just bail out.
- return;
- }
- // Install the constructor on the context.
context->set_wasm_tag_constructor(*tag_constructor);
Handle<JSObject> tag_proto =
SetupConstructor(isolate, tag_constructor, i::WASM_TAG_OBJECT_TYPE,
@@ -3032,6 +3026,12 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
if (enabled_features.has_type_reflection()) {
InstallFunc(isolate, tag_proto, "type", WebAssemblyTagType, 0);
}
+ LookupIterator it(isolate, webassembly, tag_name, LookupIterator::OWN);
+ Maybe<bool> result = JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, tag_constructor, DONT_ENUM, Just(kDontThrow));
+ // This could still fail if the object was non-extensible, but now we
+ // return anyway so there's no need to even check.
+ USE(result);
}
}
#undef ASSIGN
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index c04a431004..b2ee4d3958 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -40,7 +40,7 @@ constexpr size_t kV8MaxWasmDataSegments = 100000;
// Also, do not use this limit to validate declared memory, use
// kSpecMaxMemoryPages for that.
constexpr size_t kV8MaxWasmMemoryPages = kSystemPointerSize == 4
- ? 32767 // = 2 GiB
+ ? 32767 // = 2 GiB - 64Kib
: 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 4d4a487485..d8723f2f1f 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -274,6 +274,8 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
globals_(zone),
exceptions_(zone),
signature_map_(zone),
+ current_recursive_group_start_(-1),
+ recursive_groups_(zone),
start_function_index_(-1),
min_memory_size_(16),
max_memory_size_(0),
@@ -593,10 +595,24 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// == Emit types =============================================================
if (types_.size() > 0) {
size_t start = EmitSection(kTypeSectionCode, buffer);
- buffer->write_size(types_.size());
+ size_t type_count = types_.size();
+ for (auto pair : recursive_groups_) {
+ // Every rec. group counts as one type entry.
+ type_count -= pair.second - 1;
+ }
+
+ buffer->write_size(type_count);
+
+ for (uint32_t i = 0; i < types_.size(); i++) {
+ auto recursive_group = recursive_groups_.find(i);
+
+ if (recursive_group != recursive_groups_.end()) {
+ buffer->write_u8(kWasmRecursiveTypeGroupCode);
+ buffer->write_u32v(recursive_group->second);
+ }
+
+ const TypeDefinition& type = types_[i];
- // TODO(7748): Add support for recursive groups.
- for (const TypeDefinition& type : types_) {
if (type.supertype != kNoSuperType) {
buffer->write_u8(kWasmSubtypeCode);
buffer->write_u8(1); // The supertype count is always 1.
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 9ac13891fc..dbf2eb0ccd 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -359,6 +359,22 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
void SetMaxMemorySize(uint32_t value);
void SetHasSharedMemory();
+ void StartRecursiveTypeGroup() {
+ DCHECK_EQ(current_recursive_group_start_, -1);
+ current_recursive_group_start_ = static_cast<int>(types_.size());
+ }
+
+ void EndRecursiveTypeGroup() {
+ // Make sure we are in a recursive group.
+ DCHECK_NE(current_recursive_group_start_, -1);
+ // Make sure the current recursive group has at least one element.
+ DCHECK_GT(static_cast<int>(types_.size()), current_recursive_group_start_);
+ recursive_groups_.emplace(
+ current_recursive_group_start_,
+ static_cast<uint32_t>(types_.size()) - current_recursive_group_start_);
+ current_recursive_group_start_ = -1;
+ }
+
// Writing methods.
void WriteTo(ZoneBuffer* buffer) const;
void WriteAsmJsOffsetTable(ZoneBuffer* buffer) const;
@@ -455,6 +471,9 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmGlobal> globals_;
ZoneVector<int> exceptions_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
+ int current_recursive_group_start_;
+  // Maps the first type index of a recursive group to the group's size.
+ ZoneUnorderedMap<uint32_t, uint32_t> recursive_groups_;
int start_function_index_;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
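
To illustrate the bookkeeping introduced above: a recursive group covering types [start, start + size) is recorded as the pair {start -> size}, and when the type section is emitted each group counts as a single entry, hence the "type_count -= pair.second - 1" adjustment in the .cc hunk. Below is a standalone sketch of that arithmetic, not V8 code; the type count and group map are invented for the example.

#include <cstdint>
#include <cstdio>
#include <unordered_map>

int main() {
  const uint32_t num_types = 7;
  // One recursive group starting at type index 2 and spanning 3 types.
  const std::unordered_map<uint32_t, uint32_t> recursive_groups{{2, 3}};

  uint32_t type_count = num_types;
  for (const auto& pair : recursive_groups) {
    // Every recursive group counts as a single type-section entry.
    type_count -= pair.second - 1;
  }
  std::printf("emitted type entries: %u\n", type_count);  // prints 5
  return 0;
}
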
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 868f50bd8f..613dbc3c70 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -364,6 +364,25 @@ struct TypeDefinition {
const StructType* struct_type;
const ArrayType* array_type;
};
+
+ bool operator==(const TypeDefinition& other) const {
+ if (supertype != other.supertype || kind != other.kind) {
+ return false;
+ }
+ switch (kind) {
+ case kFunction:
+ return *function_sig == *other.function_sig;
+ case kStruct:
+ return *struct_type == *other.struct_type;
+ case kArray:
+ return *array_type == *other.array_type;
+ }
+ }
+
+ bool operator!=(const TypeDefinition& other) const {
+ return !(*this == other);
+ }
+
uint32_t supertype;
Kind kind;
};
@@ -424,15 +443,15 @@ struct V8_EXPORT_PRIVATE WasmModule {
? signature_map.FindOrInsert(*type.function_sig)
: 0;
canonicalized_type_ids.push_back(canonical_id);
+ // Canonical type will be computed later.
+ isorecursive_canonical_type_ids.push_back(kNoSuperType);
}
bool has_type(uint32_t index) const { return index < types.size(); }
void add_signature(const FunctionSig* sig, uint32_t supertype) {
- types.push_back(TypeDefinition(sig, supertype));
DCHECK_NOT_NULL(sig);
- uint32_t canonical_id = signature_map.FindOrInsert(*sig);
- canonicalized_type_ids.push_back(canonical_id);
+ add_type(TypeDefinition(sig, supertype));
}
bool has_signature(uint32_t index) const {
return index < types.size() &&
@@ -444,9 +463,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
void add_struct_type(const StructType* type, uint32_t supertype) {
- types.push_back(TypeDefinition(type, supertype));
- // No canonicalization for structs.
- canonicalized_type_ids.push_back(0);
+ DCHECK_NOT_NULL(type);
+ add_type(TypeDefinition(type, supertype));
}
bool has_struct(uint32_t index) const {
return index < types.size() && types[index].kind == TypeDefinition::kStruct;
@@ -457,9 +475,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
void add_array_type(const ArrayType* type, uint32_t supertype) {
- types.push_back(TypeDefinition(type, supertype));
- // No canonicalization for arrays.
- canonicalized_type_ids.push_back(0);
+ DCHECK_NOT_NULL(type);
+ add_type(TypeDefinition(type, supertype));
}
bool has_array(uint32_t index) const {
return index < types.size() && types[index].kind == TypeDefinition::kArray;
@@ -478,11 +495,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
std::vector<TypeDefinition> types; // by type index
- // Map from each type index to the index of its corresponding canonical index.
- // Canonical indices do not correspond to types.
- // Note: right now, only functions are canonicalized, and arrays and structs
- // map to 0.
+ // TODO(7748): Unify the following two arrays.
+ // Maps each type index to a canonical index for purposes of call_indirect.
std::vector<uint32_t> canonicalized_type_ids;
+ // Maps each type index to its global (cross-module) canonical index as per
+ // isorecursive type canonicalization.
+ std::vector<uint32_t> isorecursive_canonical_type_ids;
// Canonicalizing map for signature indexes.
SignatureMap signature_map;
std::vector<WasmFunction> functions;
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index caaeafbffb..3d5164beb3 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -120,7 +120,7 @@ void WasmGlobalObject::set_type(wasm::ValueType value) {
set_raw_type(static_cast<int>(value.raw_bit_field()));
}
-int WasmGlobalObject::type_size() const { return type().element_size_bytes(); }
+int WasmGlobalObject::type_size() const { return type().value_kind_size(); }
Address WasmGlobalObject::address() const {
DCHECK_NE(type(), wasm::kWasmAnyRef);
@@ -590,7 +590,7 @@ int WasmArray::SizeFor(Map map, int length) {
uint32_t WasmArray::element_offset(uint32_t index) {
DCHECK_LE(index, length());
return WasmArray::kHeaderSize +
- index * type()->element_type().element_size_bytes();
+ index * type()->element_type().value_kind_size();
}
Address WasmArray::ElementAddress(uint32_t index) {
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index deeab21028..cfd79072fb 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -1054,7 +1054,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_tagged_buffer(*tagged_buffer);
} else {
DCHECK(maybe_tagged_buffer.is_null());
- uint32_t type_size = type.element_size_bytes();
+ uint32_t type_size = type.value_kind_size();
Handle<JSArrayBuffer> untagged_buffer;
if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
@@ -1594,7 +1594,7 @@ wasm::WasmValue WasmStruct::GetFieldValue(uint32_t index) {
wasm::WasmValue WasmArray::GetElement(uint32_t index) {
wasm::ValueType element_type = type()->element_type();
int element_offset =
- WasmArray::kHeaderSize + index * element_type.element_size_bytes();
+ WasmArray::kHeaderSize + index * element_type.value_kind_size();
Address element_address = GetFieldAddress(element_offset);
using wasm::Simd128;
switch (element_type.kind()) {
@@ -1810,6 +1810,17 @@ Handle<WasmSuspenderObject> WasmSuspenderObject::New(Isolate* isolate) {
suspender->set_continuation(ReadOnlyRoots(isolate).undefined_value());
suspender->set_parent(ReadOnlyRoots(isolate).undefined_value());
suspender->set_state(Inactive);
+ // Instantiate the callable object which resumes this Suspender. This will be
+ // used implicitly as the onFulfilled callback of the returned JS promise.
+ Handle<WasmOnFulfilledData> function_data =
+ isolate->factory()->NewWasmOnFulfilledData(suspender);
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForWasmOnFulfilled(
+ function_data);
+ Handle<Context> context(isolate->native_context());
+ Handle<JSObject> resume =
+ Factory::JSFunctionBuilder{isolate, shared, context}.Build();
+ suspender->set_resume(*resume);
return suspender;
}
@@ -1820,7 +1831,7 @@ namespace {
constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
size_t ComputeEncodedElementSize(wasm::ValueType type) {
- size_t byte_size = type.element_size_bytes();
+ size_t byte_size = type.value_kind_size();
DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
return byte_size / kBytesPerExceptionValuesArrayElement;
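
For scale, kBytesPerExceptionValuesArrayElement is 2, so with the renamed accessor an i32 value (4 bytes) occupies 2 encoded array elements and an i64 or f64 value (8 bytes) occupies 4; ComputeEncodedElementSize simply divides the value's byte size by that constant.
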
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 5cff97a24a..7cfd82f323 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -986,7 +986,7 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
}
static int MaxLength(const wasm::ArrayType* type) {
- return MaxLength(type->element_type().element_size_bytes());
+ return MaxLength(type->element_type().value_kind_size());
}
static inline void EncodeElementSizeInMap(int element_size, Map map);
@@ -1036,14 +1036,6 @@ class WasmSuspenderObject
#undef DECL_OPTIONAL_ACCESSORS
namespace wasm {
-
-Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
- int struct_index, MaybeHandle<Map> rtt_parent,
- Handle<WasmInstanceObject> instance);
-Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
- int array_index, MaybeHandle<Map> rtt_parent,
- Handle<WasmInstanceObject> instance);
-
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
Handle<Object> value, ValueType expected,
const char** error_message);
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index 9fa8f0fb2e..399ec53ecd 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -103,6 +103,7 @@ extern class WasmContinuationObject extends Struct {
extern class WasmSuspenderObject extends JSObject {
continuation: WasmContinuationObject|Undefined;
parent: WasmSuspenderObject|Undefined;
+ resume: JSObject;
state: Smi; // 0: Inactive, 1: Active, 2: Suspended.
}
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index bbd512296e..2776520f50 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -5,6 +5,7 @@
#include "src/wasm/wasm-subtyping.h"
#include "src/base/platform/mutex.h"
+#include "src/wasm/canonical-types.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/zone-containers.h"
@@ -18,17 +19,15 @@ V8_INLINE bool EquivalentIndices(uint32_t index1, uint32_t index2,
const WasmModule* module1,
const WasmModule* module2) {
DCHECK(index1 != index2 || module1 != module2);
- // TODO(7748): Canonicalize types.
- return false;
+ if (!FLAG_wasm_type_canonicalization) return false;
+ return module1->isorecursive_canonical_type_ids[index1] ==
+ module2->isorecursive_canonical_type_ids[index2];
}
bool ValidStructSubtypeDefinition(uint32_t subtype_index,
uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
- // TODO(7748): Figure out the cross-module story.
- if (sub_module != super_module) return false;
-
const StructType* sub_struct = sub_module->types[subtype_index].struct_type;
const StructType* super_struct =
super_module->types[supertype_index].struct_type;
@@ -56,9 +55,6 @@ bool ValidArraySubtypeDefinition(uint32_t subtype_index,
uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
- // TODO(7748): Figure out the cross-module story.
- if (sub_module != super_module) return false;
-
const ArrayType* sub_array = sub_module->types[subtype_index].array_type;
const ArrayType* super_array =
super_module->types[supertype_index].array_type;
@@ -78,9 +74,6 @@ bool ValidFunctionSubtypeDefinition(uint32_t subtype_index,
uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
- // TODO(7748): Figure out the cross-module story.
- if (sub_module != super_module) return false;
-
const FunctionSig* sub_func = sub_module->types[subtype_index].function_sig;
const FunctionSig* super_func =
super_module->types[supertype_index].function_sig;
@@ -219,15 +212,17 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
// equality; here we catch (ref $x) being a subtype of (ref null $x).
if (sub_module == super_module && sub_index == super_index) return true;
- // TODO(7748): Figure out cross-module story.
- if (sub_module != super_module) return false;
-
- uint32_t explicit_super = sub_module->supertype(sub_index);
- while (true) {
- if (explicit_super == super_index) return true;
- // Reached the end of the explicitly defined inheritance chain.
- if (explicit_super == kNoSuperType) return false;
- explicit_super = sub_module->supertype(explicit_super);
+ if (FLAG_wasm_type_canonicalization) {
+ return GetTypeCanonicalizer()->IsCanonicalSubtype(sub_index, super_index,
+ sub_module, super_module);
+ } else {
+ uint32_t explicit_super = sub_module->supertype(sub_index);
+ while (true) {
+ if (explicit_super == super_index) return true;
+ // Reached the end of the explicitly defined inheritance chain.
+ if (explicit_super == kNoSuperType) return false;
+ explicit_super = sub_module->supertype(explicit_super);
+ }
}
}
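
The else branch above is the pre-existing behavior: walk the subtype's explicitly declared supertype chain until the candidate supertype is found or the chain ends, while the new branch delegates to the TypeCanonicalizer. Below is a standalone sketch of that chain walk, not V8 code; IsExplicitSubtype, the supertypes vector, and the kNoSuperType value are made up here and merely stand in for the module's supertype table and V8's sentinel.

#include <cstdint>
#include <vector>

constexpr uint32_t kNoSuperType = ~0u;  // sentinel: no declared supertype

bool IsExplicitSubtype(uint32_t sub_index, uint32_t super_index,
                       const std::vector<uint32_t>& supertypes) {
  uint32_t current = supertypes[sub_index];
  while (true) {
    if (current == super_index) return true;
    if (current == kNoSuperType) return false;  // end of the chain
    current = supertypes[current];
  }
}
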
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 76dff87e24..6e195359b1 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -27,14 +27,16 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
// - Two numeric types are equivalent iff they are equal.
// - T(ht1) ~ T(ht2) iff ht1 ~ ht2 for T in {ref, optref, rtt}.
// Equivalence of heap types ht1 ~ ht2 is defined as follows:
-// - Two heap types are equivalent iff they are equal.
-// - TODO(7748): Implement iso-recursive canonicalization.
-V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
- const WasmModule* module1,
- const WasmModule* module2);
+// - Two non-index heap types are equivalent iff they are equal.
+// - Two indexed heap types are equivalent iff they are iso-recursively
+//   equivalent.
+V8_NOINLINE V8_EXPORT_PRIVATE bool EquivalentTypes(ValueType type1,
+ ValueType type2,
+ const WasmModule* module1,
+ const WasmModule* module2);
-// Checks if subtype, defined in module1, is a subtype of supertype, defined in
-// module2.
+// Checks if {subtype}, defined in {module1}, is a subtype of {supertype},
+// defined in {module2}.
// Subtyping between value types is described by the following rules
// (structural subtyping):
// - numeric types are subtype-related iff they are equal.
@@ -54,7 +56,7 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
// - All structs are subtypes of data.
// - All arrays are subtypes of array.
// - An indexed heap type h1 is a subtype of indexed heap type h2 if h2 is
-// transitively an explicit supertype of h1.
+// transitively an explicit canonical supertype of h1.
V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -62,7 +64,7 @@ V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
return IsSubtypeOfImpl(subtype, supertype, sub_module, super_module);
}
-// Checks if 'subtype' is a subtype of 'supertype' (both defined in module).
+// Checks if {subtype} is a subtype of {supertype} (both defined in {module}).
V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
const WasmModule* module) {
// If the types are trivially identical, exit early.
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 1192da1bea..a27905b6cd 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -116,7 +116,7 @@ class WasmValue {
WasmValue(byte* raw_bytes, ValueType type) : type_(type), bit_pattern_{} {
DCHECK(type_.is_numeric());
- memcpy(bit_pattern_, raw_bytes, type.element_size_bytes());
+ memcpy(bit_pattern_, raw_bytes, type.value_kind_size());
}
WasmValue(Handle<Object> ref, ValueType type) : type_(type), bit_pattern_{} {
@@ -140,14 +140,14 @@ class WasmValue {
return type_ == other.type_ &&
!memcmp(bit_pattern_, other.bit_pattern_,
type_.is_reference() ? sizeof(Handle<Object>)
- : type_.element_size_bytes());
+ : type_.value_kind_size());
}
void CopyTo(byte* to) const {
STATIC_ASSERT(sizeof(float) == sizeof(Float32));
STATIC_ASSERT(sizeof(double) == sizeof(Float64));
DCHECK(type_.is_numeric());
- memcpy(to, bit_pattern_, type_.element_size_bytes());
+ memcpy(to, bit_pattern_, type_.value_kind_size());
}
// If {packed_type.is_packed()}, create a new value of {packed_type()}.
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 09aaf7fcc6..1b0a07e313 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -1345,7 +1345,7 @@ void WebSnapshotDeserializer::Throw(const char* message) {
}
bool WebSnapshotDeserializer::Deserialize(
- MaybeHandle<FixedArray> external_references) {
+ MaybeHandle<FixedArray> external_references, bool skip_exports) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize);
if (external_references.ToHandle(&external_references_handle_)) {
external_references_ = *external_references_handle_;
@@ -1364,7 +1364,7 @@ bool WebSnapshotDeserializer::Deserialize(
if (FLAG_trace_web_snapshot) {
timer.Start();
}
- if (!DeserializeSnapshot()) {
+ if (!DeserializeSnapshot(skip_exports)) {
return false;
}
if (!DeserializeScript()) {
@@ -1379,7 +1379,7 @@ bool WebSnapshotDeserializer::Deserialize(
return true;
}
-bool WebSnapshotDeserializer::DeserializeSnapshot() {
+bool WebSnapshotDeserializer::DeserializeSnapshot(bool skip_exports) {
deferred_references_ = ArrayList::New(isolate_, 30);
const void* magic_bytes;
@@ -1397,7 +1397,7 @@ bool WebSnapshotDeserializer::DeserializeSnapshot() {
DeserializeObjects();
DeserializeClasses();
ProcessDeferredReferences();
- DeserializeExports();
+ DeserializeExports(skip_exports);
DCHECK_EQ(0, deferred_references_->Length());
return !has_error();
@@ -2041,13 +2041,29 @@ void WebSnapshotDeserializer::DeserializeArrays() {
}
}
-void WebSnapshotDeserializer::DeserializeExports() {
+void WebSnapshotDeserializer::DeserializeExports(bool skip_exports) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Exports);
uint32_t count;
if (!deserializer_.ReadUint32(&count) || count > kMaxItemCount) {
Throw("Malformed export table");
return;
}
+
+ if (skip_exports) {
+ // In the skip_exports mode, we read the exports but don't do anything about
+ // them. This is useful for stress testing; otherwise the GlobalDictionary
+ // handling below dominates.
+ for (uint32_t i = 0; i < count; ++i) {
+ Handle<String> export_name(ReadString(true), isolate_);
+ // No deferred references should occur at this point, since all objects
+ // have been deserialized.
+ Object export_value = ReadValue();
+ USE(export_name);
+ USE(export_value);
+ }
+ return;
+ }
+
// Pre-reserve the space for the properties we're going to add to the global
// object.
Handle<JSGlobalObject> global = isolate_->global_object();
diff --git a/deps/v8/src/web-snapshot/web-snapshot.h b/deps/v8/src/web-snapshot/web-snapshot.h
index 4dc0f3f091..0e18733e3a 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.h
+++ b/deps/v8/src/web-snapshot/web-snapshot.h
@@ -278,7 +278,8 @@ class V8_EXPORT WebSnapshotDeserializer
size_t buffer_size);
WebSnapshotDeserializer(Isolate* isolate, Handle<Script> snapshot_as_script);
~WebSnapshotDeserializer();
- bool Deserialize(MaybeHandle<FixedArray> external_references = {});
+ bool Deserialize(MaybeHandle<FixedArray> external_references = {},
+ bool skip_exports = false);
// For inspecting the state after deserializing a snapshot.
uint32_t string_count() const { return string_count_; }
@@ -304,7 +305,7 @@ class V8_EXPORT WebSnapshotDeserializer
base::Vector<const uint8_t> buffer);
base::Vector<const uint8_t> ExtractScriptBuffer(
Isolate* isolate, Handle<Script> snapshot_as_script);
- bool DeserializeSnapshot();
+ bool DeserializeSnapshot(bool skip_exports);
bool DeserializeScript();
WebSnapshotDeserializer(const WebSnapshotDeserializer&) = delete;
@@ -323,7 +324,7 @@ class V8_EXPORT WebSnapshotDeserializer
void DeserializeClasses();
void DeserializeArrays();
void DeserializeObjects();
- void DeserializeExports();
+ void DeserializeExports(bool skip_exports);
Object ReadValue(
Handle<HeapObject> object_for_deferred_reference = Handle<HeapObject>(),
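
Tying the web-snapshot pieces above together, a caller inside V8 that wants to deserialize a snapshot without installing its exports on the global object (useful for stress testing, as the .cc comment explains) would pass the new flag through Deserialize. This is a hypothetical, non-standalone sketch; isolate and snapshot_script are assumed to come from the surrounding code.

WebSnapshotDeserializer deserializer(isolate, snapshot_script);
// skip_exports: read the export table but do not touch the global object.
bool ok = deserializer.Deserialize(/*external_references=*/{},
                                   /*skip_exports=*/true);
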
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index d5d20fdfe9..02f475a497 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -144,13 +144,11 @@ v8_source_set("cctest_sources") {
"heap/test-compaction.cc",
"heap/test-concurrent-allocation.cc",
"heap/test-concurrent-marking.cc",
- "heap/test-embedder-tracing.cc",
"heap/test-external-string-tracker.cc",
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
"heap/test-invalidated-slots.cc",
"heap/test-iterators.cc",
- "heap/test-lab.cc",
"heap/test-mark-compact.cc",
"heap/test-memory-measurement.cc",
"heap/test-page-promotion.cc",
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 99bca0a29b..5a0895b457 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -35,6 +35,11 @@
#include "include/v8-isolate.h"
#include "include/v8-local-handle.h"
#include "include/v8-locker.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/logging.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
#include "src/base/strings.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -62,19 +67,23 @@
enum InitializationState { kUnset, kUninitialized, kInitialized };
static InitializationState initialization_state_ = kUnset;
-CcTest* CcTest::last_ = nullptr;
+static v8::base::LazyInstance<CcTestMapType>::type g_cctests =
+ LAZY_INSTANCE_INITIALIZER;
+
+std::unordered_map<std::string, CcTest*>* tests_ =
+ new std::unordered_map<std::string, CcTest*>();
bool CcTest::initialize_called_ = false;
v8::base::Atomic32 CcTest::isolate_used_ = 0;
v8::ArrayBuffer::Allocator* CcTest::allocator_ = nullptr;
v8::Isolate* CcTest::isolate_ = nullptr;
+v8::Platform* CcTest::default_platform_ = nullptr;
CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
- bool enabled, bool initialize)
+ bool enabled, bool initialize,
+ TestPlatformFactory* test_platform_factory)
: callback_(callback),
- name_(name),
- enabled_(enabled),
initialize_(initialize),
- prev_(last_) {
+ test_platform_factory_(test_platform_factory) {
// Find the base name of this test (const_cast required on Windows).
char *basename = strrchr(const_cast<char *>(file), '/');
if (!basename) {
@@ -89,25 +98,57 @@ CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
char *extension = strrchr(basename, '.');
if (extension) *extension = 0;
// Install this test in the list of tests
- file_ = basename;
- prev_ = last_;
- last_ = this;
+
+ if (enabled) {
+ auto it =
+ g_cctests.Pointer()->emplace(std::string(basename) + "/" + name, this);
+ CHECK_WITH_MSG(it.second, "Test with same name already exists");
+ }
+ v8::internal::DeleteArray(basename);
}
+void CcTest::Run(const char* snapshot_directory) {
+ v8::V8::InitializeICUDefaultLocation(snapshot_directory);
+ std::unique_ptr<v8::Platform> underlying_default_platform(
+ v8::platform::NewDefaultPlatform());
+ default_platform_ = underlying_default_platform.get();
+ std::unique_ptr<v8::Platform> platform;
+ if (test_platform_factory_) {
+ platform = test_platform_factory_();
+ } else {
+ platform = std::move(underlying_default_platform);
+ }
+ v8::V8::InitializePlatform(platform.get());
+#ifdef V8_SANDBOX
+ CHECK(v8::V8::InitializeSandbox());
+#endif
+ cppgc::InitializeProcess(platform->GetPageAllocator());
+ v8::V8::Initialize();
+ v8::V8::InitializeExternalStartupData(snapshot_directory);
+
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+ constexpr bool kUseDefaultTrapHandler = true;
+ CHECK(v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultTrapHandler));
+#endif // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+
+ CcTest::set_array_buffer_allocator(
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+
+ v8::RegisterExtension(std::make_unique<i::PrintExtension>());
+ v8::RegisterExtension(std::make_unique<i::ProfilerExtension>());
+ v8::RegisterExtension(std::make_unique<i::TraceExtension>());
-void CcTest::Run() {
if (!initialize_) {
CHECK_NE(initialization_state_, kInitialized);
initialization_state_ = kUninitialized;
- CHECK_NULL(CcTest::isolate_);
+ CHECK_NULL(isolate_);
} else {
CHECK_NE(initialization_state_, kUninitialized);
initialization_state_ = kInitialized;
- if (isolate_ == nullptr) {
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = allocator_;
- isolate_ = v8::Isolate::New(create_params);
- }
+ CHECK_NULL(isolate_);
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = allocator_;
+ isolate_ = v8::Isolate::New(create_params);
isolate_->Enter();
}
#ifdef DEBUG
@@ -129,7 +170,15 @@ void CcTest::Run() {
EmptyMessageQueues(isolate_);
}
isolate_->Exit();
+ isolate_->Dispose();
+ isolate_ = nullptr;
+ } else {
+ CHECK_NULL(isolate_);
}
+
+ v8::V8::Dispose();
+ cppgc::ShutdownProcess();
+ v8::V8::DisposePlatform();
}
i::Heap* CcTest::heap() { return i_isolate()->heap(); }
@@ -197,10 +246,6 @@ void CcTest::InitializeVM() {
v8::Context::New(CcTest::isolate())->Enter();
}
-void CcTest::TearDown() {
- if (isolate_ != nullptr) isolate_->Dispose();
-}
-
v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extension_flags,
v8::Isolate* isolate) {
const char* extension_names[kMaxExtensions];
@@ -290,17 +335,10 @@ i::Handle<i::JSFunction> Optimize(
return function;
}
-static void PrintTestList(CcTest* current) {
- if (current == nullptr) return;
- PrintTestList(current->prev());
- printf("%s/%s\n", current->file(), current->name());
-}
-
-
-static void SuggestTestHarness(int tests) {
- if (tests == 0) return;
- printf("Running multiple tests in sequence is deprecated and may cause "
- "bogus failure. Consider using tools/run-tests.py instead.\n");
+static void PrintTestList() {
+ for (const auto& entry : g_cctests.Get()) {
+ printf("%s\n", entry.first.c_str());
+ }
}
int main(int argc, char* argv[]) {
@@ -333,82 +371,44 @@ int main(int argc, char* argv[]) {
perfetto::Tracing::Initialize(init_args);
#endif // V8_USE_PERFETTO
- v8::V8::InitializeICUDefaultLocation(argv[0]);
- std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
- v8::V8::InitializePlatform(platform.get());
-#ifdef V8_SANDBOX
- CHECK(v8::V8::InitializeSandbox());
-#endif
- cppgc::InitializeProcess(platform->GetPageAllocator());
using HelpOptions = v8::internal::FlagList::HelpOptions;
v8::internal::FlagList::SetFlagsFromCommandLine(
&argc, argv, true, HelpOptions(HelpOptions::kExit, usage.c_str()));
- v8::V8::Initialize();
- v8::V8::InitializeExternalStartupData(argv[0]);
-
-#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
- constexpr bool kUseDefaultTrapHandler = true;
- CHECK(v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultTrapHandler));
-#endif // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
-
- CcTest::set_array_buffer_allocator(
- v8::ArrayBuffer::Allocator::NewDefaultAllocator());
-
- v8::RegisterExtension(std::make_unique<i::PrintExtension>());
- v8::RegisterExtension(std::make_unique<i::ProfilerExtension>());
- v8::RegisterExtension(std::make_unique<i::TraceExtension>());
- int tests_run = 0;
- bool print_run_count = true;
- for (int i = 1; i < argc; i++) {
- char* arg = argv[i];
+ const char* test_arg = nullptr;
+ for (int i = 1; i < argc; ++i) {
+ const char* arg = argv[i];
if (strcmp(arg, "--list") == 0) {
- PrintTestList(CcTest::last());
- print_run_count = false;
-
- } else {
- char* arg_copy = v8::internal::StrDup(arg);
- char* testname = strchr(arg_copy, '/');
- if (testname) {
- // Split the string in two by nulling the slash and then run
- // exact matches.
- *testname = 0;
- char* file = arg_copy;
- char* name = testname + 1;
- CcTest* test = CcTest::last();
- while (test != nullptr) {
- if (test->enabled()
- && strcmp(test->file(), file) == 0
- && strcmp(test->name(), name) == 0) {
- SuggestTestHarness(tests_run++);
- test->Run();
- }
- test = test->prev();
- }
-
- } else {
- // Run all tests with the specified file or test name.
- char* file_or_name = arg_copy;
- CcTest* test = CcTest::last();
- while (test != nullptr) {
- if (test->enabled()
- && (strcmp(test->file(), file_or_name) == 0
- || strcmp(test->name(), file_or_name) == 0)) {
- SuggestTestHarness(tests_run++);
- test->Run();
- }
- test = test->prev();
- }
- }
- v8::internal::DeleteArray<char>(arg_copy);
+ PrintTestList();
+ return 0;
+ }
+ if (*arg == '-') {
+ // Ignore flags that weren't removed by SetFlagsFromCommandLine
+ continue;
}
+ if (test_arg != nullptr) {
+ fprintf(stderr,
+ "Running multiple tests in sequence is not allowed. Use "
+ "tools/run-tests.py instead.\n");
+ return 1;
+ }
+ test_arg = arg;
}
- if (print_run_count && tests_run != 1) {
- printf("Ran %i tests.\n", tests_run);
+
+ if (test_arg == nullptr) {
+ printf("Ran 0 tests.\n");
+ return 0;
}
- CcTest::TearDown();
- v8::V8::Dispose();
- v8::V8::DisposePlatform();
+
+ auto it = g_cctests.Get().find(test_arg);
+ if (it == g_cctests.Get().end()) {
+ fprintf(stderr, "ERROR: Did not find test %s.\n", test_arg);
+ return 1;
+ }
+
+ CcTest* test = it->second;
+ test->Run(argv[0]);
+
return 0;
}
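
A short usage note on the rewritten main() above: the harness now runs at most one test per process. Invoking the binary with --list prints every registered test as a file/name pair, exactly one such pair may be passed per invocation, passing several test names fails with an error instead of running them in sequence, and passing none simply reports that zero tests ran.
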
@@ -458,3 +458,90 @@ ManualGCScope::~ManualGCScope() {
i::FLAG_detect_ineffective_gcs_near_heap_limit =
flag_detect_ineffective_gcs_near_heap_limit_;
}
+
+v8::PageAllocator* TestPlatform::GetPageAllocator() {
+ return CcTest::default_platform()->GetPageAllocator();
+}
+
+void TestPlatform::OnCriticalMemoryPressure() {
+ CcTest::default_platform()->OnCriticalMemoryPressure();
+}
+
+bool TestPlatform::OnCriticalMemoryPressure(size_t length) {
+ return CcTest::default_platform()->OnCriticalMemoryPressure(length);
+}
+
+int TestPlatform::NumberOfWorkerThreads() {
+ return CcTest::default_platform()->NumberOfWorkerThreads();
+}
+
+std::shared_ptr<v8::TaskRunner> TestPlatform::GetForegroundTaskRunner(
+ v8::Isolate* isolate) {
+ return CcTest::default_platform()->GetForegroundTaskRunner(isolate);
+}
+
+void TestPlatform::CallOnWorkerThread(std::unique_ptr<v8::Task> task) {
+ CcTest::default_platform()->CallOnWorkerThread(std::move(task));
+}
+
+void TestPlatform::CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
+ double delay_in_seconds) {
+ CcTest::default_platform()->CallDelayedOnWorkerThread(std::move(task),
+ delay_in_seconds);
+}
+
+std::unique_ptr<v8::JobHandle> TestPlatform::PostJob(
+ v8::TaskPriority priority, std::unique_ptr<v8::JobTask> job_task) {
+ return CcTest::default_platform()->PostJob(priority, std::move(job_task));
+}
+
+double TestPlatform::MonotonicallyIncreasingTime() {
+ return CcTest::default_platform()->MonotonicallyIncreasingTime();
+}
+
+double TestPlatform::CurrentClockTimeMillis() {
+ return CcTest::default_platform()->CurrentClockTimeMillis();
+}
+
+bool TestPlatform::IdleTasksEnabled(v8::Isolate* isolate) {
+ return CcTest::default_platform()->IdleTasksEnabled(isolate);
+}
+
+v8::TracingController* TestPlatform::GetTracingController() {
+ return CcTest::default_platform()->GetTracingController();
+}
+
+namespace {
+
+class ShutdownTask final : public v8::Task {
+ public:
+ ShutdownTask(v8::base::Semaphore* destruction_barrier,
+ v8::base::Mutex* destruction_mutex,
+ v8::base::ConditionVariable* destruction_condition,
+ bool* can_destruct)
+ : destruction_barrier_(destruction_barrier),
+ destruction_mutex_(destruction_mutex),
+ destruction_condition_(destruction_condition),
+ can_destruct_(can_destruct)
+
+ {}
+
+ void Run() final {
+ destruction_barrier_->Signal();
+ {
+ v8::base::MutexGuard guard(destruction_mutex_);
+ while (!*can_destruct_) {
+ destruction_condition_->Wait(destruction_mutex_);
+ }
+ }
+ destruction_barrier_->Signal();
+ }
+
+ private:
+ v8::base::Semaphore* const destruction_barrier_;
+ v8::base::Mutex* const destruction_mutex_;
+ v8::base::ConditionVariable* const destruction_condition_;
+ bool* const can_destruct_;
+};
+
+} // namespace
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 735fc9cc9a..3e4cd3cf12 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -69,23 +69,40 @@ class JSHeapBroker;
} // namespace v8
#ifndef TEST
-#define TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, true, true); \
+#define TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, true, true, \
+ nullptr); \
static void Test##Name()
#endif
#ifndef UNINITIALIZED_TEST
-#define UNINITIALIZED_TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, true, false); \
+#define UNINITIALIZED_TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, true, false, \
+ nullptr); \
static void Test##Name()
#endif
+#ifndef TEST_WITH_PLATFORM
+#define TEST_WITH_PLATFORM(Name, PlatformClass) \
+ static void Test##Name(PlatformClass& platform); \
+ static void TestWithoutPlatform##Name() { \
+ Test##Name(*static_cast<PlatformClass*>(i::V8::GetCurrentPlatform())); \
+ } \
+ CcTest register_test_##Name(TestWithoutPlatform##Name, __FILE__, #Name, \
+ true, true, \
+ []() -> std::unique_ptr<TestPlatform> { \
+ return std::make_unique<PlatformClass>(); \
+ }); \
+ static void Test##Name(PlatformClass& platform)
+#endif
+
#ifndef DISABLED_TEST
-#define DISABLED_TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, false, true); \
+#define DISABLED_TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, false, true, \
+ nullptr); \
static void Test##Name()
#endif
@@ -97,9 +114,9 @@ class JSHeapBroker;
// to correctly associate the tests with the test suite using them.
// 2. To actually execute the tests, create an instance of the class
// containing the MEMBER_TESTs.
-#define MEMBER_TEST(Name) \
- CcTest register_test_##Name = \
- CcTest(Test##Name, kTestFileName, #Name, true, true); \
+#define MEMBER_TEST(Name) \
+ CcTest register_test_##Name = \
+ CcTest(Test##Name, kTestFileName, #Name, true, true, nullptr); \
static void Test##Name()
#define EXTENSION_LIST(V) \
@@ -119,18 +136,19 @@ static constexpr const char* kExtensionName[kMaxExtensions] = {
EXTENSION_LIST(DEFINE_EXTENSION_NAME)};
#undef DEFINE_EXTENSION_NAME
+class CcTest;
+class TestPlatform;
+
+using CcTestMapType = std::map<std::string, CcTest*>;
+
class CcTest {
public:
using TestFunction = void();
+ using TestPlatformFactory = std::unique_ptr<TestPlatform>();
CcTest(TestFunction* callback, const char* file, const char* name,
- bool enabled, bool initialize);
- ~CcTest() { i::DeleteArray(file_); }
- void Run();
- static CcTest* last() { return last_; }
- CcTest* prev() { return prev_; }
- const char* file() { return file_; }
- const char* name() { return name_; }
- bool enabled() { return enabled_; }
+ bool enabled, bool initialize,
+ TestPlatformFactory* platform_factory = nullptr);
+ void Run(const char* argv0);
static v8::Isolate* isolate() {
CHECK_NOT_NULL(isolate_);
@@ -150,6 +168,8 @@ class CcTest {
static i::Heap* heap();
static i::ReadOnlyHeap* read_only_heap();
+ static v8::Platform* default_platform() { return default_platform_; }
+
static void AddGlobalFunction(v8::Local<v8::Context> env, const char* name,
v8::FunctionCallback callback);
static void CollectGarbage(i::AllocationSpace space,
@@ -178,9 +198,6 @@ class CcTest {
// This must be called first in a test.
static void InitializeVM();
- // Only for UNINITIALIZED_TESTs
- static void DisableAutomaticDispose();
-
// Helper function to configure a context.
// Must be in a HandleScope.
static v8::Local<v8::Context> NewContext(
@@ -196,21 +213,17 @@ class CcTest {
return NewContext(CcTestExtensionFlags{extensions}, isolate);
}
- static void TearDown();
-
private:
- static CcTest* last_;
+ static std::unordered_map<std::string, CcTest*>* tests_;
static v8::ArrayBuffer::Allocator* allocator_;
static v8::Isolate* isolate_;
+ static v8::Platform* default_platform_;
static bool initialize_called_;
static v8::base::Atomic32 isolate_used_;
TestFunction* callback_;
- const char* file_;
- const char* name_;
- bool enabled_;
bool initialize_;
- CcTest* prev_;
+ TestPlatformFactory* test_platform_factory_;
friend int main(int argc, char** argv);
friend class ManualGCScope;
@@ -632,8 +645,7 @@ static inline void DisableDebugger(v8::Isolate* isolate) {
static inline void EmptyMessageQueues(v8::Isolate* isolate) {
- while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
- isolate)) {
+ while (v8::platform::PumpMessageLoop(CcTest::default_platform(), isolate)) {
}
}
@@ -700,76 +712,32 @@ class V8_NODISCARD ManualGCScope {
const bool flag_detect_ineffective_gcs_near_heap_limit_;
};
-// This is an abstract base class that can be overridden to implement a test
-// platform. It delegates all operations to a given platform at the time
-// of construction.
+// This is a base class that can be overridden to implement a test platform. It
+// delegates all operations to the default platform.
class TestPlatform : public v8::Platform {
public:
- TestPlatform(const TestPlatform&) = delete;
- TestPlatform& operator=(const TestPlatform&) = delete;
+ ~TestPlatform() override = default;
// v8::Platform implementation.
- v8::PageAllocator* GetPageAllocator() override {
- return old_platform()->GetPageAllocator();
- }
-
- void OnCriticalMemoryPressure() override {
- old_platform()->OnCriticalMemoryPressure();
- }
-
- bool OnCriticalMemoryPressure(size_t length) override {
- return old_platform()->OnCriticalMemoryPressure(length);
- }
-
- int NumberOfWorkerThreads() override {
- return old_platform()->NumberOfWorkerThreads();
- }
-
+ v8::PageAllocator* GetPageAllocator() override;
+ void OnCriticalMemoryPressure() override;
+ bool OnCriticalMemoryPressure(size_t length) override;
+ int NumberOfWorkerThreads() override;
std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
- v8::Isolate* isolate) override {
- return old_platform()->GetForegroundTaskRunner(isolate);
- }
-
- void CallOnWorkerThread(std::unique_ptr<v8::Task> task) override {
- old_platform()->CallOnWorkerThread(std::move(task));
- }
-
+ v8::Isolate* isolate) override;
+ void CallOnWorkerThread(std::unique_ptr<v8::Task> task) override;
void CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
- double delay_in_seconds) override {
- old_platform()->CallDelayedOnWorkerThread(std::move(task),
- delay_in_seconds);
- }
-
+ double delay_in_seconds) override;
std::unique_ptr<v8::JobHandle> PostJob(
v8::TaskPriority priority,
- std::unique_ptr<v8::JobTask> job_task) override {
- return old_platform()->PostJob(priority, std::move(job_task));
- }
-
- double MonotonicallyIncreasingTime() override {
- return old_platform()->MonotonicallyIncreasingTime();
- }
-
- double CurrentClockTimeMillis() override {
- return old_platform()->CurrentClockTimeMillis();
- }
-
- bool IdleTasksEnabled(v8::Isolate* isolate) override {
- return old_platform()->IdleTasksEnabled(isolate);
- }
-
- v8::TracingController* GetTracingController() override {
- return old_platform()->GetTracingController();
- }
+ std::unique_ptr<v8::JobTask> job_task) override;
+ double MonotonicallyIncreasingTime() override;
+ double CurrentClockTimeMillis() override;
+ bool IdleTasksEnabled(v8::Isolate* isolate) override;
+ v8::TracingController* GetTracingController() override;
protected:
- TestPlatform() : old_platform_(i::V8::GetCurrentPlatform()) {}
- ~TestPlatform() override { i::V8::SetPlatformForTesting(old_platform_); }
-
- v8::Platform* old_platform() const { return old_platform_; }
-
- private:
- std::atomic<v8::Platform*> old_platform_;
+ TestPlatform() = default;
};
#if defined(USE_SIMULATOR)
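
Putting the cctest changes together, the new TEST_WITH_PLATFORM macro lets a test supply its own TestPlatform subclass; CcTest::Run instantiates it via the registered factory before V8 is initialized, and the macro hands the active instance to the test body. The sketch below is hypothetical usage inside the cctest suite, not standalone code; MyTestPlatform and the test name are made up for illustration.

class MyTestPlatform final : public TestPlatform {
 public:
  MyTestPlatform() = default;
  // Override individual v8::Platform methods here to intercept them;
  // TestPlatform forwards everything else to the default platform.
};

TEST_WITH_PLATFORM(UsesCustomPlatform, MyTestPlatform) {
  // `platform` is the MyTestPlatform instance created by the factory.
  USE(platform);
}
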
diff --git a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
index 2fab39506d..ab65b5d485 100644
--- a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
@@ -72,13 +72,10 @@ TEST(DeoptInMiddleOfBasicBlock) {
tester.StartBlock();
InstructionCode jmp_opcode = kArchJmp;
- // Dummy node for FlagsContinuation::ForDeoptimize (which won't accept
- // nullptr).
- Node* node = Node::New(zone, 0, nullptr, 0, nullptr, false);
- FeedbackSource feedback;
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeKind::kEager, DeoptimizeReason::kUnknown, node->id(),
- feedback, node);
+ Node* dummy_frame_state = Node::New(zone, 0, nullptr, 0, nullptr, false);
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimizeForTesting(
+ kEqual, DeoptimizeReason::kUnknown, dummy_frame_state->id(),
+ FeedbackSource{}, dummy_frame_state);
jmp_opcode = cont.Encode(jmp_opcode);
Instruction* jmp_inst = Instruction::New(zone, jmp_opcode);
tester.CheckIsDeopt(jmp_inst);
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index b5fd905a34..a57f074eac 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -10,6 +10,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-barrier.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/safepoint.h"
#include "test/cctest/cctest.h"
@@ -189,6 +190,8 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
SafepointScope scope(heap);
+ MarkingBarrier::PublishAll(heap);
+ marking->MarkRootsForTesting();
marking->FinalizeIncrementally();
}
}
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 56a8389c2c..2a8e450765 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -210,7 +210,7 @@ TEST(ArrayBuffer_UnregisterDuringSweep) {
}
TEST(ArrayBuffer_NonLivePromotion) {
- if (!FLAG_incremental_marking) return;
+ if (!FLAG_incremental_marking || FLAG_separate_gc_phases) return;
FLAG_concurrent_array_buffer_sweeping = false;
ManualGCScope manual_gc_scope;
// The test verifies that the marking state is preserved when promoting
@@ -248,7 +248,7 @@ TEST(ArrayBuffer_NonLivePromotion) {
}
TEST(ArrayBuffer_LivePromotion) {
- if (!FLAG_incremental_marking) return;
+ if (!FLAG_incremental_marking || FLAG_separate_gc_phases) return;
FLAG_concurrent_array_buffer_sweeping = false;
ManualGCScope manual_gc_scope;
// The test verifies that the marking state is preserved when promoting
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
deleted file mode 100644
index 45e025996f..0000000000
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ /dev/null
@@ -1,1020 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <unordered_map>
-#include <vector>
-
-#include "include/v8-context.h"
-#include "include/v8-function.h"
-#include "include/v8-local-handle.h"
-#include "include/v8-object.h"
-#include "include/v8-persistent-handle.h"
-#include "include/v8-template.h"
-#include "include/v8-traced-handle.h"
-#include "src/api/api-inl.h"
-#include "src/common/allow-deprecated.h"
-#include "src/handles/global-handles.h"
-#include "src/heap/embedder-tracing.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/safepoint.h"
-#include "src/objects/module.h"
-#include "src/objects/objects-inl.h"
-#include "src/objects/script.h"
-#include "src/objects/shared-function-info.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/heap/heap-utils.h"
-
-START_ALLOW_USE_DEPRECATED()
-
-namespace v8 {
-
-namespace internal {
-namespace heap {
-
-namespace {
-
-v8::Local<v8::Object> ConstructTraceableJSApiObject(
- v8::Local<v8::Context> context, void* first_field, void* second_field) {
- v8::EscapableHandleScope scope(context->GetIsolate());
- v8::Local<v8::FunctionTemplate> function_t =
- v8::FunctionTemplate::New(context->GetIsolate());
- v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
- instance_t->SetInternalFieldCount(2);
- v8::Local<v8::Function> function =
- function_t->GetFunction(context).ToLocalChecked();
- v8::Local<v8::Object> instance =
- function->NewInstance(context).ToLocalChecked();
- instance->SetAlignedPointerInInternalField(0, first_field);
- instance->SetAlignedPointerInInternalField(1, second_field);
- CHECK(!instance.IsEmpty());
- i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
- CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
- return scope.Escape(instance);
-}
-
-enum class TracePrologueBehavior { kNoop, kCallV8WriteBarrier };
-
-class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
- public:
- TestEmbedderHeapTracer() = default;
- TestEmbedderHeapTracer(TracePrologueBehavior prologue_behavior,
- v8::Global<v8::Array> array)
- : prologue_behavior_(prologue_behavior), array_(std::move(array)) {}
-
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {
- registered_from_v8_.insert(registered_from_v8_.end(),
- embedder_fields.begin(), embedder_fields.end());
- }
-
- void AddReferenceForTracing(v8::TracedReference<v8::Value>* ref) {
- to_register_with_v8_references_.push_back(ref);
- }
-
- bool AdvanceTracing(double deadline_in_ms) final {
- for (auto ref : to_register_with_v8_references_) {
- RegisterEmbedderReference(ref->As<v8::Data>());
- }
- to_register_with_v8_references_.clear();
- return true;
- }
-
- bool IsTracingDone() final { return to_register_with_v8_references_.empty(); }
-
- void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
- if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
- auto local = array_.Get(isolate());
- local
- ->Set(local->GetCreationContext().ToLocalChecked(), 0,
- v8::Object::New(isolate()))
- .Check();
- }
- }
-
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-
- bool IsRegisteredFromV8(void* first_field) const {
- for (auto pair : registered_from_v8_) {
- if (pair.first == first_field) return true;
- }
- return false;
- }
-
- void DoNotConsiderAsRootForScavenge(v8::TracedReference<v8::Value>* handle) {
- handle->SetWrapperClassId(17);
- non_root_handles_.push_back(handle);
- }
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- return handle.WrapperClassId() != 17;
- }
-
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- for (auto* non_root_handle : non_root_handles_) {
- if (*non_root_handle == handle) {
- non_root_handle->Reset();
- }
- }
- }
-
- private:
- std::vector<std::pair<void*, void*>> registered_from_v8_;
- std::vector<v8::TracedReference<v8::Value>*> to_register_with_v8_references_;
- TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop;
- v8::Global<v8::Array> array_;
- std::vector<v8::TracedReference<v8::Value>*> non_root_handles_;
-};
-
-} // namespace
-
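The helper class above implements the (deprecated) v8::EmbedderHeapTracer contract that the following tests drive. Purely for orientation, a minimal sketch of how an embedder attaches such a tracer outside of cctest is given below; the function name and surrounding setup are illustrative assumptions, not part of this change.

// Sketch, not part of the patch: assumes V8 is initialized and `isolate` is a
// live v8::Isolate*. heap::TemporaryEmbedderHeapTracerScope in the tests below
// performs the same attach/detach under the hood.
#include "include/v8-embedder-heap.h"
#include "include/v8-isolate.h"

void AttachTracerSketch(v8::Isolate* isolate,
                        v8::EmbedderHeapTracer* tracer) {
  isolate->SetEmbedderHeapTracer(tracer);   // attach for the tracer's lifetime
  // ... run JS that creates wrapper objects with two aligned internal fields
  // so that RegisterV8References() reports them back during marking ...
  isolate->SetEmbedderHeapTracer(nullptr);  // detach before `tracer` goes away
}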
-TEST(V8RegisteringEmbedderReference) {
- // Tests that wrappers are properly registered with the embedder heap
- // tracer.
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- void* first_and_second_field = reinterpret_cast<void*>(0x2);
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- CHECK(!api_object.IsEmpty());
- CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST(EmbedderRegisteringV8Reference) {
- // Tests that references that are registered by the embedder heap tracer are
- // considered live by V8.
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- auto handle = std::make_unique<v8::TracedReference<v8::Value>>();
- {
- v8::HandleScope inner_scope(isolate);
- v8::Local<v8::Value> o =
- v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- handle->Reset(isolate, o);
- }
- tracer.AddReferenceForTracing(handle.get());
- CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(!handle->IsEmpty());
-}
-
-namespace {
-
-void ResurrectingFinalizer(
- const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
- data.GetParameter()->ClearWeak();
-}
-
-} // namespace
-
-TEST(TracingInRevivedSubgraph) {
- // Tests that wrappers are traced when they are contained within a subgraph
- // that is revived by a finalizer.
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- v8::Global<v8::Object> g;
- void* first_and_second_field = reinterpret_cast<void*>(0x4);
- {
- v8::HandleScope inner_scope(isolate);
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- CHECK(!api_object.IsEmpty());
- v8::Local<v8::Object> o =
- v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- o->Set(context, v8_str("link"), api_object).FromJust();
- g.Reset(isolate, o);
- g.SetWeak(&g, ResurrectingFinalizer, v8::WeakCallbackType::kFinalizer);
- }
- CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST(TracingInEphemerons) {
- // Tests that wrappers that are part of ephemerons are traced.
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- v8::Local<v8::Object> key =
- v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- void* first_and_second_field = reinterpret_cast<void*>(0x8);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- Handle<JSWeakMap> weak_map = i_isolate->factory()->NewJSWeakMap();
- {
- v8::HandleScope inner_scope(isolate);
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- CHECK(!api_object.IsEmpty());
- Handle<JSObject> js_key =
- handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate);
- Handle<JSReceiver> js_api_object = v8::Utils::OpenHandle(*api_object);
- int32_t hash = js_key->GetOrCreateHash(i_isolate).value();
- JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
- }
- CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST(FinalizeTracingIsNoopWhenNotMarking) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- Isolate* i_isolate = CcTest::i_isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
-
- // Finalize a potentially running garbage collection.
- i_isolate->heap()->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kTesting);
- CHECK(i_isolate->heap()->incremental_marking()->IsStopped());
-
- int gc_counter = i_isolate->heap()->gc_count();
- tracer.FinalizeTracing();
- CHECK(i_isolate->heap()->incremental_marking()->IsStopped());
- CHECK_EQ(gc_counter, i_isolate->heap()->gc_count());
-}
-
-TEST(FinalizeTracingWhenMarking) {
- if (!FLAG_incremental_marking) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- Heap* heap = CcTest::i_isolate()->heap();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
-
- // Finalize a potentially running garbage collection.
- heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
- if (heap->mark_compact_collector()->sweeping_in_progress()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted(
- MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
- }
- heap->tracer()->StopCycleIfNeeded();
- CHECK(heap->incremental_marking()->IsStopped());
-
- i::IncrementalMarking* marking = heap->incremental_marking();
- {
- SafepointScope scope(heap);
- heap->tracer()->StartCycle(
- GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector cctest", GCTracer::MarkingType::kIncremental);
- marking->Start(GarbageCollectionReason::kTesting);
- }
-
- // Sweeping is not running, so we should immediately start marking.
- CHECK(marking->IsMarking());
- tracer.FinalizeTracing();
- CHECK(marking->IsStopped());
-}
-
-namespace {
-
-void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- v8::TracedReference<v8::Object>* handle) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(v8::Object::New(isolate));
- CHECK(!object.IsEmpty());
- *handle = v8::TracedReference<v8::Object>(isolate, object);
- CHECK(!handle->IsEmpty());
-}
-
-template <typename T>
-void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- T* global) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(
- ConstructTraceableJSApiObject(context, nullptr, nullptr));
- CHECK(!object.IsEmpty());
- *global = T(isolate, object);
- CHECK(!global->IsEmpty());
-}
-
-enum class SurvivalMode { kSurvives, kDies };
-
-template <typename ModifierFunction, typename ConstructTracedReferenceFunction>
-void TracedReferenceTest(v8::Isolate* isolate,
- ConstructTracedReferenceFunction construct_function,
- ModifierFunction modifier_function,
- void (*gc_function)(), SurvivalMode survives) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- auto* global_handles =
- reinterpret_cast<i::Isolate*>(isolate)->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
- construct_function(isolate, context, handle.get());
- CHECK(InCorrectGeneration(isolate, *handle));
- modifier_function(*handle);
- const size_t after_modification_count = global_handles->handles_count();
- gc_function();
- // Cannot check the handle as it is not explicitly cleared by the GC. Instead,
- // check the handles count.
- CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
- after_modification_count == global_handles->handles_count());
- CHECK_IMPLIES(survives == SurvivalMode::kDies,
- initial_count == global_handles->handles_count());
-}
-
-} // namespace
-
-TEST(TracedReferenceReset) {
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
-
- v8::TracedReference<v8::Object> handle;
- ConstructJSObject(isolate, isolate->GetCurrentContext(), &handle);
- CHECK(!handle.IsEmpty());
- handle.Reset();
- CHECK(handle.IsEmpty());
-}
-
-TEST(TracedReferenceCopyReferences) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope outer_scope(isolate);
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto handle1 = std::make_unique<v8::TracedReference<v8::Value>>();
- {
- v8::HandleScope scope(isolate);
- handle1->Reset(isolate, v8::Object::New(isolate));
- }
- auto handle2 = std::make_unique<v8::TracedReference<v8::Value>>(*handle1);
- auto handle3 = std::make_unique<v8::TracedReference<v8::Value>>();
- *handle3 = *handle2;
- CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!handle1->IsEmpty());
- CHECK_EQ(*handle1, *handle2);
- CHECK_EQ(*handle2, *handle3);
- {
- v8::HandleScope scope(isolate);
- auto tmp = v8::Local<v8::Value>::New(isolate, *handle3);
- CHECK(!tmp.IsEmpty());
- InvokeMarkSweep();
- }
- CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!handle1->IsEmpty());
- CHECK_EQ(*handle1, *handle2);
- CHECK_EQ(*handle2, *handle3);
- InvokeMarkSweep();
- CHECK_EQ(initial_count, global_handles->handles_count());
-}
-
-TEST(TracedReferenceToUnmodifiedJSObjectDiesOnMarkSweep) {
- // When stressing incremental marking, a write barrier may keep the object
- // alive.
- if (FLAG_stress_incremental_marking) return;
-
- CcTest::InitializeVM();
- TracedReferenceTest(
- CcTest::isolate(), ConstructJSObject,
- [](const TracedReference<v8::Object>&) {}, [] { InvokeMarkSweep(); },
- SurvivalMode::kDies);
-}
-
-TEST(TracedReferenceToUnmodifiedJSObjectSurvivesMarkSweepWhenHeldAlive) {
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> strong_global;
- TracedReferenceTest(
- CcTest::isolate(), ConstructJSObject,
- [isolate, &strong_global](const TracedReference<v8::Object>& handle) {
- v8::HandleScope scope(isolate);
- strong_global = v8::Global<v8::Object>(isolate, handle.Get(isolate));
- },
- []() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
-}
-
-TEST(TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TracedReferenceTest(
- CcTest::isolate(), ConstructJSObject,
- [](const TracedReference<v8::Object>&) {}, []() { InvokeScavenge(); },
- SurvivalMode::kSurvives);
-}
-
-TEST(TracedReferenceToUnmodifiedJSObjectSurvivesScavengeWhenExcludedFromRoots) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- TracedReferenceTest(
- CcTest::isolate(), ConstructJSObject,
- [&tracer](const TracedReference<v8::Object>& handle) {
- tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
- },
- []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
-}
-
-TEST(TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- TracedReferenceTest(
- CcTest::isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
- [](const TracedReference<v8::Object>&) {}, []() { InvokeScavenge(); },
- SurvivalMode::kSurvives);
-}
-
-TEST(
- TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- TracedReferenceTest(
- CcTest::isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
- [&tracer](const TracedReference<v8::Object>& handle) {
- tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
- },
- []() { InvokeScavenge(); }, SurvivalMode::kDies);
-}
-
-TEST(TracedReferenceWrapperClassId) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
-
- v8::TracedReference<v8::Object> traced;
- ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
- CHECK_EQ(0, traced.WrapperClassId());
- traced.SetWrapperClassId(17);
- CHECK_EQ(17, traced.WrapperClassId());
-}
-
-TEST(TracedReferenceHandlesMarking) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- auto live = std::make_unique<v8::TracedReference<v8::Value>>();
- auto dead = std::make_unique<v8::TracedReference<v8::Value>>();
- live->Reset(isolate, v8::Undefined(isolate));
- dead->Reset(isolate, v8::Undefined(isolate));
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- {
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.AddReferenceForTracing(live.get());
- const size_t initial_count = global_handles->handles_count();
- InvokeMarkSweep();
- const size_t final_count = global_handles->handles_count();
- // Handles are black allocated, so the first GC does not collect them.
- CHECK_EQ(initial_count, final_count);
- }
-
- {
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.AddReferenceForTracing(live.get());
- const size_t initial_count = global_handles->handles_count();
- InvokeMarkSweep();
- const size_t final_count = global_handles->handles_count();
- CHECK_EQ(initial_count, final_count + 1);
- }
-}
-
-TEST(TracedReferenceHandlesDoNotLeak) {
- // TracedReference handles are not cleared by the destructor of the embedder
- // object. To avoid leaks we need to mark these handles during GC.
- // This test checks that unmarked handles do not leak.
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- auto ref = std::make_unique<v8::TracedReference<v8::Value>>();
- ref->Reset(isolate, v8::Undefined(isolate));
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- const size_t initial_count = global_handles->handles_count();
- // We need two GCs because handles are black allocated.
- InvokeMarkSweep();
- InvokeMarkSweep();
- const size_t final_count = global_handles->handles_count();
- CHECK_EQ(initial_count, final_count + 1);
-}
-
-namespace {
-
-class TracedReferenceVisitor final
- : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
- public:
- ~TracedReferenceVisitor() override = default;
-
- void VisitTracedReference(const TracedReference<Value>& value) final {
- if (value.WrapperClassId() == 57) {
- count_++;
- }
- }
-
- size_t count() const { return count_; }
-
- private:
- size_t count_ = 0;
-};
-
-} // namespace
-
-TEST(TracedReferenceIteration) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
-
- auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
- ConstructJSObject(isolate, isolate->GetCurrentContext(), handle.get());
- CHECK(!handle->IsEmpty());
- handle->SetWrapperClassId(57);
- TracedReferenceVisitor visitor;
- {
- v8::HandleScope new_scope(isolate);
- tracer.IterateTracedGlobalHandles(&visitor);
- }
- CHECK_EQ(1, visitor.count());
-}
-
-TEST(TracePrologueCallingIntoV8WriteBarrier) {
- // Regression test: https://crbug.com/940003
- if (!FLAG_incremental_marking) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Global<v8::Array> global;
- {
- v8::HandleScope new_scope(isolate);
- auto local = v8::Array::New(isolate, 10);
- global.Reset(isolate, local);
- }
- TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier,
- std::move(global));
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- SimulateIncrementalMarking(CcTest::i_isolate()->heap());
- // Finish the GC to avoid removing the tracer while a GC is running, which may
- // end up in an infinite loop because of unprocessed objects.
- heap::InvokeMarkSweep();
-}
-
-TEST(BasicTracedReference) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- char* memory = new char[sizeof(v8::TracedReference<v8::Value>)];
- auto* traced = new (memory) v8::TracedReference<v8::Value>();
- {
- v8::HandleScope new_scope(isolate);
- v8::Local<v8::Value> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- CHECK(traced->IsEmpty());
- *traced = v8::TracedReference<v8::Value>(isolate, object);
- CHECK(!traced->IsEmpty());
- CHECK_EQ(initial_count + 1, global_handles->handles_count());
- }
- traced->~TracedReference<v8::Value>();
- CHECK_EQ(initial_count + 1, global_handles->handles_count());
- // GC should clear the handle.
- heap::InvokeMarkSweep();
- CHECK_EQ(initial_count, global_handles->handles_count());
- delete[] memory;
-}
-
-namespace {
-
-class EmptyEmbedderHeapTracer : public v8::EmbedderHeapTracer {
- public:
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
-
- bool AdvanceTracing(double deadline_in_ms) final { return true; }
- bool IsTracingDone() final { return true; }
- void TracePrologue(EmbedderHeapTracer::TraceFlags) final {}
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-};
-
-// EmbedderHeapTracer that can optimize Scavenger handling when used with
-// TracedReference.
-class EmbedderHeapTracerNoDestructorNonTracingClearing final
- : public EmptyEmbedderHeapTracer {
- public:
- explicit EmbedderHeapTracerNoDestructorNonTracingClearing(
- uint16_t class_id_to_optimize)
- : class_id_to_optimize_(class_id_to_optimize) {}
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- return handle.WrapperClassId() != class_id_to_optimize_;
- }
-
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- if (handle.WrapperClassId() != class_id_to_optimize_) return;
-
- // Convention (for test): Objects that are optimized have their first field
- // set as a back pointer.
- BasicTracedReference<v8::Value>* original_handle =
- reinterpret_cast<BasicTracedReference<v8::Value>*>(
- v8::Object::GetAlignedPointerFromInternalField(
- handle.As<v8::Object>(), 0));
- original_handle->Reset();
- }
-
- private:
- uint16_t class_id_to_optimize_;
-};
-
-template <typename T>
-void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
- uint16_t optimized_class_id,
- T* optimized_handle,
- T* non_optimized_handle) {
- v8::HandleScope scope(isolate);
-
- v8::Local<v8::Object> optimized_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), optimized_handle, nullptr));
- CHECK(optimized_handle->IsEmpty());
- *optimized_handle = T(isolate, optimized_object);
- CHECK(!optimized_handle->IsEmpty());
- optimized_handle->SetWrapperClassId(optimized_class_id);
-
- v8::Local<v8::Object> non_optimized_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- CHECK(non_optimized_handle->IsEmpty());
- *non_optimized_handle = T(isolate, non_optimized_object);
- CHECK(!non_optimized_handle->IsEmpty());
-}
-
-} // namespace
-
-TEST(TracedReferenceNoDestructorReclaimedOnScavenge) {
- if (FLAG_single_generation) return;
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- constexpr uint16_t kClassIdToOptimize = 23;
- EmbedderHeapTracerNoDestructorNonTracingClearing tracer(kClassIdToOptimize);
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto* optimized_handle = new v8::TracedReference<v8::Value>();
- auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
- SetupOptimizedAndNonOptimizedHandle(isolate, kClassIdToOptimize,
- optimized_handle, non_optimized_handle);
- CHECK_EQ(initial_count + 2, global_handles->handles_count());
- heap::InvokeScavenge();
- CHECK_EQ(initial_count + 1, global_handles->handles_count());
- CHECK(optimized_handle->IsEmpty());
- delete optimized_handle;
- CHECK(!non_optimized_handle->IsEmpty());
- non_optimized_handle->Reset();
- delete non_optimized_handle;
- CHECK_EQ(initial_count, global_handles->handles_count());
-}
-
-namespace {
-
-template <typename T>
-V8_NOINLINE void OnStackTest(TestEmbedderHeapTracer* tracer) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> observer;
- T stack_ref;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_ref.Reset(isolate, object);
- observer.Reset(isolate, object);
- observer.SetWeak();
- }
- CHECK(!observer.IsEmpty());
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
-}
-
-V8_NOINLINE void CreateTracedReferenceInDeepStack(
- v8::Isolate* isolate, v8::Global<v8::Object>* observer) {
- v8::TracedReference<v8::Value> stack_ref;
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_ref.Reset(isolate, object);
- observer->Reset(isolate, object);
- observer->SetWeak();
-}
-
-V8_NOINLINE void TracedReferenceNotifyEmptyStackTest(
- TestEmbedderHeapTracer* tracer) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> observer;
- CreateTracedReferenceInDeepStack(isolate, &observer);
- CHECK(!observer.IsEmpty());
- reinterpret_cast<i::Isolate*>(isolate)
- ->heap()
- ->local_embedder_heap_tracer()
- ->NotifyEmptyEmbedderStack();
- heap::InvokeMarkSweep();
- CHECK(observer.IsEmpty());
-}
-
-enum class Operation {
- kCopy,
- kMove,
-};
-
-template <typename T>
-void PerformOperation(Operation op, T* lhs, T* rhs) {
- switch (op) {
- case Operation::kMove:
- *lhs = std::move(*rhs);
- break;
- case Operation::kCopy:
- *lhs = *rhs;
- rhs->Reset();
- break;
- }
-}
-
-enum class TargetHandling {
- kNonInitialized,
- kInitializedYoungGen,
- kInitializedOldGen
-};
-
-V8_NOINLINE void StackToHeapTest(TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle;
- v8::TracedReference<v8::Value>* heap_handle =
- new v8::TracedReference<v8::Value>();
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- CHECK(InCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!FLAG_single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- heap::InvokeScavenge();
- heap::InvokeScavenge();
- CHECK(!i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- heap_handle->Reset(isolate, to_object);
- }
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle.Reset(isolate, object);
- observer.Reset(isolate, object);
- observer.SetWeak();
- }
- CHECK(!observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- PerformOperation(op, heap_handle, &stack_handle);
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- heap::InvokeMarkSweep();
- CHECK(observer.IsEmpty());
- delete heap_handle;
-}
-
-V8_NOINLINE void HeapToStackTest(TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle;
- v8::TracedReference<v8::Value>* heap_handle =
- new v8::TracedReference<v8::Value>();
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- CHECK(InCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!FLAG_single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- heap::InvokeScavenge();
- heap::InvokeScavenge();
- CHECK(!i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- stack_handle.Reset(isolate, to_object);
- }
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- heap_handle->Reset(isolate, object);
- observer.Reset(isolate, object);
- observer.SetWeak();
- }
- CHECK(!observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- PerformOperation(op, &stack_handle, heap_handle);
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- stack_handle.Reset();
- heap::InvokeMarkSweep();
- CHECK(observer.IsEmpty());
- delete heap_handle;
-}
-
-V8_NOINLINE void StackToStackTest(TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle1;
- v8::TracedReference<v8::Value> stack_handle2;
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- CHECK(InCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!FLAG_single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- heap::InvokeScavenge();
- heap::InvokeScavenge();
- CHECK(!i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- stack_handle2.Reset(isolate, to_object);
- }
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle1.Reset(isolate, object);
- observer.Reset(isolate, object);
- observer.SetWeak();
- }
- CHECK(!observer.IsEmpty());
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- PerformOperation(op, &stack_handle2, &stack_handle1);
- heap::InvokeMarkSweep();
- CHECK(!observer.IsEmpty());
- stack_handle2.Reset();
- heap::InvokeMarkSweep();
- CHECK(observer.IsEmpty());
-}
-
-V8_NOINLINE void TracedReferenceCleanedTest(TestEmbedderHeapTracer* tracer) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- const size_t before =
- CcTest::i_isolate()->global_handles()->NumberOfOnStackHandlesForTesting();
- for (int i = 0; i < 100; i++) {
- v8::TracedReference<v8::Value> stack_handle;
- stack_handle.Reset(isolate, object);
- }
- CHECK_EQ(before + 1, CcTest::i_isolate()
- ->global_handles()
- ->NumberOfOnStackHandlesForTesting());
-}
-
-} // namespace
-
-TEST(TracedReferenceOnStack) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- OnStackTest<v8::TracedReference<v8::Value>>(&tracer);
-}
-
-TEST(TracedReferenceCleaned) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- TracedReferenceCleanedTest(&tracer);
-}
-
-TEST(TracedReferenceMove) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- StackToHeapTest(&tracer, Operation::kMove, TargetHandling::kNonInitialized);
- StackToHeapTest(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest(&tracer, Operation::kMove, TargetHandling::kNonInitialized);
- HeapToStackTest(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- StackToStackTest(&tracer, Operation::kMove, TargetHandling::kNonInitialized);
- StackToStackTest(&tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest(&tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST(TracedReferenceCopy) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- StackToHeapTest(&tracer, Operation::kCopy, TargetHandling::kNonInitialized);
- StackToHeapTest(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest(&tracer, Operation::kCopy, TargetHandling::kNonInitialized);
- HeapToStackTest(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- StackToStackTest(&tracer, Operation::kCopy, TargetHandling::kNonInitialized);
- StackToStackTest(&tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest(&tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST(NotifyEmptyStack) {
- ManualGCScope manual_gc;
- CcTest::InitializeVM();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(CcTest::isolate(),
- &tracer);
- tracer.SetStackStart(&manual_gc);
- TracedReferenceNotifyEmptyStackTest(&tracer);
-}
-
-} // namespace heap
-} // namespace internal
-} // namespace v8
-
-END_ALLOW_USE_DEPRECATED()
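The deleted cctest file above revolves around v8::TracedReference. For orientation only, the sketch below restates the public lifecycle those tests exercise (Reset from a Local, Get, explicit Reset); the function name and setup are illustrative assumptions rather than patch content.

// Sketch, not part of the patch: assumes `isolate` is a live v8::Isolate* with
// an entered context. Whether the referenced object survives a GC depends on
// the embedder tracer / unified-heap configuration, as the deleted tests show.
#include "include/v8-local-handle.h"
#include "include/v8-object.h"
#include "include/v8-traced-handle.h"

void TracedReferenceSketch(v8::Isolate* isolate) {
  v8::TracedReference<v8::Object> ref;
  {
    v8::HandleScope scope(isolate);
    ref.Reset(isolate, v8::Object::New(isolate));  // bind to a fresh object
  }
  {
    v8::HandleScope scope(isolate);
    v8::Local<v8::Object> local = ref.Get(isolate);  // materialize a Local again
    (void)local;
  }
  ref.Reset();  // drop the reference explicitly
}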
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index f90922b8a5..e74e560e7a 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -48,6 +48,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-barrier.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/parked-scope.h"
@@ -1561,7 +1562,8 @@ TEST(TestInternalWeakLists) {
// Some flags turn Scavenge collections into Mark-sweep collections
// and hence are incompatible with this test case.
if (FLAG_gc_global || FLAG_stress_compaction ||
- FLAG_stress_incremental_marking || FLAG_single_generation)
+ FLAG_stress_incremental_marking || FLAG_single_generation ||
+ FLAG_separate_gc_phases)
return;
FLAG_retain_maps_for_n_gc = 0;
@@ -5450,8 +5452,9 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
}
bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
- bool result = heap->code_space()->EnsureLabMain(size_in_bytes,
- AllocationOrigin::kRuntime);
+ bool result = heap->code_space()->EnsureAllocation(
+ size_in_bytes, AllocationAlignment::kTaggedAligned,
+ AllocationOrigin::kRuntime, nullptr);
heap->code_space()->UpdateInlineAllocationLimit(0);
return result;
}
@@ -5709,6 +5712,7 @@ TEST(Regress598319) {
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
SafepointScope safepoint_scope(heap);
+ MarkingBarrier::PublishAll(heap);
marking->FinalizeIncrementally();
}
}
@@ -5828,7 +5832,7 @@ class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
};
TEST(Regress631969) {
- if (!FLAG_incremental_marking) return;
+ if (!FLAG_incremental_marking || FLAG_separate_gc_phases) return;
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
@@ -6255,7 +6259,7 @@ TEST(RememberedSet_InsertInLargePage) {
}
TEST(RememberedSet_InsertOnPromotingObjectToOld) {
- if (FLAG_single_generation) return;
+ if (FLAG_single_generation || FLAG_stress_incremental_marking) return;
FLAG_stress_concurrent_allocation = false; // For SealCurrentObjects.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -6281,11 +6285,12 @@ TEST(RememberedSet_InsertOnPromotingObjectToOld) {
CcTest::CollectGarbage(i::NEW_SPACE);
CHECK(heap->InOldSpace(*arr));
+ CHECK(heap->InYoungGeneration(arr->get(0)));
CHECK_EQ(1, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
TEST(RememberedSet_RemoveStaleOnScavenge) {
- if (FLAG_single_generation) return;
+ if (FLAG_single_generation || FLAG_stress_incremental_marking) return;
FLAG_stress_concurrent_allocation = false; // For SealCurrentObjects.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -6457,7 +6462,7 @@ HEAP_TEST(Regress670675) {
collector->EnsureSweepingCompleted(
MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
- heap->tracer()->StopCycleIfNeeded();
+ heap->tracer()->StopFullCycleIfNeeded();
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
if (marking->IsStopped()) {
SafepointScope safepoint_scope(heap);
@@ -6560,7 +6565,7 @@ Isolate* oom_isolate = nullptr;
void OOMCallback(const char* location, bool is_heap_oom) {
Heap* heap = oom_isolate->heap();
- size_t kSlack = heap->new_space() ? heap->new_space()->Capacity() : 0;
+ size_t kSlack = heap->new_space() ? heap->MaxSemiSpaceSize() : 0;
CHECK_LE(heap->OldGenerationCapacity(), kHeapLimit + kSlack);
CHECK_LE(heap->memory_allocator()->Size(), heap->MaxReserved() + kSlack);
base::OS::ExitProcess(0);
@@ -6765,9 +6770,9 @@ UNINITIALIZED_TEST(OutOfMemorySmallObjects) {
}
}
CHECK_LE(state.old_generation_capacity_at_oom,
- kOldGenerationLimit + state.new_space_capacity_at_oom);
- CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom +
- state.new_space_capacity_at_oom);
+ kOldGenerationLimit + heap->MaxSemiSpaceSize());
+ CHECK_LE(kOldGenerationLimit,
+ state.old_generation_capacity_at_oom + heap->MaxSemiSpaceSize());
CHECK_LE(
state.memory_allocator_size_at_oom,
MemoryAllocatorSizeFromHeapCapacity(state.old_generation_capacity_at_oom +
@@ -6987,7 +6992,7 @@ TEST(CodeObjectRegistry) {
TEST(Regress9701) {
ManualGCScope manual_gc_scope;
- if (!FLAG_incremental_marking) return;
+ if (!FLAG_incremental_marking || FLAG_separate_gc_phases) return;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
// Start with an empty new space.
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index 702d66560e..7bda4c808a 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -35,16 +35,10 @@ namespace heap {
class MockPlatform : public TestPlatform {
public:
- MockPlatform()
- : taskrunner_(new MockTaskRunner()),
- old_platform_(i::V8::GetCurrentPlatform()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
+ MockPlatform() : taskrunner_(new MockTaskRunner()) {}
~MockPlatform() override {
- i::V8::SetPlatformForTesting(old_platform_);
for (auto& task : worker_tasks_) {
- old_platform_->CallOnWorkerThread(std::move(task));
+ CcTest::default_platform()->CallOnWorkerThread(std::move(task));
}
worker_tasks_.clear();
}
@@ -106,17 +100,13 @@ class MockPlatform : public TestPlatform {
std::shared_ptr<MockTaskRunner> taskrunner_;
std::vector<std::unique_ptr<Task>> worker_tasks_;
- v8::Platform* old_platform_;
};
-UNINITIALIZED_TEST(IncrementalMarkingUsingTasks) {
+TEST_WITH_PLATFORM(IncrementalMarkingUsingTasks, MockPlatform) {
if (!i::FLAG_incremental_marking) return;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
FLAG_stress_incremental_marking = false;
- MockPlatform platform;
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
+ v8::Isolate* isolate = CcTest::isolate();
{
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = CcTest::NewContext(isolate);
@@ -140,7 +130,6 @@ UNINITIALIZED_TEST(IncrementalMarkingUsingTasks) {
}
CHECK(marking->IsStopped());
}
- isolate->Dispose();
}
} // namespace heap
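The hunks above drop the hand-rolled isolate and V8::SetPlatformForTesting() dance in favour of the TEST_WITH_PLATFORM fixture, which supplies the `platform` instance used inside the test body. The condensed sketch below only restates the post-patch MockPlatform shape already visible in the diff; it is illustrative, not additional patch content, and relies on the cctest helpers (TestPlatform, MockTaskRunner, CcTest) from this file.

// Condensed restatement of the post-patch pattern: the mock no longer installs
// itself as the current platform, and any worker tasks it collected are
// forwarded to the shared cctest platform on teardown.
class MockPlatformSketch : public TestPlatform {
 public:
  MockPlatformSketch() : taskrunner_(new MockTaskRunner()) {}
  ~MockPlatformSketch() override {
    for (auto& task : worker_tasks_) {
      CcTest::default_platform()->CallOnWorkerThread(std::move(task));
    }
    worker_tasks_.clear();
  }

 private:
  std::shared_ptr<MockTaskRunner> taskrunner_;
  std::vector<std::unique_ptr<Task>> worker_tasks_;
};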
diff --git a/deps/v8/test/cctest/heap/test-memory-measurement.cc b/deps/v8/test/cctest/heap/test-memory-measurement.cc
index a5a0e6b645..8ad4247514 100644
--- a/deps/v8/test/cctest/heap/test-memory-measurement.cc
+++ b/deps/v8/test/cctest/heap/test-memory-measurement.cc
@@ -132,10 +132,7 @@ namespace {
class MockPlatform : public TestPlatform {
public:
- MockPlatform() : TestPlatform(), mock_task_runner_(new MockTaskRunner()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
+ MockPlatform() : mock_task_runner_(new MockTaskRunner()) {}
std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
v8::Isolate*) override {
@@ -199,14 +196,10 @@ class MockMeasureMemoryDelegate : public v8::MeasureMemoryDelegate {
} // namespace
-TEST(RandomizedTimeout) {
- MockPlatform platform;
+TEST_WITH_PLATFORM(RandomizedTimeout, MockPlatform) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- // We have to create the isolate manually here. Using CcTest::isolate() would
- // let the isolate outlive MockPlatform, which may lead to a use-after-free on
- // the background thread.
- v8::Isolate* isolate = v8::Isolate::New(create_params);
+ v8::Isolate* isolate = CcTest::isolate();
std::vector<double> delays;
for (int i = 0; i < 10; i++) {
isolate->MeasureMemory(std::make_unique<MockMeasureMemoryDelegate>());
@@ -214,7 +207,6 @@ TEST(RandomizedTimeout) {
platform.PerformTask();
}
std::sort(delays.begin(), delays.end());
- isolate->Dispose();
CHECK_LT(delays[0], delays.back());
}
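The RandomizedTimeout test now drives v8::Isolate::MeasureMemory() on the shared CcTest isolate. For orientation, a minimal delegate sketch follows; the class is hypothetical and the method signatures are assumed to match v8::MeasureMemoryDelegate in include/v8-statistics.h for this V8 version.

// Sketch, not part of the patch. The signatures below are an assumption based
// on v8::MeasureMemoryDelegate as of this V8 version.
#include <cstddef>
#include <utility>
#include <vector>
#include "include/v8-context.h"
#include "include/v8-local-handle.h"
#include "include/v8-statistics.h"

class SketchMeasureMemoryDelegate : public v8::MeasureMemoryDelegate {
 public:
  // Measure every context in the isolate.
  bool ShouldMeasure(v8::Local<v8::Context> context) override { return true; }

  // Invoked once the (possibly randomly delayed) measurement completes.
  void MeasurementComplete(
      const std::vector<std::pair<v8::Local<v8::Context>, size_t>>&
          context_sizes_in_bytes,
      size_t unattributed_size_in_bytes) override {
    // A real embedder would report these sizes; the mock in the test above
    // only records when this callback fires.
  }
};

// Usage, mirroring the test:
//   isolate->MeasureMemory(std::make_unique<SketchMeasureMemoryDelegate>());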
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 9cc24525e2..37261c4cc0 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -27,6 +27,8 @@
#include <stdlib.h>
+#include <memory>
+
#include "include/v8-initialization.h"
#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
@@ -113,9 +115,8 @@ class V8_NODISCARD TestCodePageAllocatorScope {
static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
- size_t reserve_area_size, size_t commit_area_size,
- Executability executable, PageSize page_size,
- Space* space) {
+ size_t area_size, Executability executable,
+ PageSize page_size, LargeObjectSpace* space) {
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
TestCodePageAllocatorScope test_code_page_allocator_scope(
@@ -129,23 +130,23 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
size_t guard_size =
(executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
- MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
- reserve_area_size, commit_area_size, executable, page_size, space);
+ MemoryChunk* memory_chunk =
+ memory_allocator->AllocateLargePage(space, area_size, executable);
size_t reserved_size =
((executable == EXECUTABLE))
? allocatable_memory_area_offset +
- RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+ RoundUp(area_size, page_allocator->CommitPageSize()) +
guard_size
- : RoundUp(allocatable_memory_area_offset + reserve_area_size,
+ : RoundUp(allocatable_memory_area_offset + area_size,
page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());
CHECK(memory_chunk->area_end() <=
memory_chunk->address() + memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+ CHECK(static_cast<size_t>(memory_chunk->area_size()) == area_size);
- memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
+ memory_allocator->Free(MemoryAllocator::FreeMode::kImmediately, memory_chunk);
}
static unsigned int PseudorandomAreaSize() {
@@ -160,12 +161,10 @@ TEST(MemoryChunk) {
Heap* heap = isolate->heap();
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-
- size_t reserve_area_size = 1 * MB;
- size_t initial_commit_area_size;
+ size_t area_size;
for (int i = 0; i < 100; i++) {
- initial_commit_area_size =
+ area_size =
RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());
// With CodeRange.
@@ -179,13 +178,11 @@ TEST(MemoryChunk) {
code_range_reservation.size(), MemoryChunk::kAlignment,
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
- VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
- initial_commit_area_size, EXECUTABLE, PageSize::kLarge,
- heap->code_space());
+ VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
+ EXECUTABLE, PageSize::kLarge, heap->code_lo_space());
- VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
- initial_commit_area_size, NOT_EXECUTABLE,
- PageSize::kLarge, heap->old_space());
+ VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
+ NOT_EXECUTABLE, PageSize::kLarge, heap->lo_space());
}
}
@@ -203,7 +200,7 @@ TEST(MemoryAllocator) {
CHECK(!faked_space.first_page());
CHECK(!faked_space.last_page());
Page* first_page = memory_allocator->AllocatePage(
- MemoryAllocator::kRegular, faked_space.AreaSize(),
+ MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
faked_space.memory_chunk_list().PushBack(first_page);
@@ -216,7 +213,7 @@ TEST(MemoryAllocator) {
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
- MemoryAllocator::kRegular, faked_space.AreaSize(),
+ MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
total_pages++;
faked_space.memory_chunk_list().PushBack(other);
@@ -280,18 +277,19 @@ TEST(NewSpace) {
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
LinearAllocationArea allocation_info;
- NewSpace new_space(heap, memory_allocator->data_page_allocator(),
- CcTest::heap()->InitialSemiSpaceSize(),
- CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
- CHECK(new_space.MaximumCapacity());
+ std::unique_ptr<NewSpace> new_space = std::make_unique<NewSpace>(
+ heap, memory_allocator->data_page_allocator(),
+ CcTest::heap()->InitialSemiSpaceSize(),
+ CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
+ CHECK(new_space->MaximumCapacity());
- while (new_space.Available() >= kMaxRegularHeapObjectSize) {
- CHECK(new_space.Contains(
- new_space.AllocateRaw(kMaxRegularHeapObjectSize, kTaggedAligned)
+ while (new_space->Available() >= kMaxRegularHeapObjectSize) {
+ CHECK(new_space->Contains(
+ new_space->AllocateRaw(kMaxRegularHeapObjectSize, kTaggedAligned)
.ToObjectChecked()));
}
- new_space.TearDown();
+ new_space.reset();
memory_allocator->unmapper()->EnsureUnmappingCompleted();
}
@@ -813,7 +811,7 @@ TEST(NoMemoryForNewPage) {
LinearAllocationArea allocation_info;
OldSpace faked_space(heap, &allocation_info);
Page* page = memory_allocator->AllocatePage(
- MemoryAllocator::kRegular, faked_space.AreaSize(),
+ MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
CHECK_NULL(page);
diff --git a/deps/v8/test/cctest/heap/test-unmapper.cc b/deps/v8/test/cctest/heap/test-unmapper.cc
index 164de7571c..2297409f2d 100644
--- a/deps/v8/test/cctest/heap/test-unmapper.cc
+++ b/deps/v8/test/cctest/heap/test-unmapper.cc
@@ -20,16 +20,9 @@ namespace heap {
class MockPlatformForUnmapper : public TestPlatform {
public:
- MockPlatformForUnmapper()
- : task_(nullptr), old_platform_(i::V8::GetCurrentPlatform()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
~MockPlatformForUnmapper() override {
- delete task_;
- i::V8::SetPlatformForTesting(old_platform_);
for (auto& task : worker_tasks_) {
- old_platform_->CallOnWorkerThread(std::move(task));
+ CcTest::default_platform()->CallOnWorkerThread(std::move(task));
}
worker_tasks_.clear();
}
@@ -40,14 +33,8 @@ class MockPlatformForUnmapper : public TestPlatform {
bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
- int NumberOfWorkerThreads() override {
- return old_platform_->NumberOfWorkerThreads();
- }
-
private:
- Task* task_;
std::vector<std::unique_ptr<Task>> worker_tasks_;
- v8::Platform* old_platform_;
};
UNINITIALIZED_TEST(EagerUnmappingInCollectAllAvailableGarbage) {
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 3ead9c48be..1cfb4715d3 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -176,7 +176,8 @@ TEST(WeakReferencesOldToCleared) {
}
TEST(ObjectMovesBeforeClearingWeakField) {
- if (!FLAG_incremental_marking || FLAG_single_generation) {
+ if (!FLAG_incremental_marking || FLAG_single_generation ||
+ FLAG_separate_gc_phases) {
return;
}
ManualGCScope manual_gc_scope;
@@ -277,7 +278,7 @@ TEST(ObjectWithWeakReferencePromoted) {
}
TEST(ObjectWithClearedWeakReferencePromoted) {
- if (FLAG_single_generation) return;
+ if (FLAG_single_generation || FLAG_stress_incremental_marking) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 7581b0f391..f45da46455 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -20,18 +20,14 @@ snippet: "
test();
})();
"
-frame size: 5
+frame size: 1
parameter count: 1
-bytecode array length: 24
+bytecode array length: 16
bytecodes: [
/* 104 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star3),
- B(LdaConstant), U8(0),
- B(Star4),
- B(Mov), R(this), R(2),
- /* 117 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(2), U8(3),
+ /* 117 E> */ B(GetNamedPropertyFromSuper), R(this), U8(0), U8(1),
B(Star0),
- /* 117 E> */ B(CallAnyReceiver), R(0), R(this), U8(1), U8(1),
+ /* 117 E> */ B(CallAnyReceiver), R(0), R(this), U8(1), U8(3),
/* 126 E> */ B(AddSmi), I8(1), U8(0),
/* 130 S> */ B(Return),
]
@@ -58,7 +54,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 32
+bytecode array length: 24
bytecodes: [
/* 130 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star1),
@@ -69,11 +65,7 @@ bytecodes: [
B(Mov), R(this), R(0),
/* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper), R(0), U8(4),
/* 143 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star1),
- B(LdaConstant), U8(0),
- B(Star2),
- B(Mov), R(this), R(0),
- /* 156 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(0), U8(3),
+ /* 156 E> */ B(GetNamedPropertyFromSuper), R(this), U8(0), U8(0),
/* 158 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareBoolean.golden
new file mode 100644
index 0000000000..76646494bd
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareBoolean.golden
@@ -0,0 +1,368 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+
+---
+snippet: "
+ var a = 1;
+ return a === true;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ /* 42 S> */ B(LdaSmi), I8(1),
+ B(Star0),
+ /* 45 S> */ B(LdaTrue),
+ B(TestReferenceEqual), R(0),
+ /* 63 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = true;
+ return true === a;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ /* 42 S> */ B(LdaTrue),
+ B(Star0),
+ /* 48 S> */ B(LdaTrue),
+ B(TestReferenceEqual), R(0),
+ /* 66 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = false;
+ return true !== a;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ /* 42 S> */ B(LdaFalse),
+ B(Star0),
+ /* 49 S> */ B(LdaTrue),
+ B(TestReferenceEqual), R(0),
+ /* 61 E> */ B(LogicalNot),
+ /* 67 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ return true === a ? 1 : 2;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ /* 42 S> */ B(LdaSmi), I8(1),
+ B(Star0),
+ /* 45 S> */ B(JumpIfTrue), U8(4),
+ B(Jump), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ /* 71 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = true;
+ return false === a ? 1 : 2;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ /* 42 S> */ B(LdaTrue),
+ B(Star0),
+ /* 48 S> */ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ /* 75 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ return true !== a ? 1 : 2;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ /* 42 S> */ B(LdaSmi), I8(1),
+ B(Star0),
+ /* 45 S> */ B(JumpIfTrue), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ /* 71 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = false;
+ return false !== null ? 1 : 2;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ /* 42 S> */ B(LdaFalse),
+ B(Star0),
+ /* 49 S> */ B(LdaNull),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ /* 79 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 0;
+ if (a !== true) {
+ return 1;
+ }
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 9
+bytecodes: [
+ /* 42 S> */ B(LdaZero),
+ B(Star0),
+ /* 45 S> */ B(JumpIfTrue), U8(5),
+ /* 65 S> */ B(LdaSmi), I8(1),
+ /* 74 S> */ B(Return),
+ B(LdaUndefined),
+ /* 77 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = true;
+ var b = 0;
+ while (a !== true) {
+ b++;
+ }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 18
+bytecodes: [
+ /* 42 S> */ B(LdaTrue),
+ B(Star0),
+ /* 56 S> */ B(LdaZero),
+ B(Star1),
+ /* 68 S> */ B(Ldar), R(0),
+ B(JumpIfTrue), U8(10),
+ /* 82 S> */ B(Ldar), R(1),
+ B(Inc), U8(0),
+ B(Star1),
+ /* 59 E> */ B(JumpLoop), U8(9), I8(0),
+ B(LdaUndefined),
+ /* 89 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ (0 === true) ? 1 : 2;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ /* 34 S> */ B(LdaZero),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ B(LdaUndefined),
+ /* 56 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ (0 !== true) ? 1 : 2;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ /* 34 S> */ B(LdaZero),
+ B(JumpIfTrue), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ B(LdaUndefined),
+ /* 56 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ (false === 0) ? 1 : 2;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ /* 34 S> */ B(LdaZero),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ B(LdaUndefined),
+ /* 57 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ (0 === true || 0 === false) ? 1 : 2;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 16
+bytecodes: [
+ /* 34 S> */ B(LdaZero),
+ B(JumpIfTrue), U8(7),
+ B(LdaZero),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(LdaSmi), I8(1),
+ B(Jump), U8(4),
+ B(LdaSmi), I8(2),
+ B(LdaUndefined),
+ /* 71 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ if (0 === true || 0 === false) return 1;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ /* 34 S> */ B(LdaZero),
+ B(JumpIfTrue), U8(7),
+ B(LdaZero),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(5),
+ /* 65 S> */ B(LdaSmi), I8(1),
+ /* 74 S> */ B(Return),
+ B(LdaUndefined),
+ /* 75 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ if (!('false' === false)) return 1;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 9
+bytecodes: [
+ /* 34 S> */ B(LdaConstant), U8(0),
+ B(JumpIfFalse), U8(5),
+ /* 60 S> */ B(LdaSmi), I8(1),
+ /* 69 S> */ B(Return),
+ B(LdaUndefined),
+ /* 70 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["false"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ if (!('false' !== false)) return 1;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ /* 34 S> */ B(LdaConstant), U8(0),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(5),
+ /* 60 S> */ B(LdaSmi), I8(1),
+ /* 69 S> */ B(Return),
+ B(LdaUndefined),
+ /* 70 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["false"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index 00cf22fb7b..4451dfbbb4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -83,7 +83,7 @@ bytecodes: [
/* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(296),
+ B(Wide), B(LdaSmi), I16(297),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -115,7 +115,7 @@ bytecodes: [
/* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(295),
+ B(Wide), B(LdaSmi), I16(296),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -137,21 +137,23 @@ snippet: "
var test = D;
new test;
"
-frame size: 4
+frame size: 5
parameter count: 1
-bytecode array length: 28
+bytecode array length: 31
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
/* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
- /* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(296),
+ /* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
- B(LdaConstant), U8(0),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(297),
B(Star3),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+ B(LdaConstant), U8(0),
+ B(Star4),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
constant pool: [
@@ -179,7 +181,7 @@ bytecodes: [
/* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(295),
+ B(Wide), B(LdaSmi), I16(296),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index a26dd54cf9..f783f4a443 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -46,21 +46,23 @@ snippet: "
var test = B;
new test;
"
-frame size: 4
+frame size: 5
parameter count: 1
-bytecode array length: 28
+bytecode array length: 31
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
/* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
- /* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
- /* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(294),
+ /* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
- B(LdaConstant), U8(0),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
+ B(Wide), B(LdaSmi), I16(295),
B(Star3),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
+ B(LdaConstant), U8(0),
+ B(Star4),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
constant pool: [
@@ -89,7 +91,7 @@ bytecodes: [
/* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(294),
+ B(Wide), B(LdaSmi), I16(295),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 846fe89559..3c2e1706f6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -24,7 +24,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -51,25 +51,27 @@ snippet: "
var test = B.test;
test();
"
-frame size: 3
+frame size: 4
parameter count: 1
-bytecode array length: 37
+bytecode array length: 40
bytecodes: [
- /* 56 S> */ B(LdaCurrentContextSlot), U8(3),
+ /* 56 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ B(Star1),
+ B(LdaCurrentContextSlot), U8(3),
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
- B(Star1),
- B(LdaConstant), U8(0),
+ B(Wide), B(LdaSmi), I16(289),
B(Star2),
- /* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
+ B(LdaConstant), U8(0),
+ B(Star3),
+ /* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(294),
- B(Star1),
- B(LdaConstant), U8(1),
+ B(Wide), B(LdaSmi), I16(295),
B(Star2),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
+ B(LdaConstant), U8(1),
+ B(Star3),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
]
constant pool: [
@@ -97,13 +99,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(294),
+ B(Wide), B(LdaSmi), I16(295),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -143,7 +145,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -165,7 +167,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -180,7 +182,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -214,13 +216,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(296),
+ B(Wide), B(LdaSmi), I16(297),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -251,13 +253,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
+ B(Wide), B(LdaSmi), I16(289),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 58 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(295),
+ B(Wide), B(LdaSmi), I16(296),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -280,25 +282,27 @@ snippet: "
var test = G.test;
test();
"
-frame size: 3
+frame size: 4
parameter count: 1
-bytecode array length: 37
+bytecode array length: 40
bytecodes: [
- /* 60 S> */ B(LdaCurrentContextSlot), U8(3),
+ /* 60 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ B(Star1),
+ B(LdaCurrentContextSlot), U8(3),
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(288),
- B(Star1),
- B(LdaConstant), U8(0),
+ B(Wide), B(LdaSmi), I16(289),
B(Star2),
- /* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
+ B(LdaConstant), U8(0),
+ B(Star3),
+ /* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(296),
- B(Star1),
- B(LdaConstant), U8(1),
+ B(Wide), B(LdaSmi), I16(297),
B(Star2),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
+ B(LdaConstant), U8(1),
+ B(Star3),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
]
constant pool: [
@@ -323,7 +327,7 @@ bytecode array length: 19
bytecodes: [
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(0),
- B(Wide), B(LdaSmi), I16(295),
+ B(Wide), B(LdaSmi), I16(296),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 09e2926059..2a31638d33 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -1161,6 +1161,62 @@ TEST(CompareTypeOf) {
LoadGolden("CompareTypeOf.golden")));
}
+TEST(CompareBoolean) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+ std::string snippets[] = {
+ "var a = 1;\n"
+ "return a === true;\n",
+
+ "var a = true;\n"
+ "return true === a;\n",
+
+ "var a = false;\n"
+ "return true !== a;\n",
+
+ "var a = 1;\n"
+ "return true === a ? 1 : 2;\n",
+
+ "var a = true;\n"
+ "return false === a ? 1 : 2;\n",
+
+ "var a = 1;\n"
+ "return true !== a ? 1 : 2;\n",
+
+ "var a = false;\n"
+ "return false !== null ? 1 : 2;\n",
+
+ "var a = 0;\n"
+ "if (a !== true) {\n"
+ " return 1;\n"
+ "}\n",
+
+ "var a = true;\n"
+ "var b = 0;\n"
+ "while (a !== true) {\n"
+ " b++;\n"
+ "}\n",
+
+ "(0 === true) ? 1 : 2;\n",
+
+ "(0 !== true) ? 1 : 2;\n",
+
+ "(false === 0) ? 1 : 2;\n",
+
+ "(0 === true || 0 === false) ? 1 : 2;\n",
+
+ "if (0 === true || 0 === false) return 1;\n",
+
+ "if (!('false' === false)) return 1;\n",
+
+ "if (!('false' !== false)) return 1;\n",
+ };
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CompareBoolean.golden")));
+}
+
TEST(CompareNil) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index 2078aeb02a..0081e8e29c 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -32,12 +32,7 @@ namespace {
// Implementation of v8::Platform that can register OOM callbacks.
class AllocationPlatform : public TestPlatform {
public:
- AllocationPlatform() {
- current_platform = this;
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
- ~AllocationPlatform() override = default;
+ AllocationPlatform() { current_platform = this; }
void OnCriticalMemoryPressure() override { oom_callback_called = true; }
@@ -95,8 +90,7 @@ void OnAlignedAllocOOM(const char* location, const char* message) {
} // namespace
-TEST(AccountingAllocatorOOM) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(AccountingAllocatorOOM, AllocationPlatform) {
v8::internal::AccountingAllocator allocator;
CHECK(!platform.oom_callback_called);
const bool support_compression = false;
@@ -106,8 +100,7 @@ TEST(AccountingAllocatorOOM) {
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
-TEST(AccountingAllocatorCurrentAndMax) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(AccountingAllocatorCurrentAndMax, AllocationPlatform) {
v8::internal::AccountingAllocator allocator;
static constexpr size_t kAllocationSizes[] = {51, 231, 27};
std::vector<v8::internal::Segment*> segments;
@@ -135,8 +128,7 @@ TEST(AccountingAllocatorCurrentAndMax) {
CHECK(!platform.oom_callback_called);
}
-TEST(MallocedOperatorNewOOM) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(MallocedOperatorNewOOM, AllocationPlatform) {
CHECK(!platform.oom_callback_called);
CcTest::isolate()->SetFatalErrorHandler(OnMallocedOperatorNewOOM);
// On failure, this won't return, since a Malloced::New failure is fatal.
@@ -146,8 +138,7 @@ TEST(MallocedOperatorNewOOM) {
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
-TEST(NewArrayOOM) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(NewArrayOOM, AllocationPlatform) {
CHECK(!platform.oom_callback_called);
CcTest::isolate()->SetFatalErrorHandler(OnNewArrayOOM);
// On failure, this won't return, since a NewArray failure is fatal.
@@ -157,8 +148,7 @@ TEST(NewArrayOOM) {
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
-TEST(AlignedAllocOOM) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(AlignedAllocOOM, AllocationPlatform) {
CHECK(!platform.oom_callback_called);
CcTest::isolate()->SetFatalErrorHandler(OnAlignedAllocOOM);
// On failure, this won't return, since an AlignedAlloc failure is fatal.
@@ -169,8 +159,7 @@ TEST(AlignedAllocOOM) {
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
-TEST(AllocVirtualMemoryOOM) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(AllocVirtualMemoryOOM, AllocationPlatform) {
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result(v8::internal::GetPlatformPageAllocator(),
GetHugeMemoryAmount(), nullptr);
@@ -178,8 +167,7 @@ TEST(AllocVirtualMemoryOOM) {
CHECK_IMPLIES(!result.IsReserved(), platform.oom_callback_called);
}
-TEST(AlignedAllocVirtualMemoryOOM) {
- AllocationPlatform platform;
+TEST_WITH_PLATFORM(AlignedAllocVirtualMemoryOOM, AllocationPlatform) {
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result(v8::internal::GetPlatformPageAllocator(),
GetHugeMemoryAmount(), nullptr,
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 7c1799da6a..8ed8dd6afe 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -814,3 +814,64 @@ TEST(BindFunctionTemplateSetNativeDataProperty) {
CHECK(try_catch.HasCaught());
}
}
+
+namespace {
+v8::MaybeLocal<v8::Context> TestHostCreateShadowRealmContextCallback(
+ v8::Local<v8::Context> initiator_context) {
+ v8::Isolate* isolate = initiator_context->GetIsolate();
+ v8::Local<v8::FunctionTemplate> global_constructor =
+ v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> global_template =
+ global_constructor->InstanceTemplate();
+
+ // Check that getter is called on Function.prototype.bind.
+ global_template->SetNativeDataProperty(
+ v8_str("func1"), [](v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(isolate);
+ templ->SetNativeDataProperty(v8_str("name"), FunctionNativeGetter);
+ info.GetReturnValue().Set(
+ templ->GetFunction(isolate->GetCurrentContext()).ToLocalChecked());
+ });
+
+ // Check that getter is called on Function.prototype.bind.
+ global_template->SetNativeDataProperty(
+ v8_str("func2"), [](v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(isolate);
+ templ->SetNativeDataProperty(v8_str("length"), FunctionNativeGetter);
+ info.GetReturnValue().Set(
+ templ->GetFunction(isolate->GetCurrentContext()).ToLocalChecked());
+ });
+
+ return v8::Context::New(isolate, nullptr, global_template);
+}
+} // namespace
+
+TEST(WrapFunctionTemplateSetNativeDataProperty) {
+ i::FLAG_harmony_shadow_realm = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ isolate->SetHostCreateShadowRealmContextCallback(
+ TestHostCreateShadowRealmContextCallback);
+
+ v8::HandleScope scope(isolate);
+ // Check that getter is called on WrappedFunctionCreate.
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(
+ CompileRun("new ShadowRealm().evaluate('globalThis.func1')").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ // Check that getter is called on WrappedFunctionCreate.
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(
+ CompileRun("new ShadowRealm().evaluate('globalThis.func2')").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 3c57428651..92f549ad96 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -33,6 +33,8 @@
#include <memory>
#include <string>
+#include "test/cctest/cctest.h"
+
#if V8_OS_POSIX
#include <unistd.h>
#endif
@@ -7895,6 +7897,7 @@ static void ResetUseValueAndSetFlag(
}
void v8::internal::heap::HeapTester::ResetWeakHandle(bool global_gc) {
+ if (FLAG_stress_incremental_marking) return;
using v8::Context;
using v8::Local;
using v8::Object;
@@ -23102,14 +23105,8 @@ TEST(ThrowOnJavascriptExecution) {
namespace {
-class MockPlatform : public TestPlatform {
+class MockPlatform final : public TestPlatform {
public:
- MockPlatform() : old_platform_(i::V8::GetCurrentPlatform()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
- ~MockPlatform() override { i::V8::SetPlatformForTesting(old_platform_); }
-
bool dump_without_crashing_called() const {
return dump_without_crashing_called_;
}
@@ -23117,15 +23114,12 @@ class MockPlatform : public TestPlatform {
void DumpWithoutCrashing() override { dump_without_crashing_called_ = true; }
private:
- v8::Platform* old_platform_;
bool dump_without_crashing_called_ = false;
};
} // namespace
-TEST(DumpOnJavascriptExecution) {
- MockPlatform platform;
-
+TEST_WITH_PLATFORM(DumpOnJavascriptExecution, MockPlatform) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -29498,38 +29492,6 @@ TEST(CodeLikeFunction) {
ExpectInt32("new Function(new CodeLike())()", 7);
}
-UNINITIALIZED_TEST(SingleThreadedDefaultPlatform) {
- v8::V8::SetFlagsFromString("--single-threaded");
- auto old_platform = i::V8::GetCurrentPlatform();
- std::unique_ptr<v8::Platform> new_platform(
- v8::platform::NewSingleThreadedDefaultPlatform());
- i::V8::SetPlatformForTesting(new_platform.get());
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- isolate->Enter();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- {
- i::HandleScope scope(i_isolate);
- v8::Local<Context> env = Context::New(isolate);
- env->Enter();
-
- CompileRunChecked(isolate,
- "function f() {"
- " for (let i = 0; i < 10; i++)"
- " (new Array(10)).fill(0);"
- " return 0;"
- "}"
- "f();");
- env->Exit();
- }
- CcTest::CollectGarbage(i::NEW_SPACE, i_isolate);
- CcTest::CollectAllAvailableGarbage(i_isolate);
- isolate->Exit();
- isolate->Dispose();
- i::V8::SetPlatformForTesting(old_platform);
-}
-
THREADED_TEST(MicrotaskQueueOfContext) {
auto microtask_queue = v8::MicrotaskQueue::New(CcTest::isolate());
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index fea98df487..897a4a70f9 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -12381,7 +12381,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
}
break;
case PushPopRegList:
- __ PushSizeRegList<TurboAssembler::kDontStoreLR>(list, reg_size);
+ __ PushSizeRegList(list, reg_size);
break;
}
@@ -12406,7 +12406,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
}
break;
case PushPopRegList:
- __ PopSizeRegList<TurboAssembler::kDontLoadLR>(list, reg_size);
+ __ PopSizeRegList(list, reg_size);
break;
}
}
@@ -12740,8 +12740,8 @@ TEST(push_pop) {
__ PopXRegList({});
// Don't push/pop x18 (platform register) or lr
RegList all_regs = RegList::FromBits(0xFFFFFFFF) - RegList{x18, lr};
- __ PushXRegList<TurboAssembler::kDontStoreLR>(all_regs);
- __ PopXRegList<TurboAssembler::kDontLoadLR>(all_regs);
+ __ PushXRegList(all_regs);
+ __ PopXRegList(all_regs);
__ Drop(12);
END();
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index f4ca7e83cf..131a52a396 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -1522,8 +1522,6 @@ TEST(Regress621926) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
v8::internal::byte buffer[256];
@@ -1539,9 +1537,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 11ef08fb89..4d823e4f0a 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -2827,6 +2827,53 @@ TEST(AssemblerX64Integer256bit) {
CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
}
+TEST(AssemblerX64CmpOperations256bit) {
+ if (!CpuFeatures::IsSupported(AVX)) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ auto buffer = AllocateAssemblerBuffer();
+ Isolate* isolate = CcTest::i_isolate();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
+ CpuFeatureScope fscope(&masm, AVX);
+
+ __ vcmpeqps(ymm1, ymm2, ymm4);
+ __ vcmpltpd(ymm4, ymm7, Operand(rcx, rdx, times_4, 10000));
+ __ vcmpleps(ymm9, ymm8, Operand(r8, r11, times_8, 10000));
+ __ vcmpunordpd(ymm3, ymm7, ymm8);
+ __ vcmpneqps(ymm3, ymm5, ymm9);
+ __ vcmpnltpd(ymm10, ymm12, Operand(r12, r11, times_4, 10000));
+ __ vcmpnleps(ymm9, ymm11, Operand(r10, r9, times_8, 10000));
+ __ vcmpgepd(ymm13, ymm3, ymm12);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+#ifdef OBJECT_PRINT
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ StdoutStream os;
+ code->Print(os);
+#endif
+
+ byte expected[] = {
+ // vcmpeqps ymm1, ymm2, ymm4
+ 0xC5, 0xEC, 0xC2, 0xCC, 0x00,
+ // vcmpltpd ymm4, ymm7, YMMWORD PTR [rcx+rdx*4+0x2710]
+ 0xC5, 0xC5, 0xC2, 0xA4, 0x91, 0x10, 0x27, 0x00, 0x00, 0x01,
+ // vcmpleps ymm9, ymm8, YMMWORD PTR [r8+r11*8+0x2710]
+ 0xC4, 0x01, 0x3C, 0xC2, 0x8C, 0xD8, 0x10, 0x27, 0x00, 0x00, 0x02,
+ // vcmpunordpd ymm3, ymm7, ymm8
+ 0xC4, 0xC1, 0x45, 0xC2, 0xD8, 0x03,
+ // vcmpneqps ymm3, ymm5, ymm9
+ 0xC4, 0xC1, 0x54, 0xC2, 0xD9, 0x04,
+ // vcmpnltpd ymm10, ymm12, YMMWORD PTR [r12+r11*4+0x2710]
+ 0xC4, 0x01, 0x1D, 0xC2, 0x94, 0x9C, 0x10, 0x27, 0x00, 0x00, 0x05,
+ // vcmpnleps ymm9, ymm11, YMMWORD PTR [r10+r9*8+0x2710]
+ 0xC4, 0x01, 0x24, 0xC2, 0x8C, 0xCA, 0x10, 0x27, 0x00, 0x00, 0x06,
+ // vcmpgepd ymm13, ymm3, ymm12
+ 0xC4, 0x41, 0x65, 0xC2, 0xEC, 0x0D};
+ CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
+}
+
TEST(CpuFeatures_ProbeImpl) {
// Support for a newer extension implies support for the older extensions.
CHECK_IMPLIES(CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(AVX));
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 28600996f5..4c70451d59 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -4565,6 +4565,48 @@ TEST(DebugEvaluateNoSideEffect) {
DisableDebugger(env->GetIsolate());
}
+TEST(DebugEvaluateGlobalSharedCrossOrigin) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::TryCatch tryCatch(isolate);
+ tryCatch.SetCaptureMessage(true);
+ v8::MaybeLocal<v8::Value> result =
+ v8::debug::EvaluateGlobal(isolate, v8_str(isolate, "throw new Error()"),
+ v8::debug::EvaluateGlobalMode::kDefault);
+ CHECK(result.IsEmpty());
+ CHECK(tryCatch.HasCaught());
+ CHECK(tryCatch.Message()->IsSharedCrossOrigin());
+}
+
+TEST(DebugEvaluateLocalSharedCrossOrigin) {
+ struct BreakProgramDelegate : public v8::debug::DebugDelegate {
+ void BreakProgramRequested(v8::Local<v8::Context> context,
+ std::vector<v8::debug::BreakpointId> const&,
+ v8::debug::BreakReasons) final {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ tryCatch.SetCaptureMessage(true);
+ std::unique_ptr<v8::debug::StackTraceIterator> it =
+ v8::debug::StackTraceIterator::Create(isolate);
+ v8::MaybeLocal<v8::Value> result =
+ it->Evaluate(v8_str(isolate, "throw new Error()"), false);
+ CHECK(result.IsEmpty());
+ CHECK(tryCatch.HasCaught());
+ CHECK(tryCatch.Message()->IsSharedCrossOrigin());
+ }
+ } delegate;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::debug::SetDebugDelegate(isolate, &delegate);
+ v8::Script::Compile(env.local(), v8_str(isolate, "debugger;"))
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
+ v8::debug::SetDebugDelegate(isolate, nullptr);
+}
+
namespace {
i::MaybeHandle<i::Script> FindScript(
i::Isolate* isolate, const std::vector<i::Handle<i::Script>>& scripts,
@@ -5722,7 +5764,7 @@ TEST(AwaitCleansUpGlobalPromiseStack) {
"})();\n");
CompileRun(source);
- CHECK_EQ(CcTest::i_isolate()->thread_local_top()->promise_on_stack_, nullptr);
+ CHECK(CcTest::i_isolate()->IsPromiseStackEmpty());
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 2b2aa963ee..45c7aebe5a 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -780,6 +780,8 @@ UNINITIALIZED_TEST(DisasmX64CheckOutputSSE) {
COMPARE("440f178c8b10270000 movhps [rbx+rcx*4+0x2710],xmm9",
movhps(Operand(rbx, rcx, times_4, 10000), xmm9));
COMPARE("410fc6c100 shufps xmm0, xmm9, 0", shufps(xmm0, xmm9, 0x0));
+ COMPARE("f30fc2c100 cmpeqss xmm0,xmm1", cmpeqss(xmm0, xmm1));
+ COMPARE("f20fc2c100 cmpeqsd xmm0,xmm1", cmpeqsd(xmm0, xmm1));
COMPARE("0f2ec1 ucomiss xmm0,xmm1", ucomiss(xmm0, xmm1));
COMPARE("0f2e848b10270000 ucomiss xmm0,[rbx+rcx*4+0x2710]",
ucomiss(xmm0, Operand(rbx, rcx, times_4, 10000)));
@@ -1027,8 +1029,12 @@ UNINITIALIZED_TEST(DisasmX64CheckOutputSSE4_1) {
roundpd(xmm8, xmm3, kRoundToNearest));
COMPARE("66440f3a0ac309 roundss xmm8,xmm3,0x1",
roundss(xmm8, xmm3, kRoundDown));
+ COMPARE("66440f3a0a420b09 roundss xmm8,[rdx+0xb],0x1",
+ roundss(xmm8, Operand(rdx, 11), kRoundDown));
COMPARE("66440f3a0bc309 roundsd xmm8,xmm3,0x1",
roundsd(xmm8, xmm3, kRoundDown));
+ COMPARE("66440f3a0b420b09 roundsd xmm8,[rdx+0xb],0x1",
+ roundsd(xmm8, Operand(rdx, 11), kRoundDown));
#define COMPARE_SSE4_1_INSTR(instruction, _, __, ___, ____) \
exp = #instruction " xmm5,xmm1"; \
@@ -1167,6 +1173,10 @@ UNINITIALIZED_TEST(DisasmX64CheckOutputAVX) {
vmovss(xmm9, Operand(r11, rcx, times_8, -10000)));
COMPARE("c4a17a118c8b10270000 vmovss [rbx+r9*4+0x2710],xmm1",
vmovss(Operand(rbx, r9, times_4, 10000), xmm1));
+ COMPARE("c532c2c900 vcmpss xmm9,xmm9,xmm1, (eq)",
+ vcmpeqss(xmm9, xmm1));
+ COMPARE("c533c2c900 vcmpsd xmm9,xmm9,xmm1, (eq)",
+ vcmpeqsd(xmm9, xmm1));
COMPARE("c5782ec9 vucomiss xmm9,xmm1", vucomiss(xmm9, xmm1));
COMPARE("c5782e8453e52a0000 vucomiss xmm8,[rbx+rdx*2+0x2ae5]",
vucomiss(xmm8, Operand(rbx, rdx, times_2, 10981)));
@@ -1415,9 +1425,26 @@ UNINITIALIZED_TEST(DisasmX64YMMRegister) {
COMPARE("c5ff12a48b10270000 vmovddup ymm4,[rbx+rcx*4+0x2710]",
vmovddup(ymm4, Operand(rbx, rcx, times_4, 10000)));
COMPARE("c5fe16ca vmovshdup ymm1,ymm2", vmovshdup(ymm1, ymm2));
-
COMPARE("c5f4c6da73 vshufps ymm3,ymm1,ymm2,0x73",
vshufps(ymm3, ymm1, ymm2, 115));
+
+ // vcmp
+ COMPARE("c5dcc2e900 vcmpps ymm5,ymm4,ymm1, (eq)",
+ vcmpeqps(ymm5, ymm4, ymm1));
+ COMPARE("c5ddc2ac8b1027000001 vcmppd ymm5,ymm4,[rbx+rcx*4+0x2710], (lt)",
+ vcmpltpd(ymm5, ymm4, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("c5ddc2e902 vcmppd ymm5,ymm4,ymm1, (le)",
+ vcmplepd(ymm5, ymm4, ymm1));
+ COMPARE("c5dcc2ac8b1027000003 vcmpps ymm5,ymm4,[rbx+rcx*4+0x2710], (unord)",
+ vcmpunordps(ymm5, ymm4, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("c5dcc2e904 vcmpps ymm5,ymm4,ymm1, (neq)",
+ vcmpneqps(ymm5, ymm4, ymm1));
+ COMPARE("c5ddc2ac8b1027000005 vcmppd ymm5,ymm4,[rbx+rcx*4+0x2710], (nlt)",
+ vcmpnltpd(ymm5, ymm4, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("c5ddc2ac8b1027000006 vcmppd ymm5,ymm4,[rbx+rcx*4+0x2710], (nle)",
+ vcmpnlepd(ymm5, ymm4, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("c5dcc2e90d vcmpps ymm5,ymm4,ymm1, (ge)",
+ vcmpgeps(ymm5, ymm4, ymm1));
}
if (!CpuFeatures::IsSupported(AVX2)) return;
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 1bee88aa4d..90e3341806 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -70,7 +70,7 @@ static void CheckMigrationTarget(Isolate* isolate, Map old_map, Map new_map) {
if (target.is_null()) return;
CHECK_EQ(new_map, target);
CHECK_EQ(MapUpdater::TryUpdateNoLock(isolate, old_map,
- ConcurrencyMode::kNotConcurrent),
+ ConcurrencyMode::kSynchronous),
target);
}
@@ -1836,7 +1836,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldInPlace(
MapHandles map_list;
map_list.push_back(updated_map);
Map transitioned_map = map2->FindElementsKindTransitionedMap(
- isolate, map_list, ConcurrencyMode::kNotConcurrent);
+ isolate, map_list, ConcurrencyMode::kSynchronous);
CHECK_EQ(*updated_map, transitioned_map);
}
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index d39629ecec..7d1fd5de99 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -465,7 +465,7 @@ TEST(HeapSnapshotCodeObjects) {
for (int i = 0, count = compiled_sfi->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = compiled_sfi->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::kHidden &&
+ if (node->GetType() == v8::HeapGraphNode::kCode &&
!strcmp("system / ScopeInfo", GetName(node))) {
if (HasString(env->GetIsolate(), node, "x")) {
compiled_references_x = true;
@@ -476,7 +476,7 @@ TEST(HeapSnapshotCodeObjects) {
for (int i = 0, count = lazy_sfi->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = lazy_sfi->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::kHidden &&
+ if (node->GetType() == v8::HeapGraphNode::kCode &&
!strcmp("system / ScopeInfo", GetName(node))) {
if (HasString(env->GetIsolate(), node, "x")) {
lazy_references_x = true;
@@ -2680,7 +2680,7 @@ TEST(AllocationSitesAreVisible) {
CHECK(feedback_cell);
const v8::HeapGraphNode* vector = GetProperty(
env->GetIsolate(), feedback_cell, v8::HeapGraphEdge::kInternal, "value");
- CHECK_EQ(v8::HeapGraphNode::kHidden, vector->GetType());
+ CHECK_EQ(v8::HeapGraphNode::kCode, vector->GetType());
CHECK_EQ(4, vector->GetChildrenCount());
// The last value in the feedback vector should be the boilerplate,
@@ -2698,7 +2698,7 @@ TEST(AllocationSitesAreVisible) {
GetProperty(env->GetIsolate(), transition_info,
v8::HeapGraphEdge::kInternal, "elements");
CHECK(elements);
- CHECK_EQ(v8::HeapGraphNode::kArray, elements->GetType());
+ CHECK_EQ(v8::HeapGraphNode::kCode, elements->GetType());
CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
static_cast<int>(elements->GetShallowSize()));
@@ -4139,9 +4139,9 @@ TEST(WeakReference) {
// to the FOR_TESTING code kind).
fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(ToCodeT(*code)),
v8::kReleaseStore);
- fv->set_flags(i::FeedbackVector::MaybeHasOptimizedCodeBit::encode(true) |
- i::FeedbackVector::OptimizationMarkerBits::encode(
- i::OptimizationMarker::kNone));
+ fv->set_flags(
+ i::FeedbackVector::MaybeHasOptimizedCodeBit::encode(true) |
+ i::FeedbackVector::TieringStateBits::encode(i::TieringState::kNone));
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
diff --git a/deps/v8/test/cctest/test-js-to-wasm.cc b/deps/v8/test/cctest/test-js-to-wasm.cc
index ecb481ee23..ff18adf51c 100644
--- a/deps/v8/test/cctest/test-js-to-wasm.cc
+++ b/deps/v8/test/cctest/test-js-to-wasm.cc
@@ -271,6 +271,7 @@ class FastJSWasmCallTester {
i::FLAG_allow_natives_syntax = true;
i::FLAG_turbo_inline_js_wasm_calls = true;
i::FLAG_stress_background_compile = false;
+ i::FLAG_concurrent_osr = false; // Seems to mess with %ObserveNode.
}
void DeclareCallback(const char* name, FunctionSig* signature,
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 8974bdf6db..a2c3a3e504 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -925,7 +925,8 @@ TEST(JSWeakRefScavengedInWorklist) {
}
TEST(JSWeakRefTenuredInWorklist) {
- if (!FLAG_incremental_marking || FLAG_single_generation) {
+ if (!FLAG_incremental_marking || FLAG_single_generation ||
+ FLAG_separate_gc_phases) {
return;
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index e57c5a6198..0abce98009 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -513,7 +513,6 @@ UNINITIALIZED_TEST(LogAll) {
SETUP_FLAGS();
i::FLAG_log_all = true;
i::FLAG_log_deopt = true;
- i::FLAG_log_api = true;
i::FLAG_turbo_inlining = false;
i::FLAG_log_internal_timer_events = true;
i::FLAG_allow_natives_syntax = true;
@@ -551,11 +550,9 @@ UNINITIALIZED_TEST(LogAll) {
logger.StopLogging();
// We should find at least one code-creation even for testAddFn();
- CHECK(logger.ContainsLine({"api,v8::Context::New"}));
CHECK(logger.ContainsLine({"timer-event-start", "V8.CompileCode"}));
CHECK(logger.ContainsLine({"timer-event-end", "V8.CompileCode"}));
CHECK(logger.ContainsLine({"code-creation,Script", ":1:1"}));
- CHECK(logger.ContainsLine({"api,v8::Script::Run"}));
CHECK(logger.ContainsLine({"code-creation,LazyCompile,", "testAddFn"}));
if (i::FLAG_opt && !i::FLAG_always_opt) {
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 251cd5f705..23b5b2d4e8 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -311,8 +311,6 @@ TEST(ReplaceLane) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -328,9 +326,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm64.cc b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
index d96fc3551f..eec16c0529 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
@@ -94,8 +94,6 @@ TEST(EmbeddedObj) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -117,9 +115,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
&before_exit);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
index 63730abbc2..eac64947e9 100644
--- a/deps/v8/test/cctest/test-macro-assembler-loong64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
@@ -2879,8 +2879,6 @@ TEST(Popcnt) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -2895,9 +2893,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 9e5fdabd15..c4926af159 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -1337,8 +1337,6 @@ TEST(macro_float_minmax_f64) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -1353,9 +1351,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 09664f0170..2d87fb7750 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -1690,8 +1690,6 @@ TEST(macro_float_minmax_f64) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -1706,9 +1704,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
index a98c10933e..0bf9b5b363 100644
--- a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
@@ -1519,8 +1519,6 @@ TEST(Move) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -1541,9 +1539,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
&before_exit);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 7e1388bd52..1bc04263e8 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -1050,8 +1050,6 @@ TEST(AreAliased) {
}
TEST(DeoptExitSizeIsFixed) {
- CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
-
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
auto buffer = AllocateAssemblerBuffer();
@@ -1067,9 +1065,8 @@ TEST(DeoptExitSizeIsFixed) {
masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
nullptr);
CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index de1c42cb16..204cba7e4e 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -536,6 +536,32 @@ TEST(SampleIds_StopProfilingByProfilerId) {
CHECK_NE(profile, nullptr);
}
+TEST(CpuProfilesCollectionDuplicateId) {
+ CpuProfilesCollection collection(CcTest::i_isolate());
+ CpuProfiler profiler(CcTest::i_isolate());
+ collection.set_cpu_profiler(&profiler);
+
+ auto profile_result = collection.StartProfiling();
+ CHECK_EQ(CpuProfilingStatus::kStarted, profile_result.status);
+ CHECK_EQ(CpuProfilingStatus::kAlreadyStarted,
+ collection.StartProfilingForTesting(profile_result.id).status);
+
+ collection.StopProfiling(profile_result.id);
+}
+
+TEST(CpuProfilesCollectionDuplicateTitle) {
+ CpuProfilesCollection collection(CcTest::i_isolate());
+ CpuProfiler profiler(CcTest::i_isolate());
+ collection.set_cpu_profiler(&profiler);
+
+ auto profile_result = collection.StartProfiling("duplicate");
+ CHECK_EQ(CpuProfilingStatus::kStarted, profile_result.status);
+ CHECK_EQ(CpuProfilingStatus::kAlreadyStarted,
+ collection.StartProfiling("duplicate").status);
+
+ collection.StopProfiling(profile_result.id);
+}
+
namespace {
class DiscardedSamplesDelegateImpl : public v8::DiscardedSamplesDelegate {
public:
@@ -543,17 +569,9 @@ class DiscardedSamplesDelegateImpl : public v8::DiscardedSamplesDelegate {
void Notify() override { CHECK_GT(GetId(), 0); }
};
-class MockPlatform : public TestPlatform {
+class MockPlatform final : public TestPlatform {
public:
- MockPlatform()
- : old_platform_(i::V8::GetCurrentPlatform()),
- mock_task_runner_(new MockTaskRunner()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
-
- // When done, explicitly revert to old_platform_.
- ~MockPlatform() override { i::V8::SetPlatformForTesting(old_platform_); }
+ MockPlatform() : mock_task_runner_(new MockTaskRunner()) {}
std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
v8::Isolate*) override {
@@ -581,6 +599,8 @@ class MockPlatform : public TestPlatform {
}
bool IdleTasksEnabled() override { return false; }
+ bool NonNestableTasksEnabled() const override { return true; }
+ bool NonNestableDelayedTasksEnabled() const override { return true; }
int posted_count() { return posted_count_; }
@@ -590,17 +610,15 @@ class MockPlatform : public TestPlatform {
std::unique_ptr<Task> task_;
};
- v8::Platform* old_platform_;
std::shared_ptr<MockTaskRunner> mock_task_runner_;
};
} // namespace
-TEST(MaxSamplesCallback) {
+TEST_WITH_PLATFORM(MaxSamplesCallback, MockPlatform) {
i::Isolate* isolate = CcTest::i_isolate();
CpuProfilesCollection profiles(isolate);
CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
- MockPlatform* mock_platform = new MockPlatform();
std::unique_ptr<DiscardedSamplesDelegateImpl> impl =
std::make_unique<DiscardedSamplesDelegateImpl>(
DiscardedSamplesDelegateImpl());
@@ -624,7 +642,7 @@ TEST(MaxSamplesCallback) {
profiles.AddPathToCurrentProfiles(
sample1.timestamp, symbolized.stack_trace, symbolized.src_line, true,
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
- CHECK_EQ(0, mock_platform->posted_count());
+ CHECK_EQ(0, platform.posted_count());
TickSample sample2;
sample2.timestamp = v8::base::TimeTicks::Now();
sample2.pc = ToPointer(0x1925);
@@ -634,7 +652,7 @@ TEST(MaxSamplesCallback) {
profiles.AddPathToCurrentProfiles(
sample2.timestamp, symbolized.stack_trace, symbolized.src_line, true,
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
- CHECK_EQ(1, mock_platform->posted_count());
+ CHECK_EQ(1, platform.posted_count());
TickSample sample3;
sample3.timestamp = v8::base::TimeTicks::Now();
sample3.pc = ToPointer(0x1510);
@@ -643,11 +661,10 @@ TEST(MaxSamplesCallback) {
profiles.AddPathToCurrentProfiles(
sample3.timestamp, symbolized.stack_trace, symbolized.src_line, true,
base::TimeDelta(), StateTag::JS, EmbedderStateTag::EMPTY);
- CHECK_EQ(1, mock_platform->posted_count());
+ CHECK_EQ(1, platform.posted_count());
// Teardown
profiles.StopProfiling(id);
- delete mock_platform;
}
TEST(NoSamples) {
@@ -758,7 +775,6 @@ TEST(Issue51919) {
i::DeleteArray(titles[i]);
}
-
static const v8::CpuProfileNode* PickChild(const v8::CpuProfileNode* parent,
const char* name) {
for (int i = 0; i < parent->GetChildrenCount(); ++i) {
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index 33f0873530..21bc365ab7 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -56,6 +56,7 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
V(retaining_path_targets) \
V(serialized_global_proxy_sizes) \
V(serialized_objects) \
+ IF_WASM(V, wasm_canonical_rtts) \
V(weak_refs_keep_during_job)
#define TEST_CAN_BE_READ_ONLY(name) \
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 9c19b399df..8c4d6b4722 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -2670,7 +2670,7 @@ TEST(CodeSerializerAfterExecute) {
Handle<SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
CHECK(sfi->HasBytecodeArray());
BytecodeArray bytecode = sfi->GetBytecodeArray(i_isolate2);
- CHECK_EQ(bytecode.osr_loop_nesting_level(), 0);
+ CHECK_EQ(bytecode.osr_urgency(), 0);
{
DisallowCompilation no_compile_expected(i_isolate2);
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 2e2dd3d2cb..1f25a9a212 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -6,6 +6,7 @@
#include <string.h>
#include "include/v8-function.h"
+#include "include/v8-platform.h"
#include "src/init/v8.h"
#include "src/tracing/trace-event.h"
#include "test/cctest/cctest.h"
@@ -86,12 +87,6 @@ class MockTracingController : public v8::TracingController {
class MockTracingPlatform : public TestPlatform {
public:
- MockTracingPlatform() {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
- ~MockTracingPlatform() override = default;
-
v8::TracingController* GetTracingController() override {
return &tracing_controller_;
}
@@ -110,18 +105,14 @@ class MockTracingPlatform : public TestPlatform {
} // namespace
-TEST(TraceEventDisabledCategory) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TraceEventDisabledCategory, MockTracingPlatform) {
// Disabled category, will not add events.
TRACE_EVENT_BEGIN0("cat", "e1");
TRACE_EVENT_END0("cat", "e1");
CHECK_EQ(0, platform.NumberOfTraceObjects());
}
-TEST(TraceEventNoArgs) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TraceEventNoArgs, MockTracingPlatform) {
// Enabled category will add 2 events.
TRACE_EVENT_BEGIN0("v8-cat", "e1");
TRACE_EVENT_END0("v8-cat", "e1");
@@ -136,9 +127,7 @@ TEST(TraceEventNoArgs) {
CHECK_EQ(0, platform.GetTraceObject(1)->num_args);
}
-TEST(TraceEventWithOneArg) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TraceEventWithOneArg, MockTracingPlatform) {
TRACE_EVENT_BEGIN1("v8-cat", "e1", "arg1", 42);
TRACE_EVENT_END1("v8-cat", "e1", "arg1", 42);
TRACE_EVENT_BEGIN1("v8-cat", "e2", "arg1", "abc");
@@ -152,9 +141,7 @@ TEST(TraceEventWithOneArg) {
CHECK_EQ(1, platform.GetTraceObject(3)->num_args);
}
-TEST(TraceEventWithTwoArgs) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TraceEventWithTwoArgs, MockTracingPlatform) {
TRACE_EVENT_BEGIN2("v8-cat", "e1", "arg1", 42, "arg2", "abc");
TRACE_EVENT_END2("v8-cat", "e1", "arg1", 42, "arg2", "abc");
TRACE_EVENT_BEGIN2("v8-cat", "e2", "arg1", "abc", "arg2", 43);
@@ -168,9 +155,7 @@ TEST(TraceEventWithTwoArgs) {
CHECK_EQ(2, platform.GetTraceObject(3)->num_args);
}
-TEST(ScopedTraceEvent) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(ScopedTraceEvent, MockTracingPlatform) {
{ TRACE_EVENT0("v8-cat", "e"); }
CHECK_EQ(1, platform.NumberOfTraceObjects());
@@ -187,9 +172,7 @@ TEST(ScopedTraceEvent) {
CHECK_EQ(2, platform.GetTraceObject(2)->num_args);
}
-TEST(TestEventWithFlow) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TestEventWithFlow, MockTracingPlatform) {
static uint64_t bind_id = 21;
{
TRACE_EVENT_WITH_FLOW0("v8-cat", "f1", bind_id, TRACE_EVENT_FLAG_FLOW_OUT);
@@ -211,9 +194,7 @@ TEST(TestEventWithFlow) {
CHECK_EQ(TRACE_EVENT_FLAG_FLOW_IN, platform.GetTraceObject(2)->flags);
}
-TEST(TestEventWithId) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TestEventWithId, MockTracingPlatform) {
static uint64_t event_id = 21;
TRACE_EVENT_ASYNC_BEGIN0("v8-cat", "a1", event_id);
TRACE_EVENT_ASYNC_END0("v8-cat", "a1", event_id);
@@ -225,9 +206,7 @@ TEST(TestEventWithId) {
CHECK_EQ(event_id, platform.GetTraceObject(1)->id);
}
-TEST(TestEventWithTimestamp) {
- MockTracingPlatform platform;
-
+TEST_WITH_PLATFORM(TestEventWithTimestamp, MockTracingPlatform) {
TRACE_EVENT_INSTANT_WITH_TIMESTAMP0("v8-cat", "0arg",
TRACE_EVENT_SCOPE_GLOBAL, 1729);
TRACE_EVENT_INSTANT_WITH_TIMESTAMP1("v8-cat", "1arg",
@@ -254,9 +233,8 @@ TEST(TestEventWithTimestamp) {
CHECK_EQ(32832, platform.GetTraceObject(4)->timestamp);
}
-TEST(BuiltinsIsTraceCategoryEnabled) {
+TEST_WITH_PLATFORM(BuiltinsIsTraceCategoryEnabled, MockTracingPlatform) {
CcTest::InitializeVM();
- MockTracingPlatform platform;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -302,9 +280,8 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
}
}
-TEST(BuiltinsTrace) {
+TEST_WITH_PLATFORM(BuiltinsTrace, MockTracingPlatform) {
CcTest::InitializeVM();
- MockTracingPlatform platform;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index 182a13bba0..595cda98d7 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -126,7 +126,7 @@ void CheckCalleeSavedRegisters(const RegisterState& register_state) {
static const void* fake_stack_base = nullptr;
TEST(Unwind_BadState_Fail_CodePagesAPI) {
- JSEntryStubs entry_stubs; // Fields are intialized to nullptr.
+ JSEntryStubs entry_stubs; // Fields are initialized to nullptr.
RegisterState register_state;
size_t pages_length = 0;
MemoryRange* code_pages = nullptr;
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 9866e22bc6..5ecdab31aa 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -191,7 +191,7 @@ TEST(WeakMapPromotionMarkCompact) {
}
TEST(WeakMapScavenge) {
- if (i::FLAG_single_generation) return;
+ if (i::FLAG_single_generation || i::FLAG_stress_incremental_marking) return;
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index 04c73c55c2..4835f56278 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -5,6 +5,7 @@
#include <stdint.h>
#include "src/base/vector.h"
+#include "src/codegen/signature.h"
#include "src/utils/utils.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/struct-types.h"
@@ -39,6 +40,11 @@ class WasmGCTester {
execution_tier == TestExecutionTier::kLiftoff),
flag_liftoff_only(&v8::internal::FLAG_liftoff_only,
execution_tier == TestExecutionTier::kLiftoff),
+ flag_wasm_dynamic_tiering(&v8::internal::FLAG_wasm_dynamic_tiering,
+ v8::internal::FLAG_liftoff_only != true),
+ // Test both setups with canonicalization and without.
+ flag_canonicalization(&v8::internal::FLAG_wasm_type_canonicalization,
+ execution_tier == TestExecutionTier::kTurbofan),
flag_tierup(&v8::internal::FLAG_wasm_tier_up, false),
zone_(&allocator, ZONE_NAME),
builder_(&zone_),
@@ -169,6 +175,19 @@ class WasmGCTester {
CheckHasThrownImpl(function_index, sig, &packer, expected);
}
+ bool HasSimdSupport(TestExecutionTier tier) const {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ // Liftoff does not have a fallback for executing SIMD instructions if
+ // SSE4_1 is not available.
+ if (tier == TestExecutionTier::kLiftoff &&
+ !CpuFeatures::IsSupported(SSE4_1)) {
+ return false;
+ }
+#endif
+ USE(tier);
+ return true;
+ }
+
Handle<WasmInstanceObject> instance() { return instance_; }
Isolate* isolate() { return isolate_; }
WasmModuleBuilder* builder() { return &builder_; }
@@ -181,6 +200,8 @@ class WasmGCTester {
const FlagScope<bool> flag_typedfuns;
const FlagScope<bool> flag_liftoff;
const FlagScope<bool> flag_liftoff_only;
+ const FlagScope<bool> flag_wasm_dynamic_tiering;
+ const FlagScope<bool> flag_canonicalization;
const FlagScope<bool> flag_tierup;
byte DefineFunctionImpl(WasmFunctionBuilder* fun,
@@ -474,6 +495,28 @@ WASM_COMPILED_EXEC_TEST(RefCast) {
{F(kWasmI32, true), F(kWasmF32, false)}, supertype_index);
const byte subtype2_index = tester.DefineStruct(
{F(kWasmI32, true), F(kWasmI64, false)}, supertype_index);
+ auto super_sig = FixedSizeSignature<ValueType>::Params(
+ ValueType::Ref(subtype1_index, kNullable))
+ .Returns(ValueType::Ref(supertype_index, kNullable));
+ auto sub_sig1 = FixedSizeSignature<ValueType>::Params(
+ ValueType::Ref(supertype_index, kNullable))
+ .Returns(ValueType::Ref(subtype1_index, kNullable));
+ auto sub_sig2 = FixedSizeSignature<ValueType>::Params(
+ ValueType::Ref(supertype_index, kNullable))
+ .Returns(ValueType::Ref(subtype2_index, kNullable));
+ const byte function_type_index = tester.DefineSignature(&super_sig);
+ const byte function_subtype1_index =
+ tester.DefineSignature(&sub_sig1, function_type_index);
+ const byte function_subtype2_index =
+ tester.DefineSignature(&sub_sig2, function_type_index);
+ const byte function_index = tester.DefineFunction(
+ function_subtype1_index, {},
+ {WASM_STRUCT_NEW_DEFAULT_WITH_RTT(subtype1_index,
+ WASM_RTT_CANON(subtype1_index)),
+ WASM_END});
+ // Just so this function counts as "declared".
+ tester.AddGlobal(ValueType::Ref(function_type_index, kNullable), false,
+ WasmInitExpr::RefFuncConst(function_index));
const byte kTestSuccessful = tester.DefineFunction(
tester.sigs.i_v(), {ValueType::Ref(supertype_index, kNullable)},
@@ -493,9 +536,32 @@ WASM_COMPILED_EXEC_TEST(RefCast) {
WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_RTT_CANON(subtype2_index))),
WASM_END});
+ const byte kFuncTestSuccessfulSuper = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(function_type_index, kNullable)},
+ {WASM_LOCAL_SET(0, WASM_REF_FUNC(function_index)),
+ WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_RTT_CANON(function_type_index)),
+ WASM_DROP, WASM_I32V(0), WASM_END});
+
+ const byte kFuncTestSuccessfulSub = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(function_type_index, kNullable)},
+ {WASM_LOCAL_SET(0, WASM_REF_FUNC(function_index)),
+ WASM_REF_CAST(WASM_LOCAL_GET(0),
+ WASM_RTT_CANON(function_subtype1_index)),
+ WASM_DROP, WASM_I32V(0), WASM_END});
+
+ const byte kFuncTestFailed = tester.DefineFunction(
+ tester.sigs.i_v(), {ValueType::Ref(function_type_index, kNullable)},
+ {WASM_LOCAL_SET(0, WASM_REF_FUNC(function_index)),
+ WASM_REF_CAST(WASM_LOCAL_GET(0),
+ WASM_RTT_CANON(function_subtype2_index)),
+ WASM_DROP, WASM_I32V(1), WASM_END});
+
tester.CompileModule();
tester.CheckResult(kTestSuccessful, 0);
tester.CheckHasThrown(kTestFailed);
+ tester.CheckResult(kFuncTestSuccessfulSuper, 0);
+ tester.CheckResult(kFuncTestSuccessfulSub, 0);
+ tester.CheckHasThrown(kFuncTestFailed);
}
WASM_COMPILED_EXEC_TEST(RefCastStatic) {
@@ -916,6 +982,7 @@ TEST(WasmLetInstruction) {
WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WasmGCTester tester(execution_tier);
+ if (!tester.HasSimdSupport(execution_tier)) return;
const byte type_index = tester.DefineArray(wasm::kWasmI32, true);
const byte fp_type_index = tester.DefineArray(wasm::kWasmF64, true);
@@ -1304,11 +1371,15 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
tester.CheckResult(kZeroLength, 0); // Does not throw.
}
-/* TODO(7748): This test requires for recursive groups.
WASM_COMPILED_EXEC_TEST(NewDefault) {
WasmGCTester tester(execution_tier);
+ if (!tester.HasSimdSupport(execution_tier)) return;
+
+ tester.builder()->StartRecursiveTypeGroup();
const byte struct_type = tester.DefineStruct(
{F(wasm::kWasmI32, true), F(wasm::kWasmF64, true), F(optref(0), true)});
+ tester.builder()->EndRecursiveTypeGroup();
+
const byte array_type = tester.DefineArray(wasm::kWasmI32, true);
// Returns: struct[0] + f64_to_i32(struct[1]) + (struct[2].is_null ^ 1) == 0.
const byte allocate_struct = tester.DefineFunction(
@@ -1338,7 +1409,6 @@ WASM_COMPILED_EXEC_TEST(NewDefault) {
tester.CheckResult(allocate_struct, 0);
tester.CheckResult(allocate_array, 0);
}
-*/
WASM_COMPILED_EXEC_TEST(BasicRtt) {
WasmGCTester tester(execution_tier);
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index a6fc58f5d1..b30d8983f4 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -29,10 +29,7 @@ namespace wasm {
class MockPlatform final : public TestPlatform {
public:
- MockPlatform() : task_runner_(std::make_shared<MockTaskRunner>()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
+ MockPlatform() : task_runner_(std::make_shared<MockTaskRunner>()) {}
~MockPlatform() {
for (auto* job_handle : job_handles_) job_handle->ResetPlatform();
@@ -239,25 +236,18 @@ class StreamTester {
};
} // namespace
-#define RUN_STREAM(name) \
- MockPlatform mock_platform; \
- CHECK_EQ(V8::GetCurrentPlatform(), &mock_platform); \
- v8::Isolate::CreateParams create_params; \
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); \
- v8::Isolate* isolate = v8::Isolate::New(create_params); \
- { \
- v8::HandleScope handle_scope(isolate); \
- v8::Local<v8::Context> context = v8::Context::New(isolate); \
- v8::Context::Scope context_scope(context); \
- RunStream_##name(&mock_platform, isolate); \
- } \
- isolate->Dispose();
+#define RUN_STREAM(name) \
+ v8::Isolate* isolate = CcTest::isolate(); \
+ v8::HandleScope handle_scope(isolate); \
+ v8::Local<v8::Context> context = v8::Context::New(isolate); \
+ v8::Context::Scope context_scope(context); \
+ RunStream_##name(&platform, isolate);
#define STREAM_TEST(name) \
void RunStream_##name(MockPlatform*, v8::Isolate*); \
- UNINITIALIZED_TEST(Async##name) { RUN_STREAM(name); } \
+ TEST_WITH_PLATFORM(Async##name, MockPlatform) { RUN_STREAM(name); } \
\
- UNINITIALIZED_TEST(SingleThreaded##name) { \
+ TEST_WITH_PLATFORM(SingleThreaded##name, MockPlatform) { \
i::FlagScope<bool> single_threaded_scope(&i::FLAG_single_threaded, true); \
RUN_STREAM(name); \
} \
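
For readers unfamiliar with the cctest macros, here is an illustrative sketch of what STREAM_TEST(Foo) now produces for a hypothetical test Foo. It is not the literal preprocessor output; TEST_WITH_PLATFORM (defined in cctest.h) is assumed to construct the MockPlatform for the duration of the test and expose it as a variable named platform, which is why RUN_STREAM no longer creates and disposes its own isolate and platform.

// Illustrative expansion sketch only, for a hypothetical STREAM_TEST(Foo).
void RunStream_Foo(MockPlatform*, v8::Isolate*);

TEST_WITH_PLATFORM(AsyncFoo, MockPlatform) {
  // RUN_STREAM(Foo): reuse CcTest's shared isolate instead of spinning up a
  // fresh one per test, as the old UNINITIALIZED_TEST-based macro did.
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);
  RunStream_Foo(&platform, isolate);
}
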
diff --git a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
index abb3dc9520..6e54c0535f 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
@@ -6,6 +6,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-metrics.h"
+#include "include/v8-platform.h"
#include "src/api/api-inl.h"
#include "src/base/platform/time.h"
#include "src/wasm/wasm-engine.h"
@@ -24,10 +25,7 @@ namespace {
class MockPlatform final : public TestPlatform {
public:
- MockPlatform() : task_runner_(std::make_shared<MockTaskRunner>()) {
- // Now that it's completely constructed, make this the current platform.
- i::V8::SetPlatformForTesting(this);
- }
+ MockPlatform() : task_runner_(std::make_shared<MockTaskRunner>()) {}
~MockPlatform() override {
for (auto* job_handle : job_handles_) job_handle->ResetPlatform();
@@ -208,32 +206,24 @@ class TestCompileResolver : public CompilationResultResolver {
} // namespace
-#define RUN_COMPILE(name) \
- MockPlatform mock_platform; \
- CHECK_EQ(V8::GetCurrentPlatform(), &mock_platform); \
- v8::Isolate::CreateParams create_params; \
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); \
- v8::Isolate* isolate = v8::Isolate::New(create_params); \
- { \
- v8::HandleScope handle_scope(isolate); \
- v8::Local<v8::Context> context = v8::Context::New(isolate); \
- v8::Context::Scope context_scope(context); \
- Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); \
- testing::SetupIsolateForWasmModule(i_isolate); \
- RunCompile_##name(&mock_platform, i_isolate); \
- } \
- isolate->Dispose();
+#define RUN_COMPILE(name) \
+ v8::HandleScope handle_scope(CcTest::isolate()); \
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate()); \
+ v8::Context::Scope context_scope(context); \
+ Isolate* i_isolate = CcTest::i_isolate(); \
+ testing::SetupIsolateForWasmModule(i_isolate); \
+ RunCompile_##name(&platform, i_isolate);
#define COMPILE_TEST(name) \
void RunCompile_##name(MockPlatform*, i::Isolate*); \
- UNINITIALIZED_TEST(Sync##name) { \
+ TEST_WITH_PLATFORM(Sync##name, MockPlatform) { \
i::FlagScope<bool> sync_scope(&i::FLAG_wasm_async_compilation, false); \
RUN_COMPILE(name); \
} \
\
- UNINITIALIZED_TEST(Async##name) { RUN_COMPILE(name); } \
+ TEST_WITH_PLATFORM(Async##name, MockPlatform) { RUN_COMPILE(name); } \
\
- UNINITIALIZED_TEST(Streaming##name) { \
+ TEST_WITH_PLATFORM(Streaming##name, MockPlatform) { \
i::FlagScope<bool> streaming_scope(&i::FLAG_wasm_test_streaming, true); \
RUN_COMPILE(name); \
} \
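
The i::FlagScope<bool> objects used in these COMPILE_TEST variants flip a V8 flag for the duration of the test body and restore it afterwards. A minimal standalone sketch of that RAII idea is below; ScopedFlag and the flag variable are hypothetical stand-ins, not V8's actual FlagScope implementation.

#include <cassert>

// Hypothetical stand-in for i::FlagScope: set a flag on construction,
// restore the previous value on destruction.
template <typename T>
class ScopedFlag {
 public:
  ScopedFlag(T* flag, T value) : flag_(flag), previous_(*flag) { *flag = value; }
  ~ScopedFlag() { *flag_ = previous_; }

 private:
  T* flag_;
  T previous_;
};

bool flag_wasm_async_compilation = true;  // illustrative flag, not V8's

int main() {
  {
    ScopedFlag<bool> sync_scope(&flag_wasm_async_compilation, false);
    assert(!flag_wasm_async_compilation);  // compilation forced synchronous here
  }
  assert(flag_wasm_async_compilation);  // restored once the scope ends
  return 0;
}
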
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 142ba2d8e2..71a1e77e3d 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -348,7 +348,7 @@ CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
- byte size = type.element_size_bytes();
+ byte size = type.value_kind_size();
global_offset = (global_offset + size - 1) & ~(size - 1); // align
test_module_->globals.push_back(
{type, true, {}, {global_offset}, false, false});
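
The expression (global_offset + size - 1) & ~(size - 1) above rounds the running offset up to the next multiple of the value's size, which works because wasm value sizes are powers of two. A small standalone sketch follows; AlignUp is a hypothetical helper, not part of the test harness.

#include <cstdint>

// Round offset up to the next multiple of size; size must be a power of two
// (1, 2, 4, 8 or 16 bytes), which holds for all wasm value kinds.
constexpr uint32_t AlignUp(uint32_t offset, uint32_t size) {
  return (offset + size - 1) & ~(size - 1);
}

static_assert(AlignUp(6, 4) == 8, "6 rounds up to 8 for 4-byte values");
static_assert(AlignUp(8, 8) == 8, "already-aligned offsets are unchanged");
static_assert(AlignUp(1, 16) == 16, "16-byte values start on 16-byte boundaries");
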
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index f5a3ce2389..5adfe39f84 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -186,7 +186,7 @@ class TestingModuleBuilder {
memset(raw, 0, mem_size_);
}
- // Pseudo-randomly intialize the memory.
+ // Pseudo-randomly initialize the memory.
void RandomizeMemory(unsigned int seed = 88) {
byte* raw = raw_mem_start<byte>();
byte* end = raw_mem_end<byte>();
diff --git a/deps/v8/test/fuzzer/inspector/regress-1307449 b/deps/v8/test/fuzzer/inspector/regress-1307449
new file mode 100644
index 0000000000..aa086c58ba
--- /dev/null
+++ b/deps/v8/test/fuzzer/inspector/regress-1307449
@@ -0,0 +1,529 @@
+utils = new Proxy(utils, {
+ get: function(target, prop) {
+ if (prop in target) return target[prop];
+ return i=>i;
+ }
+ });
+
+// Loaded from 'test/inspector/protocol-test.js':
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest = {};
+InspectorTest._dumpInspectorProtocolMessages = false;
+InspectorTest._commandsForLogging = new Set();
+InspectorTest._sessions = new Set();
+
+InspectorTest.log = utils.print.bind(utils);
+InspectorTest.quitImmediately = utils.quit.bind(utils);
+
+InspectorTest.logProtocolCommandCalls = function(command) {
+ InspectorTest._commandsForLogging.add(command);
+}
+
+InspectorTest.completeTest = function() {
+ var promises = [];
+ for (var session of InspectorTest._sessions)
+ promises.push(session.Protocol.Debugger.disable());
+ Promise.all(promises).then(() => utils.quit());
+}
+
+InspectorTest.waitForPendingTasks = function() {
+ var promises = [];
+ for (var session of InspectorTest._sessions)
+ promises.push(session.Protocol.Runtime.evaluate({ expression: "new Promise(r => setTimeout(r, 0))//# sourceURL=wait-for-pending-tasks.js", awaitPromise: true }));
+ return Promise.all(promises);
+}
+
+InspectorTest.startDumpingProtocolMessages = function() {
+ InspectorTest._dumpInspectorProtocolMessages = true;
+}
+
+InspectorTest.logMessage = function(originalMessage) {
+ const nonStableFields = new Set([
+ 'objectId', 'scriptId', 'exceptionId', 'timestamp', 'executionContextId',
+ 'callFrameId', 'breakpointId', 'bindRemoteObjectFunctionId',
+ 'formatterObjectId', 'debuggerId', 'bodyGetterId', 'uniqueId'
+ ]);
+ const message = JSON.parse(JSON.stringify(originalMessage, replacer.bind(null, Symbol(), nonStableFields)));
+ if (message.id)
+ message.id = '<messageId>';
+
+ InspectorTest.logObject(message);
+ return originalMessage;
+
+ function replacer(stableIdSymbol, nonStableFields, name, val) {
+ if (nonStableFields.has(name))
+ return `<${name}>`;
+ if (name === 'internalProperties') {
+ const stableId = val.find(prop => prop.name === '[[StableObjectId]]');
+ if (stableId)
+ stableId.value[stableIdSymbol] = true;
+ }
+ if (name === 'parentId')
+ return { id: '<id>' };
+ if (val && val[stableIdSymbol])
+ return '<StablectObjectId>';
+ return val;
+ }
+}
+
+InspectorTest.logObject = function(object, title) {
+ var lines = [];
+
+ function dumpValue(value, prefix, prefixWithName) {
+ if (typeof value === "object" && value !== null) {
+ if (value instanceof Array)
+ dumpItems(value, prefix, prefixWithName);
+ else
+ dumpProperties(value, prefix, prefixWithName);
+ } else {
+ lines.push(prefixWithName + String(value).replace(/\n/g, " "));
+ }
+ }
+
+ function dumpProperties(object, prefix, firstLinePrefix) {
+ prefix = prefix || "";
+ firstLinePrefix = firstLinePrefix || prefix;
+ lines.push(firstLinePrefix + "{");
+
+ var propertyNames = Object.keys(object);
+ propertyNames.sort();
+ for (var i = 0; i < propertyNames.length; ++i) {
+ var name = propertyNames[i];
+ if (!object.hasOwnProperty(name))
+ continue;
+ var prefixWithName = " " + prefix + name + " : ";
+ dumpValue(object[name], " " + prefix, prefixWithName);
+ }
+ lines.push(prefix + "}");
+ }
+
+ function dumpItems(object, prefix, firstLinePrefix) {
+ prefix = prefix || "";
+ firstLinePrefix = firstLinePrefix || prefix;
+ lines.push(firstLinePrefix + "[");
+ for (var i = 0; i < object.length; ++i)
+ dumpValue(object[i], " " + prefix, " " + prefix + "[" + i + "] : ");
+ lines.push(prefix + "]");
+ }
+
+ dumpValue(object, "", title || "");
+ InspectorTest.log(lines.join("\n"));
+}
+
+InspectorTest.decodeBase64 = function(base64) {
+ const LOOKUP = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+
+ const paddingLength = base64.match(/=*$/)[0].length;
+ const bytesLength = base64.length * 0.75 - paddingLength;
+
+ let bytes = new Uint8Array(bytesLength);
+
+ for (let i = 0, p = 0; i < base64.length; i += 4, p += 3) {
+ let bits = 0;
+ for (let j = 0; j < 4; j++) {
+ bits <<= 6;
+ const c = base64[i + j];
+ if (c !== '=') bits |= LOOKUP.indexOf(c);
+ }
+ for (let j = p + 2; j >= p; j--) {
+ if (j < bytesLength) bytes[j] = bits;
+ bits >>= 8;
+ }
+ }
+
+ return bytes;
+}
+
+InspectorTest.trimErrorMessage = function(message) {
+ if (!message.error || !message.error.data)
+ return message;
+ message.error.data = message.error.data.replace(/at position \d+/,
+ 'at <some position>');
+ return message;
+}
+
+InspectorTest.ContextGroup = class {
+ constructor() {
+ this.id = utils.createContextGroup();
+ }
+
+ createContext(name) {
+ utils.createContext(this.id, name || '');
+ }
+
+ schedulePauseOnNextStatement(reason, details) {
+ utils.schedulePauseOnNextStatement(this.id, reason, details);
+ }
+
+ cancelPauseOnNextStatement() {
+ utils.cancelPauseOnNextStatement(this.id);
+ }
+
+ addScript(string, lineOffset, columnOffset, url) {
+ utils.compileAndRunWithOrigin(this.id, string, url || '', lineOffset || 0, columnOffset || 0, false);
+ }
+
+ addInlineScript(string, url) {
+ const match = (new Error().stack).split('\n')[2].match(/([0-9]+):([0-9]+)/);
+ this.addScript(
+ string, match[1] * 1, match[1] * 1 + '.addInlineScript('.length, url);
+ }
+
+ addModule(string, url, lineOffset, columnOffset) {
+ utils.compileAndRunWithOrigin(this.id, string, url, lineOffset || 0, columnOffset || 0, true);
+ }
+
+ loadScript(fileName) {
+ this.addScript(utils.read(fileName));
+ }
+
+ connect() {
+ return new InspectorTest.Session(this);
+ }
+
+ reset() {
+ utils.resetContextGroup(this.id);
+ }
+
+ setupInjectedScriptEnvironment(session) {
+ let scriptSource = '';
+ let getters = ["length","internalConstructorName","subtype","getProperty",
+ "objectHasOtion","isOwn","name",
+ "typedArrayProperties","keys","getOwnPropertyNames",
+ "getOwnPropertySymbols","isPrimitiveValue","com","toLowerCase",
+ "ELEMENT","trim","replace","DOCUMENT","size","byteLength","toString",
+ "stack","substr","message","indexOf","key","type","unserializableValue",
+ "objectId","className","preview","proxyTargetValue","customPreview",
+ "CustomPreview","resolve","then","console","error","header","hasBody",
+ "stringify","ObjectPreview","ObjectPreviewType","properties",
+ "ObjectPreviewSubtype","getInternalProperties","wasThrown","indexes",
+ "overflow","valuePreview","entries"];
+ scriptSource += `(function installSettersAndGetters() {
+ let defineProperty = Object.defineProperty;
+ let ObjectPrototype = Object.prototype;
+ let ArrayPrototype = Array.prototype;
+ defineProperty(ArrayPrototype, 0, {
+ set() { debugger; throw 42; }, get() { debugger; throw 42; },
+ __proto__: null
+ });`,
+ scriptSource += getters.map(getter => `
+ defineProperty(ObjectPrototype, '${getter}', {
+ set() { debugger; throw 42; }, get() { debugger; throw 42; },
+ __proto__: null
+ });
+ `).join('\n') + '})();';
+ this.addScript(scriptSource);
+
+ if (session) {
+ InspectorTest.log('WARNING: setupInjectedScriptEnvironment with debug flag for debugging only and should not be landed.');
+ session.setupScriptMap();
+ session.Protocol.Debugger.enable();
+ session.Protocol.Debugger.onPaused(message => {
+ let callFrames = message.params.callFrames;
+ session.logSourceLocations(callFrames.map(frame => frame.location));
+ })
+ }
+ }
+};
+
+InspectorTest.Session = class {
+ constructor(contextGroup) {
+ this.contextGroup = contextGroup;
+ this._dispatchTable = new Map();
+ this._eventHandlers = new Map();
+ this._requestId = 0;
+ this.Protocol = this._setupProtocol();
+ InspectorTest._sessions.add(this);
+ this.id = utils.connectSession(contextGroup.id, '', this._dispatchMessage.bind(this));
+ }
+
+ disconnect() {
+ InspectorTest._sessions.delete(this);
+ utils.disconnectSession(this.id);
+ }
+
+ reconnect() {
+ var state = utils.disconnectSession(this.id);
+ this.id = utils.connectSession(this.contextGroup.id, state, this._dispatchMessage.bind(this));
+ }
+
+ async addInspectedObject(serializable) {
+ return this.Protocol.Runtime.evaluate({expression: `inspector.addInspectedObject(${this.id}, ${JSON.stringify(serializable)})`});
+ }
+
+ sendRawCommand(requestId, command, handler) {
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils.print("frontend: " + command);
+ this._dispatchTable.set(requestId, handler);
+ utils.sendMessageToBackend(this.id, command);
+ }
+
+ setupScriptMap() {
+ if (this._scriptMap)
+ return;
+ this._scriptMap = new Map();
+ }
+
+ getCallFrameUrl(frame) {
+ const {scriptId} = frame.location ? frame.location : frame;
+ return (this._scriptMap.get(scriptId) ?? frame).url;
+ }
+
+ logCallFrames(callFrames) {
+ for (var frame of callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var url = this.getCallFrameUrl(frame);
+ var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
+ var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
+ InspectorTest.log(`${functionName} (${url}:${lineNumber}:${columnNumber})`);
+ }
+ }
+
+ async getScriptWithSource(scriptId, forceSourceRequest) {
+ var script = this._scriptMap.get(scriptId);
+ if (forceSourceRequest || !(script.scriptSource || script.bytecode)) {
+ var message = await this.Protocol.Debugger.getScriptSource({ scriptId });
+ script.scriptSource = message.result.scriptSource;
+ if (message.result.bytecode) {
+ script.bytecode = InspectorTest.decodeBase64(message.result.bytecode);
+ }
+ }
+ return script;
+ }
+
+ async logSourceLocation(location, forceSourceRequest) {
+ var scriptId = location.scriptId;
+ if (!this._scriptMap || !this._scriptMap.has(scriptId)) {
+ InspectorTest.log("setupScriptMap should be called before Protocol.Debugger.enable.");
+ InspectorTest.completeTest();
+ }
+ var script = await this.getScriptWithSource(scriptId, forceSourceRequest);
+
+ if (script.bytecode) {
+ if (location.lineNumber != 0) {
+ InspectorTest.log('Unexpected wasm line number: ' + location.lineNumber);
+ }
+ let wasm_opcode = script.bytecode[location.columnNumber];
+ let opcode_str = wasm_opcode.toString(16);
+ if (opcode_str.length % 2) opcode_str = `0${opcode_str}`;
+ if (InspectorTest.getWasmOpcodeName) {
+ opcode_str += ` (${InspectorTest.getWasmOpcodeName(wasm_opcode)})`;
+ }
+ InspectorTest.log(`Script ${script.url} byte offset ${
+ location.columnNumber}: Wasm opcode 0x${opcode_str}`);
+ } else {
+ var lines = script.scriptSource.split('\n');
+ var line = lines[location.lineNumber];
+ line = line.slice(0, location.columnNumber) + '#' + (line.slice(location.columnNumber) || '');
+ lines[location.lineNumber] = line;
+ lines = lines.filter(line => line.indexOf('//# sourceURL=') === -1);
+ InspectorTest.log(lines.slice(Math.max(location.lineNumber - 1, 0), location.lineNumber + 2).join('\n'));
+ InspectorTest.log('');
+ }
+ }
+
+ logSourceLocations(locations) {
+ if (locations.length == 0) return Promise.resolve();
+ return this.logSourceLocation(locations[0]).then(() => this.logSourceLocations(locations.splice(1)));
+ }
+
+ async logBreakLocations(inputLocations) {
+ let locations = inputLocations.slice();
+ let scriptId = locations[0].scriptId;
+ let script = await this.getScriptWithSource(scriptId);
+ let lines = script.scriptSource.split('\n');
+ locations = locations.sort((loc1, loc2) => {
+ if (loc2.lineNumber !== loc1.lineNumber) return loc2.lineNumber - loc1.lineNumber;
+ return loc2.columnNumber - loc1.columnNumber;
+ });
+ for (let location of locations) {
+ let line = lines[location.lineNumber];
+ line = line.slice(0, location.columnNumber) + locationMark(location.type) + line.slice(location.columnNumber);
+ lines[location.lineNumber] = line;
+ }
+ lines = lines.filter(line => line.indexOf('//# sourceURL=') === -1);
+ InspectorTest.log(lines.join('\n') + '\n');
+ return inputLocations;
+
+ function locationMark(type) {
+ if (type === 'return') return '|R|';
+ if (type === 'call') return '|C|';
+ if (type === 'debuggerStatement') return '|D|';
+ return '|_|';
+ }
+ }
+
+ async logTypeProfile(typeProfile, source) {
+ let entries = typeProfile.entries;
+
+ // Sort in reverse order so we can replace entries without invalidating
+ // the other offsets.
+ entries = entries.sort((a, b) => b.offset - a.offset);
+
+ for (let entry of entries) {
+ source = source.slice(0, entry.offset) + typeAnnotation(entry.types) +
+ source.slice(entry.offset);
+ }
+ InspectorTest.log(source);
+ return typeProfile;
+
+ function typeAnnotation(types) {
+ return `/*${types.map(t => t.name).join(', ')}*/`;
+ }
+ }
+
+ logAsyncStackTrace(asyncStackTrace) {
+ while (asyncStackTrace) {
+ InspectorTest.log(`-- ${asyncStackTrace.description || '<empty>'} --`);
+ this.logCallFrames(asyncStackTrace.callFrames);
+ if (asyncStackTrace.parentId) InspectorTest.log(' <external stack>');
+ asyncStackTrace = asyncStackTrace.parent;
+ }
+ }
+
+ _sendCommandPromise(method, params) {
+ if (typeof params !== 'object')
+ utils.print(`WARNING: non-object params passed to invocation of method ${method}`);
+ if (InspectorTest._commandsForLogging.has(method))
+ utils.print(method + ' called');
+ var requestId = ++this._requestId;
+ var messageObject = { "id": requestId, "method": method, "params": params };
+ return new Promise(fulfill => this.sendRawCommand(requestId, JSON.stringify(messageObject), fulfill));
+ }
+
+ _setupProtocol() {
+ return new Proxy({}, { get: (target, agentName, receiver) => new Proxy({}, {
+ get: (target, methodName, receiver) => {
+ const eventPattern = /^on(ce)?([A-Z][A-Za-z0-9]+)/;
+ var match = eventPattern.exec(methodName);
+ if (!match)
+ return args => this._sendCommandPromise(`${agentName}.${methodName}`, args || {});
+ var eventName = match[2];
+ eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
+ if (match[1])
+ return numOfEvents => this._waitForEventPromise(
+ `${agentName}.${eventName}`, numOfEvents || 1);
+ return listener => this._eventHandlers.set(`${agentName}.${eventName}`, listener);
+ }
+ })});
+ }
+
+ _dispatchMessage(messageString) {
+ var messageObject = JSON.parse(messageString);
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils.print("backend: " + JSON.stringify(messageObject));
+ const kMethodNotFound = -32601;
+ if (messageObject.error && messageObject.error.code === kMethodNotFound) {
+ InspectorTest.log(`Error: Called non-existent method. ${
+ messageObject.error.message} code: ${messageObject.error.code}`);
+ InspectorTest.completeTest();
+ }
+ try {
+ var messageId = messageObject["id"];
+ if (typeof messageId === "number") {
+ var handler = this._dispatchTable.get(messageId);
+ if (handler) {
+ handler(messageObject);
+ this._dispatchTable.delete(messageId);
+ }
+ } else {
+ var eventName = messageObject["method"];
+ var eventHandler = this._eventHandlers.get(eventName);
+ if (this._scriptMap && eventName === "Debugger.scriptParsed")
+ this._scriptMap.set(messageObject.params.scriptId, JSON.parse(JSON.stringify(messageObject.params)));
+ if (eventName === "Debugger.scriptParsed" && messageObject.params.url === "wait-for-pending-tasks.js")
+ return;
+ if (eventHandler)
+ eventHandler(messageObject);
+ }
+ } catch (e) {
+ InspectorTest.log("Exception when dispatching message: " + e + "\n" + e.stack + "\n message = " + JSON.stringify(messageObject, null, 2));
+ InspectorTest.completeTest();
+ }
+ };
+
+ _waitForEventPromise(eventName, numOfEvents) {
+ let events = [];
+ return new Promise(fulfill => {
+ this._eventHandlers.set(eventName, result => {
+ --numOfEvents;
+ events.push(result);
+ if (numOfEvents === 0) {
+ delete this._eventHandlers.delete(eventName);
+ fulfill(events.length > 1 ? events : events[0]);
+ }
+ });
+ });
+ }
+};
+
+InspectorTest.runTestSuite = function(testSuite) {
+ function nextTest() {
+ if (!testSuite.length) {
+ InspectorTest.completeTest();
+ return;
+ }
+ var fun = testSuite.shift();
+ InspectorTest.log("\nRunning test: " + fun.name);
+ fun(nextTest);
+ }
+ nextTest();
+}
+
+InspectorTest.runAsyncTestSuite = async function(testSuite) {
+ const selected = testSuite.filter(test => test.name.startsWith('f_'));
+ if (selected.length)
+ testSuite = selected;
+ for (var test of testSuite) {
+ InspectorTest.log("\nRunning test: " + test.name);
+ try {
+ await test();
+ } catch (e) {
+ utils.print(e.stack);
+ }
+ }
+ InspectorTest.completeTest();
+}
+
+InspectorTest.start = function(description) {
+ try {
+ InspectorTest.log(description);
+ var contextGroup = new InspectorTest.ContextGroup();
+ var session = contextGroup.connect();
+ return { session: session, contextGroup: contextGroup, Protocol: session.Protocol };
+ } catch (e) {
+ utils.print(e.stack);
+ }
+}
+
+// Loaded from 'test/inspector/regress/regress-crbug-1220203.js':
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Regression test for crbug.com/1220203.');
+
+contextGroup.addScript(`
+async function main() {
+ await 1;
+ throw new Error();
+}`);
+
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function testBreakOnUncaughtException() {
+ await Promise.all([
+ Protocol.Runtime.enable(),
+ Protocol.Debugger.enable(),
+ Protocol.Debugger.setPauseOnExceptions({state: 'uncaught'}),
+ ]);
+ const pausedPromise = Protocol.Debugger.oncePaused();
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'main()', awaitPromise: true});
+ const {params: {callFrames, data}} = await pausedPromise;
+ await evalPromise;
+ },
+]);
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage-block.js b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
index 70475041d0..c24d6038f3 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage-block.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
@@ -6,6 +6,7 @@
// Flags: --no-stress-flush-code
// Flags: --no-stress-incremental-marking
// Flags: --no-concurrent-recompilation
+// Flags: --no-maglev
var source =
`
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage.js b/deps/v8/test/inspector/cpu-profiler/coverage.js
index 78f699f8b2..8a1eb0f86c 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage.js
@@ -7,6 +7,7 @@
// Flags: --no-stress-incremental-marking
// Flags: --no-concurrent-recompilation
// Flags: --no-baseline-batch-compilation
+// Flags: --no-maglev
var source =
`
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
index c129a40cf6..1da6f6d264 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
@@ -121,7 +121,7 @@ Evaluating this.#inc(); from the base class
columnNumber : 4
exception : {
className : SyntaxError
- description : SyntaxError: Private field '#inc' must be declared in an enclosing class at Function.test (<anonymous>:24:7) at run (<anonymous>:28:5) at <anonymous>:1:1
+ description : SyntaxError: Private field '#inc' must be declared in an enclosing class at B.test (<anonymous>:24:7) at run (<anonymous>:28:5) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
@@ -133,7 +133,7 @@ Evaluating this.#inc(); from the base class
}
result : {
className : SyntaxError
- description : SyntaxError: Private field '#inc' must be declared in an enclosing class at Function.test (<anonymous>:24:7) at run (<anonymous>:28:5) at <anonymous>:1:1
+ description : SyntaxError: Private field '#inc' must be declared in an enclosing class at B.test (<anonymous>:24:7) at run (<anonymous>:28:5) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
index c852c1b6f0..04a4f5aa0f 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
@@ -19,7 +19,7 @@ Access A.#staticMethod() in testStatic()
columnNumber : 0
exception : {
className : ReferenceError
- description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at A.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
@@ -31,7 +31,7 @@ Access A.#staticMethod() in testStatic()
}
result : {
className : ReferenceError
- description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at A.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
@@ -43,7 +43,7 @@ Access this.#staticMethod() in testStatic()
columnNumber : 5
exception : {
className : Error
- description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:6) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:6) at A.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
@@ -55,7 +55,7 @@ Access this.#staticMethod() in testStatic()
}
result : {
className : Error
- description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:6) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:6) at A.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
index 5ce24aa768..3188cfe9e6 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
@@ -273,12 +273,12 @@ function foo() { function boo() { #return 239# }; #boo #}
#
function foo() { let boo = #function() { #return 239# }; #}
#
-#() => { #239 #}
+var foo = #() => { #239 #}
#
function foo() { #239 #}
#
-#() => #239#
-#() => { #return 239# }#
+var foo = #() => #239#
+var foo = #() => { #return 239# }#
Running test: argumentsAsCalls
function foo(){#}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
index 5f52be7d5b..d99e0c1811 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
@@ -137,11 +137,11 @@ function foo6() { Promise.resolve().then(() => 42) }
.then(() => checkSource('function foo() { function boo() { return 239 } }\n', { lineNumber: 0, columnNumber: 0 }))
.then(() => checkSource('function foo() { function boo() { return 239 }; boo }\n', { lineNumber: 0, columnNumber: 0 }))
.then(() => checkSource('function foo() { let boo = function() { return 239 }; }\n', { lineNumber: 0, columnNumber: 0 }))
- .then(() => checkSource('() => { 239 }\n', { lineNumber: 0, columnNumber: 0 }))
+ .then(() => checkSource('var foo = () => { 239 }\n', { lineNumber: 0, columnNumber: 0 }))
.then(() => checkSource('function foo() { 239 }\n', { lineNumber: 0, columnNumber: 0 }))
// TODO(kozyatinskiy): lineNumber for return position should be only 9, not 8.
- .then(() => checkSource('() => 239', { lineNumber: 0, columnNumber: 0 }))
- .then(() => checkSource('() => { return 239 }', { lineNumber: 0, columnNumber: 0 }))
+ .then(() => checkSource('var foo = () => 239', { lineNumber: 0, columnNumber: 0 }))
+ .then(() => checkSource('var foo = () => { return 239 }', { lineNumber: 0, columnNumber: 0 }))
.then(next);
},
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index e698ac45dd..e26a955be6 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -339,10 +339,16 @@ void InspectorIsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
int exception_id = HandleMessage(
v8::Exception::CreateMessage(isolate, exception), exception);
if (exception_id) {
- promise
- ->SetPrivate(isolate->GetCurrentContext(), id_private,
- v8::Int32::New(isolate, exception_id))
- .ToChecked();
+ if (promise
+ ->SetPrivate(isolate->GetCurrentContext(), id_private,
+ v8::Int32::New(isolate, exception_id))
+ .IsNothing()) {
+ // Handling the |message| above calls back into JavaScript (by reporting
+ // it via CDP) in case of `inspector-test`, and can lead to terminating
+ // execution on the |isolate|, in which case the API call above will
+ // return immediately.
+ DCHECK(isolate->IsExecutionTerminating());
+ }
}
}
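
The new comment describes a general embedder rule: a v8::Maybe-returning API call can come back empty once execution on the isolate is terminating, so the result must be checked rather than unconditionally ToChecked()-ed. A condensed sketch of that pattern follows; the helper name is illustrative and assert stands in for the DCHECK used above.

#include <cassert>

#include "include/v8.h"

// Hypothetical helper mirroring the logic above: store an id on a promise,
// tolerating failure caused by terminating execution.
void StorePrivateId(v8::Isolate* isolate, v8::Local<v8::Promise> promise,
                    v8::Local<v8::Private> key, int id) {
  v8::Maybe<bool> ok = promise->SetPrivate(isolate->GetCurrentContext(), key,
                                           v8::Int32::New(isolate, id));
  if (ok.IsNothing()) {
    // An empty Maybe here is only expected when script execution was
    // terminated while the rejection was being reported.
    assert(isolate->IsExecutionTerminating());
  }
}
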
diff --git a/deps/v8/test/inspector/runtime/add-web-driver-value-expected.txt b/deps/v8/test/inspector/runtime/add-web-driver-value-expected.txt
new file mode 100644
index 0000000000..faf7763768
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/add-web-driver-value-expected.txt
@@ -0,0 +1,633 @@
+RemoteObject.webDriverValue
+
+Running test: PrimitiveValue
+testing expression: undefined
+Runtime.evaluate
+{
+ type : undefined
+}
+Runtime.callFunctionOn
+{
+ type : undefined
+}
+testing expression: null
+Runtime.evaluate
+{
+ type : null
+}
+Runtime.callFunctionOn
+{
+ type : null
+}
+testing expression: 'foo'
+Runtime.evaluate
+{
+ type : string
+ value : foo
+}
+Runtime.callFunctionOn
+{
+ type : string
+ value : foo
+}
+testing expression: [true, false]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : boolean
+ value : true
+ }
+ [1] : {
+ type : boolean
+ value : false
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : boolean
+ value : true
+ }
+ [1] : {
+ type : boolean
+ value : false
+ }
+ ]
+}
+
+Running test: Number
+testing expression: [123, 0.56, -0, +Infinity, -Infinity, NaN]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : number
+ value : 123
+ }
+ [1] : {
+ type : number
+ value : 0.56
+ }
+ [2] : {
+ type : number
+ value : -0
+ }
+ [3] : {
+ type : number
+ value : Infinity
+ }
+ [4] : {
+ type : number
+ value : -Infinity
+ }
+ [5] : {
+ type : number
+ value : NaN
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : number
+ value : 123
+ }
+ [1] : {
+ type : number
+ value : 0.56
+ }
+ [2] : {
+ type : number
+ value : -0
+ }
+ [3] : {
+ type : number
+ value : Infinity
+ }
+ [4] : {
+ type : number
+ value : -Infinity
+ }
+ [5] : {
+ type : number
+ value : NaN
+ }
+ ]
+}
+
+Running test: BigInt
+testing expression: [123n, 1234567890n]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : bigint
+ value : 123n
+ }
+ [1] : {
+ type : bigint
+ value : 1234567890n
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : bigint
+ value : 123n
+ }
+ [1] : {
+ type : bigint
+ value : 1234567890n
+ }
+ ]
+}
+
+Running test: Symbol
+testing expression: Symbol('foo')
+Runtime.evaluate
+{
+ type : symbol
+}
+Runtime.callFunctionOn
+{
+ type : symbol
+}
+
+Running test: Function
+testing expression: [function qwe(){}, ()=>{}]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : function
+ }
+ [1] : {
+ type : function
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : function
+ }
+ [1] : {
+ type : function
+ }
+ ]
+}
+
+Running test: Array
+testing expression: [1,2]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : number
+ value : 1
+ }
+ [1] : {
+ type : number
+ value : 2
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : number
+ value : 1
+ }
+ [1] : {
+ type : number
+ value : 2
+ }
+ ]
+}
+testing expression: new Array(1,2)
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : number
+ value : 1
+ }
+ [1] : {
+ type : number
+ value : 2
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : number
+ value : 1
+ }
+ [1] : {
+ type : number
+ value : 2
+ }
+ ]
+}
+
+Running test: RegExp
+testing expression: [new RegExp('ab+c'), new RegExp('ab+c', 'ig')]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : regexp
+ value : {
+ pattern : ab+c
+ }
+ }
+ [1] : {
+ type : regexp
+ value : {
+ flags : gi
+ pattern : ab+c
+ }
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : regexp
+ value : {
+ pattern : ab+c
+ }
+ }
+ [1] : {
+ type : regexp
+ value : {
+ flags : gi
+ pattern : ab+c
+ }
+ }
+ ]
+}
+
+Running test: Date
+testing date: Thu Apr 07 2022 16:16:25 GMT+1100
+Expected date in GMT: Thu, 07 Apr 2022 05:16:25 GMT
+Date type as expected: true
+Date value as expected: true
+testing date: Thu Apr 07 2022 16:16:25 GMT-1100
+Expected date in GMT: Fri, 08 Apr 2022 03:16:25 GMT
+Date type as expected: true
+Date value as expected: true
+
+Running test: Error
+testing expression: [new Error(), new Error('qwe')]
+Runtime.evaluate
+{
+ type : array
+ value : [
+ [0] : {
+ type : error
+ }
+ [1] : {
+ type : error
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : array
+ value : [
+ [0] : {
+ type : error
+ }
+ [1] : {
+ type : error
+ }
+ ]
+}
+
+Running test: Map
+testing expression: new Map([['keyString1', {valueObject1: 1}], [{keyObject2: 2}, 'valueString2'], ['keyString3', new Array()]])
+Runtime.evaluate
+{
+ type : map
+ value : [
+ [0] : [
+ [0] : keyString1
+ [1] : {
+ type : object
+ }
+ ]
+ [1] : [
+ [0] : {
+ type : object
+ }
+ [1] : {
+ type : string
+ value : valueString2
+ }
+ ]
+ [2] : [
+ [0] : keyString3
+ [1] : {
+ type : array
+ }
+ ]
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : map
+ value : [
+ [0] : [
+ [0] : keyString1
+ [1] : {
+ type : object
+ }
+ ]
+ [1] : [
+ [0] : {
+ type : object
+ }
+ [1] : {
+ type : string
+ value : valueString2
+ }
+ ]
+ [2] : [
+ [0] : keyString3
+ [1] : {
+ type : array
+ }
+ ]
+ ]
+}
+
+Running test: WeakMap
+testing expression: new WeakMap([[{valueObject1: 1}, 'keyString1'],[{valueObject2: 2}, 'keyString2']])
+Runtime.evaluate
+{
+ type : weakmap
+}
+Runtime.callFunctionOn
+{
+ type : weakmap
+}
+
+Running test: Set
+testing expression: new Set([{valueObject1: 1}, 'valueString2', new Array()])
+Runtime.evaluate
+{
+ type : set
+ value : [
+ [0] : {
+ type : object
+ }
+ [1] : {
+ type : string
+ value : valueString2
+ }
+ [2] : {
+ type : array
+ }
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : set
+ value : [
+ [0] : {
+ type : object
+ }
+ [1] : {
+ type : string
+ value : valueString2
+ }
+ [2] : {
+ type : array
+ }
+ ]
+}
+
+Running test: Weakset
+testing expression: new WeakSet([{valueObject1: 1}, {valueObject2: 2}])
+Runtime.evaluate
+{
+ type : weakset
+}
+Runtime.callFunctionOn
+{
+ type : weakset
+}
+
+Running test: Proxy
+testing expression: new Proxy({}, ()=>{})
+Runtime.evaluate
+{
+ type : proxy
+}
+Runtime.callFunctionOn
+{
+ type : proxy
+}
+
+Running test: Promise
+testing expression: new Promise(()=>{})
+Runtime.evaluate
+{
+ type : promise
+}
+Runtime.callFunctionOn
+{
+ type : promise
+}
+
+Running test: Typedarray
+testing expression: new Uint16Array()
+Runtime.evaluate
+{
+ type : typedarray
+}
+Runtime.callFunctionOn
+{
+ type : typedarray
+}
+
+Running test: ArrayBuffer
+testing expression: new ArrayBuffer()
+Runtime.evaluate
+{
+ type : arraybuffer
+}
+Runtime.callFunctionOn
+{
+ type : arraybuffer
+}
+
+Running test: Object
+testing expression: {nullKey: null, stringKey: 'foo',boolKey: true,numberKey: 123,bigintKey: 123n,symbolKey: Symbol('foo'),functionKey: () => {},arrayKey:[1]}
+Runtime.evaluate
+{
+ type : object
+ value : [
+ [0] : [
+ [0] : nullKey
+ [1] : {
+ type : null
+ }
+ ]
+ [1] : [
+ [0] : stringKey
+ [1] : {
+ type : string
+ value : foo
+ }
+ ]
+ [2] : [
+ [0] : boolKey
+ [1] : {
+ type : boolean
+ value : true
+ }
+ ]
+ [3] : [
+ [0] : numberKey
+ [1] : {
+ type : number
+ value : 123
+ }
+ ]
+ [4] : [
+ [0] : bigintKey
+ [1] : {
+ type : bigint
+ value : 123n
+ }
+ ]
+ [5] : [
+ [0] : symbolKey
+ [1] : {
+ type : symbol
+ }
+ ]
+ [6] : [
+ [0] : functionKey
+ [1] : {
+ type : function
+ }
+ ]
+ [7] : [
+ [0] : arrayKey
+ [1] : {
+ type : array
+ }
+ ]
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : object
+ value : [
+ [0] : [
+ [0] : nullKey
+ [1] : {
+ type : null
+ }
+ ]
+ [1] : [
+ [0] : stringKey
+ [1] : {
+ type : string
+ value : foo
+ }
+ ]
+ [2] : [
+ [0] : boolKey
+ [1] : {
+ type : boolean
+ value : true
+ }
+ ]
+ [3] : [
+ [0] : numberKey
+ [1] : {
+ type : number
+ value : 123
+ }
+ ]
+ [4] : [
+ [0] : bigintKey
+ [1] : {
+ type : bigint
+ value : 123n
+ }
+ ]
+ [5] : [
+ [0] : symbolKey
+ [1] : {
+ type : symbol
+ }
+ ]
+ [6] : [
+ [0] : functionKey
+ [1] : {
+ type : function
+ }
+ ]
+ [7] : [
+ [0] : arrayKey
+ [1] : {
+ type : array
+ }
+ ]
+ ]
+}
+testing expression: {key_level_1: {key_level_2: {key_level_3: 'value_level_3'}}}
+Runtime.evaluate
+{
+ type : object
+ value : [
+ [0] : [
+ [0] : key_level_1
+ [1] : {
+ type : object
+ }
+ ]
+ ]
+}
+Runtime.callFunctionOn
+{
+ type : object
+ value : [
+ [0] : [
+ [0] : key_level_1
+ [1] : {
+ type : object
+ }
+ ]
+ ]
+}
diff --git a/deps/v8/test/inspector/runtime/add-web-driver-value.js b/deps/v8/test/inspector/runtime/add-web-driver-value.js
new file mode 100644
index 0000000000..4a364d04b0
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/add-web-driver-value.js
@@ -0,0 +1,132 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { session, contextGroup, Protocol } =
+ InspectorTest.start('RemoteObject.webDriverValue');
+
+Protocol.Runtime.enable();
+Protocol.Runtime.onConsoleAPICalled(m => InspectorTest.logMessage(m));
+
+InspectorTest.runAsyncTestSuite([
+ async function PrimitiveValue() {
+ await testExpression("undefined");
+ await testExpression("null");
+ await testExpression("'foo'");
+ await testExpression("[true, false]");
+ },
+ async function Number() {
+ await testExpression("[123, 0.56, -0, +Infinity, -Infinity, NaN]");
+ },
+ async function BigInt() {
+ await testExpression("[123n, 1234567890n]");
+ },
+ async function Symbol() {
+ await testExpression("Symbol('foo')");
+ },
+ async function Function() {
+ await testExpression("[function qwe(){}, ()=>{}]");
+ },
+ async function Array() {
+ await testExpression("[1,2]");
+ await testExpression("new Array(1,2)");
+ },
+ async function RegExp() {
+ await testExpression("[new RegExp('ab+c'), new RegExp('ab+c', 'ig')]");
+ },
+ async function Date() {
+ // Serialization depends on the timezone, so manual verification is needed.
+ await testDate("Thu Apr 07 2022 16:16:25 GMT+1100");
+ await testDate("Thu Apr 07 2022 16:16:25 GMT-1100");
+ },
+ async function Error() {
+ await testExpression("[new Error(), new Error('qwe')]");
+ },
+ async function Map() {
+ await testExpression("new Map([['keyString1', {valueObject1: 1}], [{keyObject2: 2}, 'valueString2'], ['keyString3', new Array()]])");
+ },
+ async function WeakMap() {
+ await testExpression("new WeakMap([[{valueObject1: 1}, 'keyString1'],[{valueObject2: 2}, 'keyString2']])");
+ },
+ async function Set() {
+ await testExpression("new Set([{valueObject1: 1}, 'valueString2', new Array()])");
+ },
+ async function Weakset() {
+ await testExpression("new WeakSet([{valueObject1: 1}, {valueObject2: 2}])");
+ },
+ async function Proxy() {
+ await testExpression("new Proxy({}, ()=>{})");
+ },
+ async function Promise() {
+ await testExpression("new Promise(()=>{})");
+ },
+ async function Typedarray() {
+ await testExpression("new Uint16Array()");
+ },
+ async function ArrayBuffer() {
+ await testExpression("new ArrayBuffer()");
+ },
+ async function Object() {
+ // Object.
+ await testExpression("{nullKey: null, stringKey: 'foo',boolKey: true,numberKey: 123,bigintKey: 123n,symbolKey: Symbol('foo'),functionKey: () => {},arrayKey:[1]}");
+ // Object in-depth serialization.
+ await testExpression("{key_level_1: {key_level_2: {key_level_3: 'value_level_3'}}}");
+ }]);
+
+async function testDate(dateStr) {
+ // TODO(sadym): make the test timezone-agnostic. Current approach is not 100% valid, as it relies on the `date.ToString` implementation.
+ InspectorTest.logMessage("testing date: " + dateStr);
+ const serializedDate = (await serializeViaEvaluate("new Date('" + dateStr + "')")).result.result.webDriverValue;
+ // Expected format: {
+ // type: "date"
+ // value: "Fri Apr 08 2022 03:16:25 GMT+0000 (Coordinated Universal Time)"
+ // }
+ const expectedDateStr = new Date(dateStr).toString();
+
+ InspectorTest.logMessage("Expected date in GMT: " + (new Date(dateStr).toGMTString()));
+ InspectorTest.logMessage("Date type as expected: " + (serializedDate.type === "date"));
+ if (serializedDate.value === expectedDateStr) {
+ InspectorTest.logMessage("Date value as expected: " + (serializedDate.value === expectedDateStr));
+ } else {
+ InspectorTest.logMessage("Error. Expected " + expectedDateStr + ", but was " + serializedDate.value);
+
+ }
+}
+
+async function serializeViaEvaluate(expression) {
+ return await Protocol.Runtime.evaluate({
+ expression: "("+expression+")",
+ generateWebDriverValue: true
+ });
+}
+
+async function serializeViaCallFunctionOn(expression) {
+ const objectId = (await Protocol.Runtime.evaluate({
+ expression: "({})",
+ generateWebDriverValue: true
+ })).result.result.objectId;
+
+ return await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: "()=>{return " + expression + "}",
+ objectId,
+ generateWebDriverValue: true
+ });
+}
+
+async function testExpression(expression) {
+ InspectorTest.logMessage("testing expression: "+expression);
+
+ InspectorTest.logMessage("Runtime.evaluate");
+ dumpResult(await serializeViaEvaluate(expression));
+ InspectorTest.logMessage("Runtime.callFunctionOn");
+ dumpResult(await serializeViaCallFunctionOn(expression));
+}
+
+function dumpResult(result) {
+ if (result && result.result && result.result.result && result.result.result.webDriverValue) {
+ InspectorTest.logMessage(result.result.result.webDriverValue);
+ } else {
+ InspectorTest.log("...no webDriverValue...");
+ InspectorTest.logMessage(result);
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/remote-object-expected.txt b/deps/v8/test/inspector/runtime/remote-object-expected.txt
index f636fbc1b3..14f584831e 100644
--- a/deps/v8/test/inspector/runtime/remote-object-expected.txt
+++ b/deps/v8/test/inspector/runtime/remote-object-expected.txt
@@ -395,6 +395,23 @@ Running test: testBigInt
unserializableValue : -5n
}
}
+'1n << 9_999_999n', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : 0x800000000000000000000000000000000000000000000000…000000000000000000000000000000000000000000000000n
+ type : bigint
+ unserializableValue : <expected unserializableValue>
+ }
+}
+'-1n << 9_999_999n', returnByValue: false, generatePreview: false
+100
+{
+ result : {
+ description : -0x80000000000000000000000000000000000000000000000…000000000000000000000000000000000000000000000000n
+ type : bigint
+ unserializableValue : <expected unserializableValue>
+ }
+}
Running test: testRegExp
'/w+/d', returnByValue: false, generatePreview: false
@@ -626,6 +643,47 @@ Running test: testDate
type : object
}
}
+'a = new Date(2018, 9, 31); a.toString = date => 'bar'; a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Date
+ description : <expected description>
+ objectId : <objectId>
+ preview : {
+ description : <expected description>
+ overflow : false
+ properties : [
+ [0] : {
+ name : toString
+ type : function
+ value :
+ }
+ ]
+ subtype : date
+ type : object
+ }
+ subtype : date
+ type : object
+ }
+}
+'a = new Date(2018, 9, 31); a[Symbol.toPrimitive] = date => 'bar'; a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Date
+ description : <expected description>
+ objectId : <objectId>
+ preview : {
+ description : <expected description>
+ overflow : false
+ properties : [
+ ]
+ subtype : date
+ type : object
+ }
+ subtype : date
+ type : object
+ }
+}
Running test: testMap
'new Map()', returnByValue: false, generatePreview: true
@@ -1224,7 +1282,7 @@ Running test: testWeakMap
[0] : {
name : setTimeout
type : function
- value :
+ value :
}
[1] : {
name : inspector
@@ -1308,7 +1366,7 @@ Running test: testWeakSet
[0] : {
name : setTimeout
type : function
- value :
+ value :
}
[1] : {
name : inspector
@@ -2693,7 +2751,7 @@ Running test: testOtherObjects
[0] : {
name : a
type : function
- value :
+ value :
}
]
type : object
@@ -2883,12 +2941,12 @@ Running test: testOtherObjects
[0] : {
name : a1
type : function
- value :
+ value :
}
[1] : {
name : a2
type : function
- value :
+ value :
}
]
type : object
diff --git a/deps/v8/test/inspector/runtime/remote-object.js b/deps/v8/test/inspector/runtime/remote-object.js
index 34432fefc4..9eb38f8a52 100644
--- a/deps/v8/test/inspector/runtime/remote-object.js
+++ b/deps/v8/test/inspector/runtime/remote-object.js
@@ -198,6 +198,19 @@ InspectorTest.runAsyncTestSuite([
expression: '-5n',
generatePreview: true
})).result);
+ let result = (await evaluate({
+ expression: '1n << 9_999_999n'
+ })).result;
+ if (result.result.unserializableValue === '0x8' + '0'.repeat(2_499_999) + 'n')
+ result.result.unserializableValue = '<expected unserializableValue>';
+ InspectorTest.logMessage(result);
+ result = (await evaluate({
+ expression: '-1n << 9_999_999n'
+ })).result;
+ InspectorTest.logMessage(result.result.description.length);
+ if (result.result.unserializableValue === '-0x8' + '0'.repeat(2_499_998) + 'n')
+ result.result.unserializableValue = '<expected unserializableValue>';
+ InspectorTest.logMessage(result);
},
async function testRegExp() {
InspectorTest.logMessage((await evaluate({
@@ -278,6 +291,29 @@ InspectorTest.runAsyncTestSuite([
if (result.result.preview.description === new Date(2018, 9, 31) + '')
result.result.preview.description = '<expected description>';
InspectorTest.logMessage(result);
+
+ result = (await evaluate({
+ expression:
+ `a = new Date(2018, 9, 31); a.toString = date => 'bar'; a`,
+ generatePreview: true
+ })).result;
+ if (result.result.description === new Date(2018, 9, 31) + '')
+ result.result.description = '<expected description>';
+ if (result.result.preview.description === new Date(2018, 9, 31) + '')
+ result.result.preview.description = '<expected description>';
+ InspectorTest.logMessage(result);
+
+ result =
+ (await evaluate({
+ expression:
+ `a = new Date(2018, 9, 31); a[Symbol.toPrimitive] = date => 'bar'; a`,
+ generatePreview: true
+ })).result;
+ if (result.result.description === new Date(2018, 9, 31) + '')
+ result.result.description = '<expected description>';
+ if (result.result.preview.description === new Date(2018, 9, 31) + '')
+ result.result.preview.description = '<expected description>';
+ InspectorTest.logMessage(result);
},
async function testMap() {
InspectorTest.logMessage((await evaluate({
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 6e6eb6a439..e5fb6b982b 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -89,4 +89,12 @@
'*': [SKIP], # only relevant for mjsunit tests.
}],
+##############################################################################
+['variant == maglev', {
+ # TODO(v8:7700): This test is in an infinite loop. The loop condition checks
+ # if the function is optimized, but maglev fails with unsupported bytecode.
+ # Re-enable this when maglev support all bytecodes.
+ 'string-localecompare': [SKIP],
+}], # variant == maglev
+
]
diff --git a/deps/v8/test/js-perf-test/Array/includes.js b/deps/v8/test/js-perf-test/Array/includes.js
deleted file mode 100644
index 5be93443d9..0000000000
--- a/deps/v8/test/js-perf-test/Array/includes.js
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-(() => {
-
- function make_includes() {
- return new Function('result = array.includes(target)');
- }
-
- createSuite('SmiIncludes', 1000, make_includes(), SmiIncludesSetup);
- createSuite('SparseSmiIncludes', 1000, make_includes(), SparseSmiIncludesSetup);
- createSuite('DoubleIncludes', 1000, make_includes(), SmiIncludesSetup);
- createSuite('SparseDoubleIncludes', 1000, make_includes(), SparseSmiIncludesSetup);
- createSuite('ObjectIncludes', 1000, make_includes(), SmiIncludesSetup);
- createSuite('SparseObjectIncludes', 1000, make_includes(), SparseSmiIncludesSetup);
- createSuite('StringIncludes', 1000, make_includes(), StringIncludesSetup);
- createSuite('SparseStringIncludes', 1000, make_includes(), SparseStringIncludesSetup);
-
- function SmiIncludesSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = i;
- target = array[array_size-1];
- }
-
- function SparseSmiIncludesSetup() {
- SmiIncludesSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- function StringIncludesSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = `Item no. ${i}`;
- target = array[array_size-1];
- }
-
- function SparseStringIncludesSetup() {
- StringIncludesSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- function DoubleIncludesSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = i;
- target = array[array_size-1];
- }
-
- function SparseDoubleIncludesSetup() {
- DoubleIncludesSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- function ObjectIncludesSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = {i};
- target = array[array_size-1];
- }
-
- function SparseObjectIncludesSetup() {
- ObjectIncludesSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- })();
diff --git a/deps/v8/test/js-perf-test/Array/index-of.js b/deps/v8/test/js-perf-test/Array/index-of.js
deleted file mode 100644
index 5e606382b1..0000000000
--- a/deps/v8/test/js-perf-test/Array/index-of.js
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-(() => {
-
- function make_indexOf() {
- return new Function('result = array.indexOf(target)');
- }
-
- createSuite('SmiIndexOf', 1000, make_indexOf(), SmiIndexOfSetup);
- createSuite('SparseSmiIndexOf', 1000, make_indexOf(), SparseSmiIndexOfSetup);
- createSuite('DoubleIndexOf', 1000, make_indexOf(), SmiIndexOfSetup);
- createSuite('SparseDoubleIndexOf', 1000, make_indexOf(), SparseSmiIndexOfSetup);
- createSuite('ObjectIndexOf', 1000, make_indexOf(), SmiIndexOfSetup);
- createSuite('SparseObjectIndexOf', 1000, make_indexOf(), SparseSmiIndexOfSetup);
- createSuite('StringIndexOf', 1000, make_indexOf(), StringIndexOfSetup);
- createSuite('SparseStringIndexOf', 1000, make_indexOf(), SparseStringIndexOfSetup);
-
- function SmiIndexOfSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = i;
- target = array[array_size-1];
- }
-
- function SparseSmiIndexOfSetup() {
- SmiIndexOfSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- function StringIndexOfSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = `Item no. ${i}`;
- target = array[array_size-1];
- }
-
- function SparseStringIndexOfSetup() {
- StringIndexOfSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- function DoubleIndexOfSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = i;
- target = array[array_size-1];
- }
-
- function SparseDoubleIndexOfSetup() {
- DoubleIndexOfSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- function ObjectIndexOfSetup() {
- array = new Array();
- for (let i = 0; i < array_size; ++i) array[i] = {i};
- target = array[array_size-1];
- }
-
- function SparseObjectIndexOfSetup() {
- ObjectIndexOfSetup();
- array.length = array.length * 2;
- target = array[array_size-1];
- }
-
- })();
diff --git a/deps/v8/test/js-perf-test/Array/run.js b/deps/v8/test/js-perf-test/Array/run.js
index a7b48f0b17..ce6c83de4c 100644
--- a/deps/v8/test/js-perf-test/Array/run.js
+++ b/deps/v8/test/js-perf-test/Array/run.js
@@ -10,7 +10,6 @@ let array;
let func = 0;
let this_arg;
let result;
-let target;
const array_size = 100;
const max_index = array_size - 1;
// Matches what {FastSetup} below produces.
@@ -142,8 +141,6 @@ d8.file.execute('join.js');
d8.file.execute('to-string.js');
d8.file.execute('slice.js');
d8.file.execute('copy-within.js');
-d8.file.execute('index-of.js')
-d8.file.execute('includes.js')
var success = true;
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js b/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js
index ea12ff4b21..668ec4b7eb 100644
--- a/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js
@@ -16,6 +16,7 @@ addBenchmark('Number-StrictEquals-False', NumberStrictEqualsFalse);
addBenchmark('String-StrictEquals-True', StringStrictEqualsTrue);
addBenchmark('String-StrictEquals-False', StringStrictEqualsFalse);
addBenchmark('SmiString-StrictEquals', MixedStrictEquals);
+addBenchmark('Boolean-StrictEquals', BooleanStrictEquals);
addBenchmark('Smi-Equals-True', SmiEqualsTrue);
addBenchmark('Smi-Equals-False', SmiEqualsFalse);
addBenchmark('Number-Equals-True', NumberEqualsTrue);
@@ -46,6 +47,113 @@ function strictEquals(a, b) {
}
}
+function strictEqualsBoolean(a) {
+ var ret;
+ for (var i = 0; i < 1000; ++i) {
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === true) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ if (a === false) ret = true;
+ }
+ return ret;
+}
+
function equals(a, b) {
for (var i = 0; i < 1000; ++i) {
a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
@@ -104,6 +212,12 @@ function StringStrictEqualsTrue() {
strictEquals("abc", "abc");
}
+function BooleanStrictEquals() {
+ strictEqualsBoolean("a");
+ strictEqualsBoolean(true);
+ strictEqualsBoolean(false);
+}
+
function MixedStrictEquals() {
strictEquals(10, "10");
}
diff --git a/deps/v8/test/js-perf-test/JSTests2.json b/deps/v8/test/js-perf-test/JSTests2.json
index 0ef7e4cc04..0933c7da07 100644
--- a/deps/v8/test/js-perf-test/JSTests2.json
+++ b/deps/v8/test/js-perf-test/JSTests2.json
@@ -60,8 +60,7 @@
"resources": [
"filter.js", "map.js", "every.js", "join.js", "some.js", "reduce.js",
"reduce-right.js", "to-string.js", "find.js", "find-index.js",
- "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js",
- "index-of.js", "includes.js"
+ "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js"
],
"flags": [
"--allow-natives-syntax"
@@ -182,23 +181,7 @@
{"name": "SmiCopyWithin"},
{"name": "StringCopyWithin"},
{"name": "SparseSmiCopyWithin"},
- {"name": "SparseStringCopyWithin"},
- {"name": "SmiIndexOf"},
- {"name": "SparseSmiIndexOf"},
- {"name": "DoubleIndexOf"},
- {"name": "SparseDoubleIndexOf"},
- {"name": "ObjectIndexOf"},
- {"name": "SparseObjectIndexOf"},
- {"name": "StringIndexOf"},
- {"name": "SparseStringIncludes"},
- {"name": "SmiIncludes"},
- {"name": "SparseSmiIncludes"},
- {"name": "DoubleIncludes"},
- {"name": "SparseDoubleIncludes"},
- {"name": "ObjectIncludes"},
- {"name": "SparseObjectIncludes"},
- {"name": "StringIncludes"},
- {"name": "SparseStringIncludes"}
+ {"name": "SparseStringCopyWithin"}
]
}
]
diff --git a/deps/v8/test/js-perf-test/JSTests3.json b/deps/v8/test/js-perf-test/JSTests3.json
index 244a7e728d..ec57b96abb 100644
--- a/deps/v8/test/js-perf-test/JSTests3.json
+++ b/deps/v8/test/js-perf-test/JSTests3.json
@@ -318,6 +318,7 @@
{"name": "String-StrictEquals-True"},
{"name": "String-StrictEquals-False"},
{"name": "SmiString-StrictEquals"},
+ {"name": "Boolean-StrictEquals"},
{"name": "Smi-Equals-True"},
{"name": "Smi-Equals-False"},
{"name": "Number-Equals-True"},
diff --git a/deps/v8/test/js-perf-test/JSTests5.json b/deps/v8/test/js-perf-test/JSTests5.json
index 646625a23a..8750d5c156 100644
--- a/deps/v8/test/js-perf-test/JSTests5.json
+++ b/deps/v8/test/js-perf-test/JSTests5.json
@@ -138,6 +138,18 @@
]
},
{
+ "name": "ObjectDestructuringAssignment",
+ "path": ["ObjectDestructuringAssignment"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ObjectDestructuringAssignment\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Babel"},
+ {"name": "ForLoop"},
+ {"name": "DestructuringAssignment"}
+ ]
+ },
+ {
"name": "SpreadCallsGeneral",
"path": ["SpreadCallsGeneral"],
"main": "run.js",
diff --git a/deps/v8/test/js-perf-test/ObjectDestructuringAssignment/run.js b/deps/v8/test/js-perf-test/ObjectDestructuringAssignment/run.js
new file mode 100644
index 0000000000..44fdcdc4f1
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectDestructuringAssignment/run.js
@@ -0,0 +1,98 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const props = {
+ key: 'abc',
+ ref: 1234,
+ a: 10,
+ b: 20,
+ c: 30,
+ d: 40,
+ e: 50
+};
+
+// ----------------------------------------------------------------------------
+// Benchmark: Babel
+// ----------------------------------------------------------------------------
+
+function _objectWithoutProperties(source, excluded) {
+ var target = _objectWithoutPropertiesLoose(source, excluded);
+ var key, i;
+ var sourceSymbolKeys = Object.getOwnPropertySymbols(source);
+ for (i = 0; i < sourceSymbolKeys.length; i++) {
+ key = sourceSymbolKeys[i];
+ if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue;
+ target[key] = source[key];
+ }
+ return target;
+}
+
+function _objectWithoutPropertiesLoose(source, excluded) {
+ var target = {};
+ var sourceKeys = Object.keys(source);
+ var key, i;
+ for (i = 0; i < sourceKeys.length; i++) {
+ key = sourceKeys[i];
+ if (excluded.indexOf(key) >= 0) continue;
+ target[key] = source[key];
+ }
+ return target;
+}
+function Babel() {
+ const key = props.key;
+ const ref = props.ref;
+ const normalizedProps = _objectWithoutProperties(props, ['key', 'ref']);
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLoop
+// ----------------------------------------------------------------------------
+
+function ForLoop() {
+ const key = props.key;
+ const ref = props.ref;
+ const normalizedProps = {};
+ for (let i in props) {
+ if (i != 'key' && i != 'ref') {
+ normalizedProps[i] = props[i];
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: DestructuringAssignment
+// ----------------------------------------------------------------------------
+
+function DestructuringAssignment() {
+ const {key, ref, ...normalizedProps} = props;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+d8.file.execute('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ObjectDestructuringAssignment(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [100], [new Benchmark(name, false, false, 0, f)]);
+}
+
+CreateBenchmark('Babel', Babel);
+CreateBenchmark('ForLoop', ForLoop);
+CreateBenchmark('DestructuringAssignment', DestructuringAssignment);
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 8f7bbed28e..0a8dc1e1e0 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -87,31 +87,11 @@
}], # third_party_heap
##############################################################################
-['msan == True', {
- # Large allocations
+['not (arch == x64 and mode == release)', {
+ # Only run these known slow tests on bots that we consider reasonably
+ # fast.
'fail/map-grow-failed': [SKIP],
'fail/set-grow-failed': [SKIP],
-}], # 'msan == True'
-
-##############################################################################
-['tsan == True', {
- # Large allocations
- 'fail/map-grow-failed': [SKIP],
- 'fail/set-grow-failed': [SKIP],
-}], # 'tsan == True'
-
-##############################################################################
-['simulator_run', {
- # Too slow on simulators
- 'fail/map-grow-failed': [SKIP],
- 'fail/set-grow-failed': [SKIP],
-}], # simulator_run
-
-##############################################################################
-['is_full_debug', {
- # Too slow in non-optimized debug mode
- 'fail/map-grow-failed': [SKIP],
- 'fail/set-grow-failed': [SKIP],
-}], # is_full_debug
+}], # not (arch == x64 and mode == release)
]
diff --git a/deps/v8/test/message/unicode-filename-🎅🎄.js b/deps/v8/test/message/unicode-filename-🎅🎄.js
new file mode 100644
index 0000000000..ceb927695c
--- /dev/null
+++ b/deps/v8/test/message/unicode-filename-🎅🎄.js
@@ -0,0 +1,5 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Merry Christmas!")
diff --git a/deps/v8/test/message/unicode-filename-🎅🎄.out b/deps/v8/test/message/unicode-filename-🎅🎄.out
new file mode 100644
index 0000000000..d7c57007af
--- /dev/null
+++ b/deps/v8/test/message/unicode-filename-🎅🎄.out
@@ -0,0 +1 @@
+Merry Christmas!
diff --git a/deps/v8/test/mjsunit/async-stack-traces-promise-all-settled.js b/deps/v8/test/mjsunit/async-stack-traces-promise-all-settled.js
new file mode 100644
index 0000000000..33a2dd5d5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-promise-all-settled.js
@@ -0,0 +1,45 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --async-stack-traces
+
+// Basic test with Promise.allSettled().
+(function() {
+ async function fine() { }
+
+ async function thrower() {
+ await fine();
+ throw new Error();
+ }
+
+ async function driver() {
+ return await Promise.allSettled([fine(), fine(), thrower(), thrower()]);
+ }
+
+ async function test(f) {
+ const results = await f();
+ results.forEach((result, i) => {
+ if (result.status === 'rejected') {
+ const error = result.reason;
+ assertInstanceof(error, Error);
+ const stackRegexp = new RegExp("Error.+at thrower.+at " +
+ `async Promise.allSettled \\(index ${ i }\\)` +
+ ".+ at async driver.+at async test",
+ "ms")
+ assertMatches(stackRegexp, error.stack);
+ }
+ });
+ }
+
+ assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(thrower);
+ %PrepareFunctionForOptimization(driver);
+ await test(driver);
+ await test(driver);
+ %OptimizeFunctionOnNextCall(thrower);
+ await test(driver);
+ %OptimizeFunctionOnNextCall(driver);
+ await test(driver);
+ })());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1302572.js b/deps/v8/test/mjsunit/compiler/regress-1302572.js
new file mode 100644
index 0000000000..c8308a8b26
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1302572.js
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(i) {
+ const b = i <= i;
+ return 0 + b;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(1, foo(5));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(5));
diff --git a/deps/v8/test/mjsunit/es6/destructuring.js b/deps/v8/test/mjsunit/es6/destructuring.js
index 30df8c63bf..07b9924854 100644
--- a/deps/v8/test/mjsunit/es6/destructuring.js
+++ b/deps/v8/test/mjsunit/es6/destructuring.js
@@ -202,7 +202,7 @@
}());
-(function TestTDZInIntializers() {
+(function TestTDZInInitializers() {
'use strict';
{
let {x, y = x} = {x : 42, y : 27};
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
index 444ed0ad0c..496e2b26cd 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
@@ -175,42 +175,6 @@ tests.push(function TestConstructFromTypedArray(constr) {
}
});
-tests.push(function TestFromTypedArraySpecies(constr) {
- var b = new ArrayBuffer(16);
- var a1 = new constr(b);
-
- var constructor_read = 0;
- var cons = b.constructor;
-
- Object.defineProperty(b, 'constructor', {
- get: function() {
- constructor_read++;
- return cons;
- }
- });
-
- var a2 = new constr(a1);
-
- assertEquals(1, constructor_read);
-});
-
-tests.push(function TestFromTypedArraySpeciesDetachsBuffer(constr) {
- var b = new ArrayBuffer(16);
- var a1 = new constr(b);
-
- var constructor_read = 0;
- var cons = b.constructor;
-
- Object.defineProperty(b, 'constructor', {
- get: function() {
- %ArrayBufferDetach(b);
- return cons;
- }
- });
-
- assertThrows(() => new constr(a1));
-});
-
tests.push(function TestTypedArrayMaxLength(constr) {
var myObject = { 0: 5, 1: 6, length: %TypedArrayMaxLength() + 1 };
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index d5aecce519..f50db4bad6 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -642,7 +642,7 @@ function TestTypedArraySet() {
return 1;
}
};
- assertThrows(() => a111.set(evilarr), TypeError);
+ a111.set(evilarr);
assertEquals(true, detached);
// Check if the target is a typed array before converting offset to integer
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-groupby-fast-path-assumptions.js b/deps/v8/test/mjsunit/harmony/array-prototype-groupby-fast-path-assumptions.js
new file mode 100644
index 0000000000..6ed231c15c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-groupby-fast-path-assumptions.js
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-array-grouping
+
+// Test OOB indexing on fast path.
+let arr1 = [];
+for (let i = 0; i < 32; i++) arr1.push(i);
+let popped = false;
+let grouped1 = arr1.groupBy(() => {
+ // Pop all of the elements to trigger right-trimming of the elements
+ // FixedArray.
+ for (let i = 0, len = arr1.length; i < len; i++) {
+ arr1.pop();
+ }
+});
+// 'undefined' is the only group.
+assertArrayEquals(['undefined'], Object.getOwnPropertyNames(grouped1));
+// 0 is the only value in the group because the grouping function pops the
+// entire array.
+let expectedGrouped1 = [0];
+for (let i = 1; i < 32; i++) expectedGrouped1.push(undefined);
+assertArrayEquals(expectedGrouped1, grouped1['undefined']);
+
+// Test result ElementsKind deduction on fast path.
+//
+// Initially a Smi array, but due to length truncation the result is not a Smi array.
+let arr2 = [0,1,2,3,4,5,6,7,8,9];
+let grouped2 = arr2.groupBy(() => { arr2.length = 2; });
+// 'undefined' is the only group.
+assertArrayEquals(['undefined'], Object.getOwnPropertyNames(grouped2));
+// 0,1 are the only values in the group because the source array gets truncated
+// to length 2.
+let expectedGrouped2 = [0,1];
+for (let i = 2; i < 10; i++) expectedGrouped2.push(undefined);
+assertArrayEquals(expectedGrouped2, grouped2['undefined']);
diff --git a/deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy-no-lazy-feedback.js b/deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy-no-lazy-feedback.js
new file mode 100644
index 0000000000..64dfb7be88
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy-no-lazy-feedback.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+d8.file.execute('test/mjsunit/harmony/index-fields-nonextensible-global-proxy.js');
diff --git a/deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy.js b/deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy.js
new file mode 100644
index 0000000000..ea7a5b3f8c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/index-fields-nonextensible-global-proxy.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Base {
+ constructor(arg) {
+ return arg;
+ }
+}
+
+class ClassNonExtensibleWithIndexField extends Base {
+ [0] = (() => {
+ Object.preventExtensions(this);
+ return 'defined';
+ })();
+ ['nonExtensible'] = 4;
+ constructor(arg) {
+ super(arg);
+ }
+}
+
+assertThrows(() => {
+ new ClassNonExtensibleWithIndexField(globalThis);
+}, TypeError, /Cannot define property 0, object is not extensible/);
+assertEquals("undefined", typeof nonExtensible);
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy-no-lazy-feedback.js b/deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy-no-lazy-feedback.js
new file mode 100644
index 0000000000..2cfc56195a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy-no-lazy-feedback.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+d8.file.execute('test/mjsunit/harmony/private-fields-nonextensible-global-proxy.js');
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy.js b/deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy.js
new file mode 100644
index 0000000000..8858951ef9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-fields-nonextensible-global-proxy.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Base {
+ constructor(arg) {
+ return arg;
+ }
+}
+
+class ClassNonExtensibleWithPrivateField extends Base {
+ #privateField = (() => {
+ Object.preventExtensions(this);
+ return "defined";
+ })();
+ // In case the object has a null prototype, we'll use a static
+ // method to access the field.
+ static getPrivateField(obj) { return obj.#privateField; }
+ constructor(arg) {
+ super(arg);
+ }
+}
+
+new ClassNonExtensibleWithPrivateField(globalThis);
+assertEquals("defined", ClassNonExtensibleWithPrivateField.getPrivateField(globalThis));
diff --git a/deps/v8/test/mjsunit/harmony/private-reference-logical-assignment-short-circuit.js b/deps/v8/test/mjsunit/harmony/private-reference-logical-assignment-short-circuit.js
new file mode 100644
index 0000000000..d1f31733f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-reference-logical-assignment-short-circuit.js
@@ -0,0 +1,135 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+function doNotCall() {
+ throw new Error("The right-hand side should not be evaluated");
+}
+
+{
+ class C {
+ get #readOnlyFalse() {
+ return false;
+ }
+
+ get #readOnlyTrue() {
+ return true;
+ }
+
+ get #readOnlyOne() {
+ return 1;
+ }
+
+ get #readOnlyNull() {
+ return null;
+ }
+
+ get #readOnlyUndefined() {
+ return undefined;
+ }
+
+ shortCircuitedAndFalse() {
+ return this.#readOnlyFalse &&= doNotCall();
+ }
+
+ shortCircuitedOrTrue() {
+ return this.#readOnlyTrue ||= doNotCall();
+ }
+
+ shortCircuitedNullishOne() {
+ return this.#readOnlyOne ??= doNotCall();
+ }
+
+ andAssignReadOnly() {
+ return this.#readOnlyTrue &&= 1;
+ }
+
+ orAssignReadOnly() {
+ return this.#readOnlyFalse ||= 0;
+ }
+
+ nullishAssignReadOnlyNull() {
+ return this.#readOnlyNull ??= 1;
+ }
+
+ nullishAssignReadOnlyUndefined() {
+ return this.#readOnlyUndefined ??= 1;
+ }
+ }
+
+ const o = new C();
+ assertEquals(
+ o.shortCircuitedAndFalse(),
+ false,
+ "The expression should evaluate to the short-circuited value");
+ assertEquals(
+ o.shortCircuitedOrTrue(),
+ true,
+ "The expression should evaluate to the short-circuited value");
+ assertEquals(
+ o.shortCircuitedNullishOne(),
+ 1,
+ "The expression should evaluate to the short-circuited value");
+
+ assertThrows(
+ () => o.andAssignReadOnly(),
+ TypeError,
+ /'#readOnlyTrue' was defined without a setter/
+ );
+ assertThrows(
+ () => o.orAssignReadOnly(),
+ TypeError,
+ /'#readOnlyFalse' was defined without a setter/
+ );
+ assertThrows(
+ () => o.nullishAssignReadOnlyNull(),
+ TypeError,
+ /'#readOnlyNull' was defined without a setter/
+ );
+ assertThrows(
+ () => o.nullishAssignReadOnlyUndefined(),
+ TypeError,
+ /'#readOnlyUndefined' was defined without a setter/
+ );
+}
+
+{
+ class C {
+ #privateMethod() { }
+
+ getPrivateMethod() {
+ return this.#privateMethod;
+ }
+
+ shortCircuitedNullishPrivateMethod() {
+ return this.#privateMethod ??= doNotCall();
+ }
+
+ shortCircuitedOrPrivateMethod() {
+ return this.#privateMethod ||= doNotCall();
+ }
+
+ andAssignReadOnly() {
+ return this.#privateMethod &&= 1;
+ }
+ }
+
+ const o = new C();
+ assertEquals(
+ o.shortCircuitedNullishPrivateMethod(),
+ o.getPrivateMethod(),
+ "The expression should evaluate to the short-circuited value");
+
+ assertEquals(
+ o.shortCircuitedNullishPrivateMethod(),
+ o.getPrivateMethod(),
+ "The expression should evaluate to the short-circuited value");
+
+ assertThrows(
+ () => o.andAssignReadOnly(),
+ TypeError,
+ /Private method '#privateMethod' is not writable/
+ );
+}
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js b/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
index a6709f210e..cabad58e7e 100644
--- a/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
@@ -55,10 +55,8 @@ revoke();
assertThrows(() => wrapped(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked");
// revoked proxy
-var wrapped = shadowRealm.evaluate(`
+assertThrows(() => shadowRealm.evaluate(`
var revocable = Proxy.revocable(() => 1, {});
revocable.revoke();
revocable.proxy;
-`);
-var revoke = shadowRealm.evaluate('globalThis.revoke');
-assertThrows(() => wrapped(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked");
+`), TypeError, "Cannot wrap target callable");
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-bind.js b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-bind.js
new file mode 100644
index 0000000000..388df6aca0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-bind.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-shadow-realm
+
+const shadowRealm = new ShadowRealm();
+
+// bind
+var wrapped = shadowRealm.evaluate('function foo(bar, quz) {}; foo');
+assertEquals(wrapped.name, 'foo');
+assertEquals(wrapped.length, 2);
+
+var bound = wrapped.bind(undefined, 'bar');
+assertEquals(bound.name, 'bound foo');
+assertEquals(bound.length, 1);
+
+// proxy
+var wrapped = shadowRealm.evaluate('function foo(bar, quz) {}; foo');
+assertEquals(wrapped.name, 'foo');
+assertEquals(wrapped.length, 2);
+
+var proxy = new Proxy(wrapped, {});
+assertEquals(proxy.name, 'foo');
+assertEquals(proxy.length, 2);
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props-stack.js b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props-stack.js
new file mode 100644
index 0000000000..9ce783175c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props-stack.js
@@ -0,0 +1,18 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-shadow-realm
+
+(function TestWrappedFunctionNameStackOverflow() {
+ const shadowRealm = new ShadowRealm();
+ let otherBind = shadowRealm.evaluate('function foo(fn) { return fn.bind(1); }; foo');
+
+ let fn = () => {};
+ for(let i = 0; i < 1024 * 50; i++) {
+ fn = otherBind(fn.bind(1));
+ }
+ assertThrows(() => {
+ fn.name;
+ }, RangeError, 'Maximum call stack size exceeded');
+})();
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props.js b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props.js
new file mode 100644
index 0000000000..1417ec5476
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function-props.js
@@ -0,0 +1,121 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-shadow-realm --allow-natives-syntax
+
+// Test wrapped function returned from ShadowRealm.prototype.evaluate
+function shadowRealmEvaluate(sourceText) {
+ var shadowRealm = new ShadowRealm();
+ shadowRealm.evaluate(`function makeSlow(o) {
+ for (var i = 0; i < 1200; i++) {
+ o["o"+i] = 0;
+ }
+ if (%HasFastProperties(o)) {
+ throw new Error('o should be slow');
+ }
+ return o;
+ }`);
+ return shadowRealm.evaluate(sourceText);
+}
+
+// Test wrapped function returned from WrappedFunction.[[Call]]
+function wrappedFunctionEvaluate(sourceText) {
+ var shadowRealm = new ShadowRealm();
+ shadowRealm.evaluate(`function makeSlow(o) {
+ for (var i = 0; i < 1200; i++) {
+ o["o"+i] = 0;
+ }
+ if (%HasFastProperties(o)) {
+ throw new Error('o should be slow');
+ }
+ return o;
+ }`);
+ // Create a wrapped function from sourceText in the shadow realm and return it.
+ return shadowRealm.evaluate('text => eval(text)')(sourceText);
+}
+
+suite(shadowRealmEvaluate);
+suite(wrappedFunctionEvaluate);
+
+function suite(evaluate) {
+ // function
+ var wrapped = evaluate('function foo() {}; foo');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 0);
+ // The properties should be accessor infos.
+ assertTrue(%HasFastProperties(wrapped));
+
+ var wrapped = evaluate('function foo(bar) {}; foo');
+ assertEquals(wrapped.length, 1);
+
+ // builtin function
+ var wrapped = evaluate('String.prototype.substring');
+ assertEquals(wrapped.name, 'substring');
+ assertEquals(wrapped.length, 2);
+
+ // callable proxy
+ var wrapped = evaluate('new Proxy(function foo(arg) {}, {})');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 1);
+
+ // nested callable proxy
+ var wrapped = evaluate('new Proxy(new Proxy(new Proxy(function foo(arg) {}, {}), {}), {})');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 1);
+
+ // bound function
+ var wrapped = evaluate('(function foo(arg) { return this.a }).bind({ a: 1 })');
+ assertEquals(wrapped.name, 'bound foo');
+ assertEquals(wrapped.length, 1);
+
+ // nested bound function
+ var wrapped = evaluate('(function foo(arg) { return this.a }).bind({ a: 1 }).bind().bind()');
+ assertEquals(wrapped.name, 'bound bound bound foo');
+ assertEquals(wrapped.length, 1);
+
+ // bound function with args
+ var wrapped = evaluate('(function foo(arg1, arg2) { return this.a }).bind({ a: 1 }, 1)');
+ assertEquals(wrapped.name, 'bound foo');
+ assertEquals(wrapped.length, 1);
+
+ // function with length modified
+ var wrapped = evaluate('function foo(arg) {}; Object.defineProperty(foo, "length", {value: 123}); foo');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 123);
+
+ var wrapped = evaluate('function foo(arg) {}; Object.defineProperty(foo, "length", {value: "123"}); foo');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 0);
+
+ var wrapped = evaluate('function foo(arg) {}; delete foo.length; foo');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 0);
+
+ var wrapped = evaluate('function foo(arg) {}; Object.defineProperty(foo, "length", {value: 123}); makeSlow(foo)');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 123);
+
+ // function with name modified
+ var wrapped = evaluate('function foo(arg) {}; Object.defineProperty(foo, "name", {value: "bar"}); foo');
+ assertEquals(wrapped.name, 'bar');
+ assertEquals(wrapped.length, 1);
+
+ var wrapped = evaluate('function foo(arg) {}; Object.defineProperty(foo, "name", {value: new String("bar")}); foo');
+ assertEquals(wrapped.name, '');
+ assertEquals(wrapped.length, 1);
+
+ var wrapped = evaluate('function foo(arg) {}; delete foo.name; foo');
+ assertEquals(wrapped.name, '');
+ assertEquals(wrapped.length, 1);
+
+ // function with prototype modified
+ var wrapped = evaluate('function foo(arg) {}; Object.setPrototypeOf(foo, Object); foo');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 1);
+
+ // function with additional properties
+ var wrapped = evaluate('function foo(arg) {}; foo.bar = 123; foo');
+ assertEquals(wrapped.name, 'foo');
+ assertEquals(wrapped.length, 1);
+}
diff --git a/deps/v8/test/mjsunit/harmony/typedarray-set-length-detach.js b/deps/v8/test/mjsunit/harmony/typedarray-set-length-detach.js
deleted file mode 100644
index 4f1d588ee7..0000000000
--- a/deps/v8/test/mjsunit/harmony/typedarray-set-length-detach.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-
-let ta = new Int32Array(10);
-assertThrows(() => {
- ta.set({
- get length() {
- %ArrayBufferDetach(ta.buffer);
- return 1;
- },
- get 0() {
- return 100;
- },
- });
-}, TypeError);
diff --git a/deps/v8/test/mjsunit/maglev/19.js b/deps/v8/test/mjsunit/maglev/19.js
new file mode 100644
index 0000000000..ea2205986c
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/19.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function f(i) {
+ var j;
+ var o;
+ if (i) {
+ } else {
+ if (j) {
+ } else {
+ }
+ }
+ return o;
+}
+
+%PrepareFunctionForOptimization(f);
+f(false, true);
+
+%OptimizeMaglevOnNextCall(f);
+f(false, true);
+f(false, true);
+f(false, true);
diff --git a/deps/v8/test/mjsunit/maglev/add-smi.js b/deps/v8/test/mjsunit/maglev/add-smi.js
new file mode 100644
index 0000000000..71d809aa77
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/add-smi.js
@@ -0,0 +1,41 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev --no-stress-opt
+
+// Checks Smi add operation and deopt while untagging.
+(function() {
+ function add(x, y) {
+ return x + y;
+ }
+
+ %PrepareFunctionForOptimization(add);
+ assertEquals(3, add(1, 2));
+
+ %OptimizeMaglevOnNextCall(add);
+ assertEquals(3, add(1, 2));
+ assertTrue(isMaglevved(add));
+
+ // We should deopt here in SmiUntag.
+ assertEquals(0x40000000, add(1, 0x3FFFFFFF));
+ assertFalse(isMaglevved(add));
+})();
+
+// Checks when we deopt due to tagging.
+(function() {
+ function add(x, y) {
+ return x + y;
+ }
+
+ %PrepareFunctionForOptimization(add);
+ assertEquals(3, add(1, 2));
+
+ %OptimizeMaglevOnNextCall(add);
+ assertEquals(3, add(1, 2));
+ assertTrue(isMaglevved(add));
+
+ // We should deopt here in SmiTag.
+ assertEquals(3.2, add(1.2, 2));
+ assertFalse(isMaglevved(add));
+})();
diff --git a/deps/v8/test/mjsunit/maglev/argument-over-under-application.js b/deps/v8/test/mjsunit/maglev/argument-over-under-application.js
new file mode 100644
index 0000000000..b6c85e381f
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/argument-over-under-application.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --maglev --allow-natives-syntax
+
+function f(x) {
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+// f(x) takes one argument but we are under-applying here
+assertEquals(undefined, f());
+// f(x) takes one argument but we are over-applying here
+assertEquals(1, f(1, 2));
+
+%OptimizeMaglevOnNextCall(f);
+// f(x) takes one argument but we are under-applying here
+assertEquals(undefined, f());
+// f(x) takes one argument but we are over-applying here
+assertEquals(1, f(1, 2));
diff --git a/deps/v8/test/mjsunit/maglev/lazy-deopt-with-onstack-activation.js b/deps/v8/test/mjsunit/maglev/lazy-deopt-with-onstack-activation.js
new file mode 100644
index 0000000000..4b57dd2745
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/lazy-deopt-with-onstack-activation.js
@@ -0,0 +1,33 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev --no-always-opt
+
+var x = 1;
+var do_change = false;
+
+function g() {
+ if (do_change) {
+ x = 2;
+ return 40;
+ }
+ return 30;
+}
+
+function f() {
+ return g() + x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(31, f());
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(31, f());
+assertTrue(isMaglevved(f));
+
+// Trigger a lazy deopt on the next g() call.
+do_change = true;
+assertEquals(42, f());
+assertFalse(isMaglevved(f));
+assertUnoptimized(f);
diff --git a/deps/v8/test/mjsunit/maglev/lazy-deopt-without-onstack-activation.js b/deps/v8/test/mjsunit/maglev/lazy-deopt-without-onstack-activation.js
new file mode 100644
index 0000000000..82fdd2f29a
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/lazy-deopt-without-onstack-activation.js
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev --no-always-opt
+
+var x = 1;
+
+function f(o) {
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(1, f());
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(1, f());
+assertTrue(isMaglevved(f));
+
+// Trigger a lazy deopt now, so that f() deopts on its next call.
+x = 2;
+assertEquals(2, f());
+assertFalse(isMaglevved(f));
+assertUnoptimized(f);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index ae8816c6b9..4c5b2a4141 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -175,18 +175,19 @@ var V8OptimizationStatus = {
kAlwaysOptimize: 1 << 2,
kMaybeDeopted: 1 << 3,
kOptimized: 1 << 4,
- kTurboFanned: 1 << 5,
- kInterpreted: 1 << 6,
- kMarkedForOptimization: 1 << 7,
- kMarkedForConcurrentOptimization: 1 << 8,
- kOptimizingConcurrently: 1 << 9,
- kIsExecuting: 1 << 10,
- kTopmostFrameIsTurboFanned: 1 << 11,
- kLiteMode: 1 << 12,
- kMarkedForDeoptimization: 1 << 13,
- kBaseline: 1 << 14,
- kTopmostFrameIsInterpreted: 1 << 15,
- kTopmostFrameIsBaseline: 1 << 16,
+ kMaglevved: 1 << 5,
+ kTurboFanned: 1 << 6,
+ kInterpreted: 1 << 7,
+ kMarkedForOptimization: 1 << 8,
+ kMarkedForConcurrentOptimization: 1 << 9,
+ kOptimizingConcurrently: 1 << 10,
+ kIsExecuting: 1 << 11,
+ kTopmostFrameIsTurboFanned: 1 << 12,
+ kLiteMode: 1 << 13,
+ kMarkedForDeoptimization: 1 << 14,
+ kBaseline: 1 << 15,
+ kTopmostFrameIsInterpreted: 1 << 16,
+ kTopmostFrameIsBaseline: 1 << 17,
};
// Returns true if --lite-mode is on and we can't ever turn on optimization.
@@ -210,6 +211,9 @@ var isUnoptimized;
// Returns true if given function is optimized.
var isOptimized;
+// Returns true if given function is compiled by Maglev.
+var isMaglevved;
+
// Returns true if given function is compiled by TurboFan.
var isTurboFanned;
@@ -781,6 +785,14 @@ var prettyPrinted;
return (opt_status & V8OptimizationStatus.kOptimized) !== 0;
}
+ isMaglevved = function isMaglevved(fun) {
+ var opt_status = OptimizationStatus(fun, "");
+ assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
+ "not a function");
+ return (opt_status & V8OptimizationStatus.kOptimized) !== 0 &&
+ (opt_status & V8OptimizationStatus.kMaglevved) !== 0;
+ }
+
isTurboFanned = function isTurboFanned(fun) {
var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 61e59c67e6..7e57621350 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -48,25 +48,14 @@
'temporal/calendar-date-from-fields': [FAIL],
'temporal/calendar-date-until': [FAIL],
'temporal/calendar-day': [FAIL],
- 'temporal/calendar-day-of-week': [FAIL],
- 'temporal/calendar-day-of-year': [FAIL],
- 'temporal/calendar-days-in-month': [FAIL],
- 'temporal/calendar-days-in-week': [FAIL],
- 'temporal/calendar-days-in-year': [FAIL],
'temporal/calendar-fields': [FAIL],
- 'temporal/calendar-in-leap-year': [FAIL],
- 'temporal/calendar-merge-fields': [FAIL],
'temporal/calendar-month': [FAIL],
'temporal/calendar-month-code': [FAIL],
'temporal/calendar-month-day-from-fields': [FAIL],
- 'temporal/calendar-months-in-year': [FAIL],
'temporal/calendar-week-of-year': [FAIL],
- 'temporal/calendar-year': [FAIL],
'temporal/calendar-year-month-from-fields': [FAIL],
- 'temporal/duration-abs': [FAIL],
'temporal/duration-add': [FAIL],
'temporal/duration-from': [FAIL],
- 'temporal/duration-negated': [FAIL],
'temporal/duration-to-json': [FAIL],
'temporal/duration-with': [FAIL],
'temporal/instant-add': [FAIL],
@@ -143,10 +132,13 @@
# https://crbug.com/v8/10948
'wasm/atomics': [PASS, ['arch == arm and not simulator_run', SKIP]],
- # crbug.com/v8/12472 Stack overflow during regexp node generation.
+ # https://crbug.com/v8/12472 Stack overflow during regexp node generation.
'regress/regress-crbug-595657': [SKIP],
'regress/regress-475705': [SKIP],
+ # https://crbug.com/v8/12697
+ 'math-abs': [SKIP],
+
##############################################################################
# Tests where variants make no sense.
'd8/enable-tracing': [PASS, NO_VARIANTS],
@@ -274,9 +266,9 @@
# TODO(v8:10915): Fails with --future.
'harmony/weakrefs/stress-finalizationregistry-dirty-enqueue': [SKIP],
- # BUG(v8:12645)
- 'shared-memory/shared-struct-workers': [SKIP],
- 'shared-memory/shared-struct-atomics-workers': [SKIP],
+ # Needs deterministic test helpers for concurrent maglev tiering.
+ # TODO(jgruber,v8:7700): Implement ASAP.
+ 'maglev/18': [SKIP],
}], # ALWAYS
##############################################################################
@@ -366,7 +358,6 @@
'compiler/array-multiple-receiver-maps': [SKIP],
# Tests taking too long
'regress/regress-1122': [SKIP],
- 'regress/regress-331444': [SKIP],
'regress/regress-353551': [SKIP],
'regress/regress-crbug-119926': [SKIP],
'regress/short-circuit': [SKIP],
@@ -526,7 +517,6 @@
['is_full_debug', {
# Tests too slow in non-optimized debug mode.
'regress/regress-2790': [SKIP],
- 'regress/regress-331444': [SKIP],
'regress/regress-740784': [SKIP],
'regress/regress-992389': [SKIP],
}], # 'is_full_debug'
@@ -601,7 +591,6 @@
'math-floor-of-div-nosudiv': [PASS, SLOW],
'messages': [PASS, SLOW],
'regress/regress-2790': [PASS, SLOW],
- 'regress/regress-331444': [PASS, SLOW],
'regress/regress-490': [PASS, SLOW],
'regress/regress-crbug-217858': [PASS, SLOW],
'regress/regress-create-exception': [PASS, SLOW],
@@ -1217,7 +1206,6 @@
'regress/regress-crbug-482998': [SKIP],
'regress/regress-91008': [PASS, SLOW],
'regress/regress-779407': [PASS, SLOW],
- 'regress/regress-331444': [PASS, SLOW],
'harmony/regexp-property-lu-ui': [PASS, SLOW],
'whitespaces': [PASS, SLOW],
'generated-transition-stub': [PASS, SLOW],
@@ -1507,6 +1495,14 @@
}], # variant == stress_concurrent_allocation
##############################################################################
+['variant == maglev', {
+ # TODO(v8:7700): These tests assume that optimization always succeeds.
+ # Change this when maglev supports all bytecodes.
+ 'interrupt-budget-override': [SKIP],
+ 'never-optimize': [SKIP],
+}], # variant == maglev
+
+##############################################################################
['no_simd_hardware == True', {
'wasm/exceptions-simd': [SKIP],
'wasm/liftoff-simd-params': [SKIP],
@@ -1544,6 +1540,7 @@
'regress/wasm/regress-1283395': [SKIP],
'regress/wasm/regress-1289678': [SKIP],
'regress/wasm/regress-1290079': [SKIP],
+ 'regress/wasm/regress-1299183': [SKIP],
}], # no_simd_hardware == True
##############################################################################
@@ -1553,7 +1550,7 @@
##############################################################################
# TODO(v8:11421): Port baseline compiler to other architectures.
-['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, loong64, s390x)', {
+['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, loong64, s390x) or (arch == s390x and pointer_compression)', {
'baseline/*': [SKIP],
'regress/regress-1242306': [SKIP],
}],
@@ -1693,7 +1690,6 @@
'wasm/memory_2gb_oob': [SKIP],
'wasm/memory_1gb_oob': [SKIP],
'wasm/memory_4gb_oob': [SKIP],
- 'regress/regress-331444': [SKIP],
'regress/regress-708247': [SKIP],
# Performs GC
'code-coverage-precise': [SKIP],
diff --git a/deps/v8/test/mjsunit/optimized-array-includes.js b/deps/v8/test/mjsunit/optimized-array-includes.js
deleted file mode 100644
index a38b2e15af..0000000000
--- a/deps/v8/test/mjsunit/optimized-array-includes.js
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
-// Flags: --no-always-opt
-
-// normal case
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, 0);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- testArrayIncludes();
- assertOptimized(testArrayIncludes);
-})();
-
-// from_index is not smi will lead to bailout
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, {
- valueOf: () => {
- return 0;
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Length change detected during get from_index, will bailout
-(() => {
- let called_values;
- function testArrayIncludes(deopt) {
- const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- return a.includes(9, {
- valueOf: () => {
- if (deopt) {
- a.length = 3;
- }
- return 0;
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- assertEquals(true, testArrayIncludes());
- testArrayIncludes();
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- assertEquals(true, testArrayIncludes());
- assertEquals(false, testArrayIncludes(true));
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Input array change during get from_index, will bailout
-(() => {
- const a = [1, 2, 3, 4, 5];
- function testArrayIncludes() {
- return a.includes(9, {
- valueOf: () => {
- a[0] = 9;
- return 0;
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- assertEquals(true, testArrayIncludes());
- testArrayIncludes();
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- assertEquals(true, testArrayIncludes());
- assertEquals(true, testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle from_index is undefined, will bail out
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, undefined);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle from_index is null, will bail out
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, undefined);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle from_index is float, will bail out
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, 0.5);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle from_index is symbol, will throw
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, Symbol.for('123'));
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- assertThrows(() => testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- assertThrows(() => testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle from_index is string, will bailout
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, '0');
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes()
- assertEquals(true, testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle from_index is object which cannot convert to smi, will throw
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20, {
- valueOf: () => {
- return Symbol.for('123')
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- assertThrows(() => testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- assertThrows(() => testArrayIncludes());
- assertFalse(isOptimized(testArrayIncludes));
-})();
-
-// Handle input array is smi packed elements and search_element is number
-// , will be inlined
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIncludes() {
- return a.includes(20);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-})();
-
-// Handle input array is double packed elements, will be inlined
-(() => {
- const a = [
- 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
- ];
- function testArrayIncludes() {
- return a.includes(20.5);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-})();
-
-// Handle input array is double packed elements and has NaN, will be inlined
-(() => {
- const a = [
- NaN, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
- ];
- function testArrayIncludes() {
- return a.includes(NaN);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-
-})();
-
-// Handle input array is packed elements, will reach slow path
-(() => {
- const a = [
- 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIncludes() {
- return a.includes(20.5);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-
-})();
-
-
-// Handle input array is packed elements, will be inlined
-(() => {
- const obj = {}
- const a = [
- 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, obj
- ];
- function testArrayIncludes() {
- return a.includes(obj);
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-
-})();
-
-
-// Handle input array is packed elements and search_element is symbol
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIncludes() {
- return a.includes(Symbol.for("123"));
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-
-})();
-
-// Handle input array is packed elements and search_element is BigInt
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIncludes() {
- return a.includes(BigInt(123));
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-
-})();
-
-// Handle input array is packed elements and search_element is string
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIncludes() {
- return a.includes("4.5");
- }
- %PrepareFunctionForOptimization(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- %OptimizeFunctionOnNextCall(testArrayIncludes);
- testArrayIncludes();
- assertEquals(true, testArrayIncludes());
- assertOptimized(testArrayIncludes);
-})();
diff --git a/deps/v8/test/mjsunit/optimized-array-indexof.js b/deps/v8/test/mjsunit/optimized-array-indexof.js
deleted file mode 100644
index d0fe067a6a..0000000000
--- a/deps/v8/test/mjsunit/optimized-array-indexof.js
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
-// Flags: --no-always-opt
-
-
-// normal case
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, 0);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- testArrayIndexOf();
- assertOptimized(testArrayIndexOf);
-})();
-
-// from_index is not smi will lead to bailout
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, {
- valueOf: () => {
- return 0;
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Length change detected during get from_index, will bailout
-(() => {
- let called_values;
- function testArrayIndexOf(deopt) {
- const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- return a.indexOf(9, {
- valueOf: () => {
- if (deopt) {
- a.length = 3;
- }
- return 0;
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- assertEquals(8, testArrayIndexOf());
- testArrayIndexOf();
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- assertEquals(8, testArrayIndexOf());
- assertEquals(-1, testArrayIndexOf(true));
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Input array change during get from_index, will bailout
-(() => {
- function testArrayIndexOf(deopt) {
- const a = [1, 2, 3, 4, 5];
- return a.indexOf(9, {
- valueOf: () => {
- if (deopt) {
- a[0] = 9;
- }
- return 0;
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- assertEquals(-1, testArrayIndexOf());
- testArrayIndexOf();
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- assertEquals(0, testArrayIndexOf(true));
- assertEquals(-1, testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle from_index is undefined, will bail out
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, undefined);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle from_index is null, will bail out
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, undefined);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle from_index is float, will bail out
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, 0.5);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle from_index is symbol, will throw
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, Symbol.for('123'));
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- assertThrows(() => testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- assertThrows(() => testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle from_index is string, will bailout
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, '0');
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf()
- assertEquals(19, testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle from_index is object which cannot convert to smi, will throw
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20, {
- valueOf: () => {
- return Symbol.for('123')
- }
- });
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- assertThrows(() => testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- assertThrows(() => testArrayIndexOf());
- assertFalse(isOptimized(testArrayIndexOf));
-})();
-
-// Handle input array is smi packed elements and search_element is number,
-// will be inlined
-(() => {
- const a = [
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
- ];
- function testArrayIndexOf() {
- return a.indexOf(20);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-// Handle input array is double packed elements, will be inlined
-(() => {
- const a = [
- 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
- ];
- function testArrayIndexOf() {
- return a.indexOf(20.5);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-// Handle input array is double packed elements and has NaN, will be inlined
-(() => {
- const a = [
- NaN, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, 25.5
- ];
- function testArrayIndexOf() {
- return a.indexOf(NaN);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(-1, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(-1, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-// Handle input array is packed elements and search_element is double,
-// will be inlined
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIndexOf() {
- return a.indexOf(20.5);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(19, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-
-// Handle input array is packed elements and search_element is object,
-// will be inlined
-(() => {
- const obj = {}
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, obj
- ];
- function testArrayIndexOf() {
- return a.indexOf(obj);
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(24, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(24, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-// Handle input array is packed elements and search_element is symbol,
-// will be inlined
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIndexOf() {
- return a.indexOf(Symbol.for("123"));
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(2, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(2, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-// Handle input array is packed elements and search_element is BigInt,
-// will be inlined
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIndexOf() {
- return a.indexOf(BigInt(123));
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(4, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(4, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
-
-// Handle input array is packed elements and search_element is string,
-// will be inlined
-(() => {
- const a = [
- 1.5, 2.5, Symbol.for("123"), "4.5", BigInt(123), 6.5, 7.5, 8.5, 9.5,
- 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5,
- 19.5, 20.5, 21.5, 22.5, 23.5, 24.5, {}
- ];
- function testArrayIndexOf() {
- return a.indexOf("4.5");
- }
- %PrepareFunctionForOptimization(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(3, testArrayIndexOf());
- %OptimizeFunctionOnNextCall(testArrayIndexOf);
- testArrayIndexOf();
- assertEquals(3, testArrayIndexOf());
- assertOptimized(testArrayIndexOf);
-})();
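
The removed tests above all exercise the same mjsunit workflow for checking whether TurboFan keeps or discards an inlined Array.prototype.indexOf: gather feedback, force optimization, then assert on the optimization state. Below is a minimal sketch of that workflow, assuming d8 is run with --allow-natives-syntax and the mjsunit harness supplies assertEquals/assertOptimized/isOptimized; every name in it comes from the tests above, nothing new is introduced.

// Sketch: the optimize-then-check pattern used throughout these tests.
const a = [1, 2, 3, 20];
function probe() { return a.indexOf(20, 0); }
%PrepareFunctionForOptimization(probe);  // start collecting type feedback
probe();                                 // warm-up call
%OptimizeFunctionOnNextCall(probe);      // request TurboFan for the next call
probe();                                 // runs (and keeps) the optimized code
// A Smi from_index keeps the inlined fast path: assertOptimized(probe) holds.
// A non-Smi from_index (object, float, string) bails out: isOptimized(probe) is false.
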
diff --git a/deps/v8/test/mjsunit/optimized-string-includes.js b/deps/v8/test/mjsunit/optimized-string-includes.js
new file mode 100644
index 0000000000..09375d1d73
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-string-includes.js
@@ -0,0 +1,152 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+(function optimize() {
+ function f() {
+ return 'abc'.includes('a');
+ }
+ %PrepareFunctionForOptimization(f);
+ assertEquals(true, f());
+ assertEquals(true, f());
+ assertEquals(true, f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(true, f());
+ assertTrue(isOptimized(f));
+
+ function f2() {
+ return 'abc'.includes('a', 1);
+ }
+ %PrepareFunctionForOptimization(f2);
+ assertEquals(false, f2());
+ assertEquals(false, f2());
+ assertEquals(false, f2());
+ %OptimizeFunctionOnNextCall(f2);
+ assertEquals(false, f2());
+ assertTrue(isOptimized(f2));
+
+ function f3() {
+ return 'abc'.includes('b');
+ }
+ %PrepareFunctionForOptimization(f3);
+ assertEquals(true, f3());
+ assertEquals(true, f3());
+ assertEquals(true, f3());
+ %OptimizeFunctionOnNextCall(f3);
+ assertEquals(true, f3());
+ assertTrue(isOptimized(f3));
+
+ function f4() {
+ return 'abcbc'.includes('bc', 2);
+ }
+ %PrepareFunctionForOptimization(f4);
+ assertEquals(true, f4());
+ assertEquals(true, f4());
+ assertEquals(true, f4());
+ %OptimizeFunctionOnNextCall(f4);
+ assertEquals(true, f4());
+ assertTrue(isOptimized(f4));
+
+ function f5() {
+ return 'abcbc'.includes('b', -1);
+ }
+ %PrepareFunctionForOptimization(f5);
+ assertEquals(true, f5());
+ assertEquals(true, f5());
+ assertEquals(true, f5());
+ %OptimizeFunctionOnNextCall(f5);
+ assertEquals(true, f5());
+ assertTrue(isOptimized(f5));
+
+ function f6() {
+ return 'abcbc'.includes('b', -10737418);
+ }
+ %PrepareFunctionForOptimization(f6);
+ assertEquals(true, f6());
+ assertEquals(true, f6());
+ assertEquals(true, f6());
+ %OptimizeFunctionOnNextCall(f6);
+ assertEquals(true, f6());
+ assertTrue(isOptimized(f6));
+})();
+
+(function optimizeOSR() {
+ function f() {
+ var result;
+ for (var i = 0; i < 100000; i++) {
+ result = 'abc'.includes('a');
+ }
+ return result;
+ }
+ assertEquals(true, f());
+
+ function f2() {
+ var result;
+ for (var i = 0; i < 100000; i++) {
+ result = 'abc'.includes('a', 1);
+ }
+ return result;
+ }
+ assertEquals(false, f2());
+
+ function f3() {
+ var result;
+ for (var i = 0; i < 100000; i++) {
+ result = 'abc'.includes('b');
+ }
+ return result;
+ }
+ assertEquals(true, f3());
+
+ function f4() {
+ var result;
+ for (var i = 0; i < 100000; i++) {
+ result = 'abcbc'.includes('bc', 2);
+ }
+ return result;
+ }
+ assertEquals(true, f4());
+})();
+
+(function bailout() {
+ function f(str) {
+ return String.prototype.includes.call(str, 'a')
+ }
+ %PrepareFunctionForOptimization(f);
+ assertEquals(true, f('abc'));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(true, f({
+ toString: () => {
+ return 'abc'
+ }
+ }));
+ assertFalse(isOptimized(f));
+
+ function f2(str) {
+ return 'abc'.includes(str)
+ }
+ %PrepareFunctionForOptimization(f2);
+ assertEquals(true, f2('a'));
+ %OptimizeFunctionOnNextCall(f2);
+ assertEquals(true, f2({
+ toString: () => {
+ return 'a'
+ }
+ }));
+ assertFalse(isOptimized(f2));
+
+ function f3(index) {
+ return 'abc'.includes('a', index)
+ }
+ %PrepareFunctionForOptimization(f3);
+ assertEquals(true, f3(0));
+ %OptimizeFunctionOnNextCall(f3);
+ assertEquals(true, f3({
+ valueOf: () => {
+ return 0
+ }
+ }));
+ assertFalse(isOptimized(f3));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-1309769.js b/deps/v8/test/mjsunit/regress/regress-1309769.js
new file mode 100644
index 0000000000..22c770f5fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1309769.js
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a, b, c) {
+ let x = BigInt.asUintN(0, a + b);
+ return BigInt.asUintN(64, x + c);
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(1n, foo(9n, 2n, 1n));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1n, foo(9n, 2n, 1n));
diff --git a/deps/v8/test/mjsunit/regress/regress-1312022.js b/deps/v8/test/mjsunit/regress/regress-1312022.js
new file mode 100644
index 0000000000..1160359d80
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1312022.js
@@ -0,0 +1,42 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function __getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ properties.push(name);
+ }
+ return properties;
+}
+function* __getObjects(root = this, level = 0) {
+ if (level > 4) return;
+ let obj_names = __getProperties(root);
+ for (let obj_name of obj_names) {
+ let obj = root[obj_name];
+ yield* __getObjects(obj, level + 1);
+ }
+}
+function __getRandomObject() {
+ for (let obj of __getObjects()) {}
+}
+%PrepareFunctionForOptimization(__f_23);
+%OptimizeFunctionOnNextCall(__f_23);
+try {
+ __getRandomObject(), {};
+} catch (e) {}
+function __f_23(__v_93) {
+ var __v_95 = "x";
+ return __v_93[__v_95] + __v_94[__v_95];
+}
+%PrepareFunctionForOptimization(__f_23);
+try {
+ __f_23();
+} catch (e) {}
+try {
+ %OptimizeFunctionOnNextCall(__f_23);
+ __f_23();
+} catch (e) {}
+%DisableOptimizationFinalization();
diff --git a/deps/v8/test/mjsunit/regress/regress-1312310.js b/deps/v8/test/mjsunit/regress/regress-1312310.js
new file mode 100644
index 0000000000..d7e4f1ebe0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1312310.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --stress-wasm-code-gc --gc-interval=46 --cache=code --no-lazy
+
+// No contents - just the flag combination above triggered the MSAN failure.
diff --git a/deps/v8/test/mjsunit/regress/regress-1313419.js b/deps/v8/test/mjsunit/regress/regress-1313419.js
new file mode 100644
index 0000000000..881953d003
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1313419.js
@@ -0,0 +1,27 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --interrupt-budget=100 --function-context-specialization
+
+function __f_0() {
+ return function __f_1() {
+ __v_0.p = 42;
+ for (let __v_2 = 0; __v_2 < 100; __v_2++) {
+ try { this.p(); } catch (e) {}
+ }
+ this.p = __v_0;
+ };
+}
+var __v_0 = __f_0();
+var __v_1 = __f_0();
+__v_1.prototype = {
+ p() {
+ this.q = new __v_0();
+ for (let __v_3 = 0; __v_3 < 200; __v_3++);
+ }
+};
+__v_0.prototype = {
+ p() {}
+};
+new __v_1();
diff --git a/deps/v8/test/mjsunit/regress/regress-1313475.js b/deps/v8/test/mjsunit/regress/regress-1313475.js
new file mode 100644
index 0000000000..5ff8ea9602
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1313475.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --invoke-weak-callbacks
+
+// We spawn a new worker which creates a Realm, then terminate the main thread
+// which will also terminate the worker.
+new Worker(`Realm.create();`, {type: 'string'});
diff --git a/deps/v8/test/mjsunit/regress/regress-331444.js b/deps/v8/test/mjsunit/regress/regress-331444.js
deleted file mode 100644
index 84b2ea6c2d..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-331444.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc
-
-function boom() {
- var args = [];
- for (var i = 0; i < 110000; i++)
- args.push(i);
- return Array.apply(Array, args);
-}
-var array = boom();
-function fib(n) {
- var f0 = 0, f1 = 1;
- for (; n > 0; n = n - 1) {
- f0 + f1;
- f0 = array;
- }
-}
-fib(12);
diff --git a/deps/v8/test/mjsunit/regress/regress-454725.js b/deps/v8/test/mjsunit/regress/regress-454725.js
deleted file mode 100644
index a2469d11a0..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-454725.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --expose-gc
-
-var __v_9 = {};
-var depth = 15;
-var current = 0;
-
-function __f_15(__v_3) {
- if ((__v_3 % 50) != 0) {
- return __v_3;
- } else {
- return __v_9 + 0.5;
- }
-}
-function __f_13(a) {
- a[100000 - 2] = 1;
- for (var __v_3= 0; __v_3 < 70000; ++__v_3 ) {
- a[__v_3] = __f_15(__v_3);
- }
-}
-function __f_2(size) {
-
-}
-var tmp;
-function __f_18(allocator) {
- current++;
- if (current == depth) return;
- var __v_7 = new allocator(100000);
- __f_13(__v_7);
- var __v_4 = 6;
- for (var __v_3= 0; __v_3 < 70000; __v_3 += 501 ) {
- tmp += __v_3;
- }
- __f_18(Array);
- current--;
-}
-
-gc();
-__f_18(__f_2);
diff --git a/deps/v8/test/mjsunit/regress/regress-840106.js b/deps/v8/test/mjsunit/regress/regress-840106.js
deleted file mode 100644
index 568ab75479..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-840106.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-var buffer = new ArrayBuffer(1024 * 1024);
-buffer.constructor = {
- [Symbol.species]: new Proxy(function() {}, {
- get: _ => {
- %ArrayBufferDetach(buffer);
- }
- })
-};
-var array1 = new Uint8Array(buffer, 0, 1024);
-assertThrows(() => new Uint8Array(array1));
-assertThrows(() => new Int8Array(array1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1302527-no-lazy-feedback.js b/deps/v8/test/mjsunit/regress/regress-crbug-1302527-no-lazy-feedback.js
new file mode 100644
index 0000000000..b39c651821
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1302527-no-lazy-feedback.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1302527.js');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1302527.js b/deps/v8/test/mjsunit/regress/regress-crbug-1302527.js
new file mode 100644
index 0000000000..dd0fa3193b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1302527.js
@@ -0,0 +1,612 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{
+ class X {
+ static ['name'] = "name";
+ static ['length'] = 15;
+ }
+
+ assertEquals({
+ "value": "name",
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(X, "name"));
+
+ assertEquals({
+ "value": 15,
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(X, "length"));
+}
+
+{
+ class X {
+ ['field'] = Object.preventExtensions(this);
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot define property field, object is not extensible/);
+}
+
+{
+ class X {
+ [0] = Object.preventExtensions(this);
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot define property 0, object is not extensible/);
+}
+
+{
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ { writable: false, configurable: true, value: 1 }
+ );
+ ['field2'] = 2;
+ }
+
+ let x = new X();
+ assertEquals({
+ "value": 2,
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(x, "field2"));
+}
+
+{
+ class X {
+ field = Object.defineProperty(
+ this,
+ 0,
+ { writable: false, configurable: true, value: 1 }
+ );
+ [0] = 2;
+ }
+
+ let x = new X();
+ assertEquals({
+ "value": 2,
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(x, 0));
+}
+
+{
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ { writable: false, configurable: false, value: 1 }
+ );
+ ['field2'] = true;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: field2/);
+}
+
+{
+ class X {
+ field = Object.defineProperty(
+ this,
+ 0,
+ { writable: false, configurable: false, value: 1 }
+ );
+ [0] = true;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: 0/);
+}
+
+{
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ { writable: true, configurable: false, value: 1 }
+ );
+ ['field2'] = true;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: field2/);
+}
+
+{
+ class X {
+ field = Object.defineProperty(
+ this,
+ 0,
+ { writable: true, configurable: false, value: 1 }
+ );
+ [0] = true;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: 0/);
+}
+
+{
+ let setterCalled = false;
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ {
+ configurable: true,
+ set(val) {
+ setterCalled = true;
+ }
+ }
+ );
+ ['field2'] = 2;
+ }
+
+ let x = new X();
+ assertFalse(setterCalled);
+ assertEquals({
+ "value": 2,
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(x, 'field2'));
+}
+
+{
+ let setterCalled = false;
+ class X {
+ field = Object.defineProperty(
+ this,
+ 0,
+ {
+ configurable: true,
+ set(val) {
+ setterCalled = true;
+ }
+ }
+ );
+ [0] = 2;
+ }
+
+ let x = new X();
+ assertFalse(setterCalled);
+ assertEquals({
+ "value": 2,
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(x, 0));
+}
+
+{
+ let setterCalled = false;
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ {
+ configurable: false,
+ set(val) {
+ setterCalled = true;
+ }
+ }
+ );
+ ['field2'] = 2;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: field2/);
+}
+
+{
+ let setterCalled = false;
+ class X {
+ field = Object.defineProperty(
+ this,
+ 0,
+ {
+ configurable: false,
+ set(val) {
+ setterCalled = true;
+ }
+ }
+ );
+ [0] = 2;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: 0/);
+}
+
+{
+ class Base {
+ constructor(arg) {
+ return arg;
+ }
+ }
+
+ class ClassWithNormalField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ "normalField",
+ { writable: true, configurable: true, value: "initial" }
+ );
+ return 1;
+ })();
+ ['normalField'] = "written";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassWithNormalIndexField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ 0,
+ { writable: true, configurable: true, value: "initial" }
+ );
+ return 1;
+ })();
+ [0] = "written";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ let setterCalled = false;
+ class ClassWithSetterField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ "setterField",
+ { configurable: true, set(val) { setterCalled = true; } }
+ );
+ return 1;
+ })();
+ ['setterField'] = "written";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassWithSetterIndexField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ 0,
+ { configurable: true, set(val) { setterCalled = true; } }
+ );
+ return 1;
+ })();
+ [0] = "written";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassWithReadOnlyField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ "readOnlyField",
+ { writable: false, configurable: true, value: "initial" }
+ );
+ return 1;
+ })();
+ ['readOnlyField'] = "written";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassWithReadOnlyIndexField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ 0,
+ { writable: false, configurable: true, value: "initial" }
+ );
+ return 1;
+ })();
+ [0] = "written";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassWithNonConfigurableField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ "nonConfigurableField",
+ { writable: false, configurable: false, value: "initial" }
+ );
+ return 1;
+ })();
+ ['nonConfigurableField'] = "configured";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassWithNonConfigurableIndexField extends Base {
+ field = (() => {
+ Object.defineProperty(
+ this,
+ 0,
+ { writable: false, configurable: false, value: "initial" }
+ );
+ return 1;
+ })();
+ [0] = "configured";
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassNonExtensible extends Base {
+ ['field'] = (() => {
+ Object.preventExtensions(this);
+ return 'defined';
+ })();
+ ['nonExtensible'] = 4;
+ constructor(arg) {
+ super(arg);
+ }
+ }
+ class ClassNonExtensibleWithIndexField extends Base {
+ [0] = (() => {
+ Object.preventExtensions(this);
+ return 'defined';
+ })();
+ ['nonExtensible'] = 4;
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ class ClassNonExtensibleWithPrivateField extends Base {
+ #privateField = (() => {
+ Object.preventExtensions(this);
+ return "defined";
+ })();
+ // In case the object has a null prototype, we'll use a static
+ // method to access the field.
+ static getPrivateField(obj) { return obj.#privateField; }
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ // Test dictionary objects.
+ function testObject(getObject) {
+ let obj = getObject();
+ new ClassWithNormalField(obj);
+ assertEquals(1, obj.field);
+ assertEquals("written", obj.normalField);
+
+ obj = getObject();
+ new ClassWithNormalIndexField(obj);
+ assertEquals(1, obj.field);
+ assertEquals("written", obj[0]);
+
+ obj = getObject();
+ new ClassWithSetterField(obj);
+ assertFalse(setterCalled);
+
+ obj = getObject();
+ new ClassWithSetterIndexField(obj);
+ assertFalse(setterCalled);
+
+ obj = getObject();
+ new ClassWithReadOnlyField(obj);
+ assertEquals("written", obj.readOnlyField);
+
+ obj = getObject();
+ new ClassWithReadOnlyIndexField(obj);
+ assertEquals("written", obj[0]);
+
+ obj = getObject();
+ assertThrows(() => {
+ new ClassWithNonConfigurableField(obj);
+ }, TypeError, /Cannot redefine property: nonConfigurableField/);
+ assertEquals("initial", obj.nonConfigurableField);
+
+ obj = getObject();
+ assertThrows(() => {
+ new ClassWithNonConfigurableIndexField(obj);
+ }, TypeError, /Cannot redefine property: 0/);
+ assertEquals("initial", obj[0]);
+
+ obj = getObject();
+ if (Object.hasOwn(obj, 'field')) {
+ assertThrows(() => {
+ new ClassNonExtensible(obj);
+ }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
+ assertEquals({
+ "value": 'defined',
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(obj, 'field'));
+ } else {
+ assertThrows(() => {
+ new ClassNonExtensible(obj);
+ }, TypeError, /Cannot define property field, object is not extensible/);
+ assertFalse(Object.hasOwn(obj, 'field'));
+ }
+ assertFalse(Object.hasOwn(obj, 'nonExtensible'));
+
+ obj = getObject();
+ if (Object.hasOwn(obj, 0)) {
+ assertThrows(() => {
+ new ClassNonExtensibleWithIndexField(obj);
+ }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
+ assertEquals({
+ "value": 'defined',
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(obj, 0));
+ } else {
+ assertThrows(() => {
+ new ClassNonExtensibleWithIndexField(obj);
+ }, TypeError, /Cannot define property 0, object is not extensible/);
+ assertFalse(Object.hasOwn(obj, 0));
+ }
+ assertFalse(Object.hasOwn(obj, 'nonExtensible'));
+
+ obj = getObject();
+ new ClassNonExtensibleWithPrivateField(obj);
+ assertEquals("defined", ClassNonExtensibleWithPrivateField.getPrivateField(obj));
+
+ return obj;
+ }
+
+ let obj = testObject(() => Object.create(null));
+ assertEquals(undefined, obj.field);
+
+ let fieldValue = 100;
+ let indexValue = 100;
+ obj = testObject(() => { return { field: fieldValue }; });
+ obj = testObject(() => { return { field: fieldValue, [0]: indexValue }; });
+
+ // Test proxies.
+ {
+ let trapCalls = [];
+ function getProxy() {
+ trapCalls = [];
+ let target = {};
+ let proxy = new Proxy(target, {
+ get(oTarget, sKey) {
+ return oTarget[sKey];
+ },
+ defineProperty(oTarget, sKey, oDesc) {
+ trapCalls.push(sKey);
+ Object.defineProperty(oTarget, sKey, oDesc);
+ return oTarget;
+ }
+ });
+ return proxy;
+ }
+
+ let proxy = getProxy();
+ new ClassWithNormalField(proxy);
+ assertEquals(1, proxy.field);
+ assertEquals("written", proxy.normalField);
+ assertEquals(["normalField", "field", "normalField"], trapCalls);
+
+ proxy = getProxy();
+ new ClassWithNormalIndexField(proxy);
+ assertEquals(1, proxy.field);
+ assertEquals("written", proxy[0]);
+ assertEquals(["0", "field", "0"], trapCalls);
+
+ proxy = getProxy();
+ new ClassWithSetterField(proxy);
+ assertFalse(setterCalled);
+ assertEquals("written", proxy.setterField);
+ assertEquals(["setterField", "field", "setterField"], trapCalls);
+
+ proxy = getProxy();
+ new ClassWithSetterIndexField(proxy);
+ assertFalse(setterCalled);
+ assertEquals("written", proxy[0]);
+ assertEquals(["0", "field", "0"], trapCalls);
+
+ proxy = getProxy();
+ new ClassWithReadOnlyField(proxy);
+ assertEquals("written", proxy.readOnlyField);
+ assertEquals(["readOnlyField", "field", "readOnlyField"], trapCalls);
+
+ proxy = getProxy();
+ new ClassWithReadOnlyIndexField(proxy);
+ assertEquals("written", proxy[0]);
+ assertEquals(["0", "field", "0"], trapCalls);
+
+ proxy = getProxy();
+ assertThrows(() => {
+ new ClassWithNonConfigurableField(proxy);
+ }, TypeError, /Cannot redefine property: nonConfigurableField/);
+ assertEquals("initial", proxy.nonConfigurableField);
+ assertEquals(["nonConfigurableField", "field", "nonConfigurableField"], trapCalls);
+
+ proxy = getProxy();
+ assertThrows(() => {
+ new ClassWithNonConfigurableIndexField(proxy);
+ }, TypeError, /Cannot redefine property: 0/);
+ assertEquals("initial", proxy[0]);
+ assertEquals(["0", "field", "0"], trapCalls);
+
+ proxy = getProxy();
+ assertThrows(() => {
+ new ClassNonExtensible(proxy);
+ }, TypeError, /Cannot define property field, object is not extensible/);
+ assertFalse(Object.hasOwn(proxy, 'field'));
+ assertFalse(Object.hasOwn(proxy, 'nonExtensible'));
+ assertEquals(["field"], trapCalls);
+
+ proxy = getProxy();
+ assertThrows(() => {
+ new ClassNonExtensibleWithIndexField(proxy);
+ }, TypeError, /Cannot define property 0, object is not extensible/);
+ assertFalse(Object.hasOwn(proxy, 0));
+ assertFalse(Object.hasOwn(proxy, 'nonExtensible'));
+ assertEquals(["0"], trapCalls);
+
+ proxy = getProxy();
+ new ClassNonExtensibleWithPrivateField(proxy);
+ assertEquals("defined", ClassNonExtensibleWithPrivateField.getPrivateField(proxy));
+ }
+
+ // Test globalThis.
+ {
+ new ClassWithNormalField(globalThis);
+ assertEquals(1, field);
+ assertEquals("written", normalField);
+
+ new ClassWithNormalIndexField(globalThis);
+ assertEquals(1, field);
+ assertEquals("written", globalThis[0]);
+
+ new ClassWithSetterField(globalThis);
+ assertFalse(setterCalled);
+ assertEquals("written", setterField);
+
+ new ClassWithSetterField(globalThis);
+ assertFalse(setterCalled);
+ assertEquals("written", setterField);
+
+ new ClassWithReadOnlyField(globalThis);
+ assertEquals("written", readOnlyField);
+
+ assertThrows(() => {
+ new ClassWithNonConfigurableField(globalThis);
+ }, TypeError, /Cannot redefine property: nonConfigurableField/);
+ assertEquals("initial", nonConfigurableField);
+
+ assertThrows(() => {
+ new ClassNonExtensible(globalThis);
+ }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
+ assertEquals("undefined", typeof nonExtensible);
+ }
+}
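
The test above repeatedly relies on one spec detail: a base constructor that returns its argument makes that argument the subclass's `this`, so the class field initializers then define properties directly on it (a plain object, a dictionary-mode object, a proxy, or globalThis). Below is a stripped-down sketch of that mechanism; all names in it are chosen for illustration only.

// Sketch: returning an object from a base constructor swaps it in as `this`,
// so the derived class's field initializers define properties on that object.
class Base {
  constructor(receiver) { return receiver; }
}
class Tagger extends Base {
  tag = 'set-by-field-initializer';  // CreateDataProperty on whatever Base returned
  constructor(receiver) { super(receiver); }
}
const target = Object.create(null);  // dictionary-mode object with no prototype
new Tagger(target);
// target.tag is now 'set-by-field-initializer'; calling Object.preventExtensions(target)
// beforehand would instead make the initializer throw, as the tests above check.
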
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1306929.js b/deps/v8/test/mjsunit/regress/regress-crbug-1306929.js
new file mode 100644
index 0000000000..35a1be73ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1306929.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+const gsab = new SharedArrayBuffer(1024, {maxByteLength: 11337});
+const ta = new Float64Array(gsab);
+Object.defineProperty(ta, 0, {});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1307310.js b/deps/v8/test/mjsunit/regress/regress-crbug-1307310.js
new file mode 100644
index 0000000000..7a6325d1f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1307310.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-rab-gsab
+
+const gsab = new SharedArrayBuffer(4, {
+ maxByteLength: 8
+});
+const ta = new Int8Array(gsab);
+
+function defineUndefined(ta) {
+ Object.defineProperty(ta, undefined, {
+ get: function () {}
+ });
+}
+
+defineUndefined(ta);
+assertThrows(() => { defineUndefined(ta); });
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-10817.js b/deps/v8/test/mjsunit/regress/regress-v8-10817.js
new file mode 100644
index 0000000000..6740bac9ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-10817.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => {
+ Promise()
+}, TypeError, "Promise constructor cannot be invoked without 'new'");
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-11614.js b/deps/v8/test/mjsunit/regress/regress-v8-11614.js
new file mode 100644
index 0000000000..f9fe41eec3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-11614.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Destructuring will go down the fast path.
+const fastProps = { key: "abc", ref: 1234, a: 10, b: 20, c: 30, d: 40, e: 50 };
+
+const { key, ref, ...normalizedFastProps } = fastProps;
+
+// Destructuring will go to the runtime and call
+// Runtime::kCopyDataPropertiesWithExcludedPropertiesOnStack.
+// Add a large index to force dictionary elements.
+const slowProps = { [2**30] : 10};
+assertTrue(%HasDictionaryElements(slowProps));
+const { ...normalizedSlowProps } = slowProps;
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12219.js b/deps/v8/test/mjsunit/regress/regress-v8-12219.js
new file mode 100644
index 0000000000..4929981bd5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12219.js
@@ -0,0 +1,11 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function quitInWorker() {
+ quit();
+};
+
+for(let i = 0; i < 10; i++){
+ new Worker(quitInWorker, ({type : 'function', arguments : []}));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12421.js b/deps/v8/test/mjsunit/regress/regress-v8-12421.js
index ba0dba2542..2d6241d8dd 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-12421.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12421.js
@@ -63,6 +63,21 @@
}
{
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ { writable: true, configurable: false, value: 1}
+ );
+ field2 = true;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: field2/);
+}
+
+{
let setterCalled = false;
class X {
field = Object.defineProperty(
@@ -80,6 +95,34 @@
let x = new X();
assertFalse(setterCalled);
+ assertEquals({
+ "value": 2,
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(x, 'field2'));
+}
+
+{
+ let setterCalled = false;
+ class X {
+ field = Object.defineProperty(
+ this,
+ "field2",
+ {
+ configurable: false,
+ set(val) {
+ setterCalled = true;
+ }
+ }
+ );
+ field2 = 2;
+ }
+
+ assertThrows(() => {
+ new X();
+ }, TypeError, /Cannot redefine property: field2/);
+ assertFalse(setterCalled);
}
{
@@ -153,7 +196,7 @@
class ClassNonExtensible extends Base {
field = (() => {
Object.preventExtensions(this);
- return 1;
+ return 'defined';
})();
nonExtensible = 4;
constructor(arg) {
@@ -162,74 +205,103 @@
}
// Test dictionary objects.
- {
- let dict = Object.create(null);
+ function testObject(getObject) {
+ let obj = getObject();
- new ClassWithNormalField(dict);
- assertEquals(1, dict.field);
- assertEquals("written", dict.normalField);
+ new ClassWithNormalField(obj);
+ assertEquals(1, obj.field);
+ assertEquals("written", obj.normalField);
- new ClassWithSetterField(dict);
+ obj = getObject();
+ new ClassWithSetterField(obj);
+ assertEquals(1, obj.field);
assertFalse(setterCalled);
- new ClassWithReadOnlyField(dict);
- assertEquals("written", dict.readOnlyField);
+ obj = getObject();
+ new ClassWithReadOnlyField(obj);
+ assertEquals(1, obj.field);
+ assertEquals("written", obj.readOnlyField);
+ obj = getObject();
assertThrows(() => {
- new ClassWithNonConfigurableField(dict);
+ new ClassWithNonConfigurableField(obj);
}, TypeError, /Cannot redefine property: nonConfigurableField/);
- assertEquals("initial", dict.nonConfigurableField);
+ assertEquals("initial", obj.nonConfigurableField);
+ assertEquals(1, obj.field);
+
+ obj = getObject();
+ if (Object.hasOwn(obj, 'field')) {
+ assertThrows(() => {
+ new ClassNonExtensible(obj);
+ }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
+ assertEquals({
+ "value": 'defined',
+ "writable": true,
+ "enumerable": true,
+ "configurable": true
+ }, Object.getOwnPropertyDescriptor(obj, 'field'));
+ } else {
+ assertThrows(() => {
+ new ClassNonExtensible(obj);
+ }, TypeError, /Cannot define property field, object is not extensible/);
+ assertFalse(Object.hasOwn(obj, 'field'));
+ }
+ assertFalse(Object.hasOwn(obj, 'nonExtensible'));
- assertThrows(() => {
- new ClassNonExtensible(dict);
- }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
- assertEquals(undefined, dict.nonExtensible);
+ return obj;
}
+ testObject(() => Object.create(null));
+ testObject( () => { return {field: 1000 } });
// Test proxies.
{
let trapCalls = [];
- let target = {};
- let proxy = new Proxy(target, {
- get(oTarget, sKey) {
- return oTarget[sKey];
- },
- defineProperty(oTarget, sKey, oDesc) {
- trapCalls.push(sKey);
- Object.defineProperty(oTarget, sKey, oDesc);
- return oTarget;
- }
- });
+ function getProxy() {
+ trapCalls = [];
+ let target = {};
+ return new Proxy(target, {
+ get(oTarget, sKey) {
+ return oTarget[sKey];
+ },
+ defineProperty(oTarget, sKey, oDesc) {
+ trapCalls.push(sKey);
+ Object.defineProperty(oTarget, sKey, oDesc);
+ return oTarget;
+ }
+ });
+ }
+ let proxy = getProxy();
new ClassWithNormalField(proxy);
assertEquals(1, proxy.field);
assertEquals("written", proxy.normalField);
assertEquals(["normalField", "field", "normalField"], trapCalls);
- trapCalls = [];
+ proxy = getProxy();
new ClassWithSetterField(proxy);
assertFalse(setterCalled);
assertEquals("written", proxy.setterField);
assertEquals(["setterField", "field", "setterField"], trapCalls);
- trapCalls = [];
+ proxy = getProxy();
new ClassWithReadOnlyField(proxy);
assertEquals("written", proxy.readOnlyField);
assertEquals(["readOnlyField", "field", "readOnlyField"], trapCalls);
- trapCalls = [];
+ proxy = getProxy();
assertThrows(() => {
new ClassWithNonConfigurableField(proxy);
}, TypeError, /Cannot redefine property: nonConfigurableField/);
assertEquals("initial", proxy.nonConfigurableField);
assertEquals(["nonConfigurableField", "field", "nonConfigurableField"], trapCalls);
- trapCalls = [];
+ proxy = getProxy();
assertThrows(() => {
new ClassNonExtensible(proxy);
- }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
- assertEquals(undefined, proxy.nonExtensible);
- assertEquals(["field", "nonExtensible"], trapCalls);
+ }, TypeError, /Cannot define property field, object is not extensible/);
+ assertFalse(Object.hasOwn(proxy, 'field'));
+ assertFalse(Object.hasOwn(proxy, 'nonExtensible'));
+ assertEquals(["field"], trapCalls);
}
// Test globalThis.
@@ -252,7 +324,7 @@
assertThrows(() => {
new ClassNonExtensible(globalThis);
- }, TypeError, /Cannot add property nonExtensible, object is not extensible/);
+ }, TypeError, /Cannot define property nonExtensible, object is not extensible/);
assertEquals("undefined", typeof nonExtensible);
}
}
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12632.js b/deps/v8/test/mjsunit/regress/regress-v8-12632.js
new file mode 100644
index 0000000000..3d2bcc4834
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12632.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --interrupt-budget=1024 --noturbo-loop-variable
+
+function opt() {
+ const array = [-1, 1];
+ array.shift();
+}
+
+%PrepareFunctionForOptimization(opt);
+opt();
+opt();
+%OptimizeFunctionOnNextCall(opt);
+opt();
+opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12705.js b/deps/v8/test/mjsunit/regress/regress-v8-12705.js
new file mode 100644
index 0000000000..5e96faba59
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12705.js
@@ -0,0 +1,11 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const r = Realm.create();
+const otherPromise = Realm.eval(r, 'Promise.resolve()');
+
+assertThrows(
+ () => {
+ Promise.prototype.then.call(otherPromise, () => { });
+ }, TypeError, 'no access');
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12729-1.mjs b/deps/v8/test/mjsunit/regress/regress-v8-12729-1.mjs
new file mode 100644
index 0000000000..775419cfe5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12729-1.mjs
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {getNS} from 'regress-v8-12729.mjs';
+
+assertThrows(
+ getNS, ReferenceError, 'Cannot access \'default\' before initialization');
+export default 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12729.mjs b/deps/v8/test/mjsunit/regress/regress-v8-12729.mjs
new file mode 100644
index 0000000000..220c1a4a0c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12729.mjs
@@ -0,0 +1,8 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as ns from 'regress-v8-12729-1.mjs';
+export function getNS() {
+ return Object.keys(ns);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12762.js b/deps/v8/test/mjsunit/regress/regress-v8-12762.js
new file mode 100644
index 0000000000..fc36d8a2aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12762.js
@@ -0,0 +1,23 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function func() {
+ function foo() {}
+ return foo;
+}
+
+function bar(foo) {
+ %DisassembleFunction(foo);
+ foo();
+ %PrepareFunctionForOptimization(foo);
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+}
+
+bar(func());
+bar(func());
+bar(func());
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5697.js b/deps/v8/test/mjsunit/regress/regress-v8-5697.js
index 9dec917f70..571850dba5 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5697.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5697.js
@@ -1,8 +1,12 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --opt
+//
+// Flags: --allow-natives-syntax --opt --no-use-osr
+//
+// Why not OSR? Because it may inline the `store` function into OSR'd code
+// below before it has a chance to be optimized, making
+// `assertOptimized(store)` fail.
function load(o) {
return o.x;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1299183.js b/deps/v8/test/mjsunit/regress/wasm/regress-1299183.js
new file mode 100644
index 0000000000..a64db462ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1299183.js
@@ -0,0 +1,215 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addStruct([]);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addType(makeSig([kWasmAnyRef, kWasmFuncRef, kWasmExternRef], [wasmRefType(0)]));
+builder.addType(makeSig([kWasmI64, kWasmF32, kWasmS128, kWasmI32], [wasmRefType(1), wasmOptRefType(2), kWasmI64, wasmOptRefType(2), kWasmI64]));
+builder.addType(makeSig([], [wasmOptRefType(2), wasmOptRefType(2), kWasmF64, wasmOptRefType(2), kWasmI32, wasmOptRefType(2), kWasmI32, kWasmI32, wasmOptRefType(2), kWasmI32, kWasmI32, kWasmI64, kWasmI32, kWasmS128, wasmOptRefType(2)]));
+builder.addType(makeSig([], []));
+builder.addType(makeSig([wasmRefType(kWasmAnyRef)], [kWasmI32, kWasmI32, wasmRefType(1), wasmRefType(kWasmAnyRef), kWasmI32, wasmRefType(1), kWasmI64, wasmOptRefType(4), kWasmI32, wasmRefType(kWasmAnyRef), wasmOptRefType(4), kWasmI64, kWasmI64, wasmRefType(kWasmEqRef), kWasmI32]));
+builder.addType(makeSig([wasmRefType(kWasmEqRef), kWasmAnyRef, kWasmI32, kWasmI32], [wasmRefType(1), kWasmI64, wasmOptRefType(4), kWasmI32, wasmRefType(kWasmAnyRef), wasmOptRefType(4), kWasmI64, kWasmI64, wasmRefType(kWasmEqRef), kWasmI32]));
+builder.addType(makeSig([kWasmI32, kWasmI32, wasmRefType(1), wasmRefType(kWasmAnyRef), kWasmI32, wasmRefType(1), kWasmI64, wasmOptRefType(4), kWasmI32, wasmRefType(kWasmAnyRef), wasmOptRefType(4), kWasmI64, kWasmI64, wasmRefType(kWasmEqRef), kWasmI32], [kWasmI32]));
+builder.addMemory(16, 32, false);
+builder.addTable(kWasmFuncRef, 4, 5, undefined)
+builder.addTable(kWasmFuncRef, 15, 25, undefined)
+builder.addTable(kWasmFuncRef, 1, 1, undefined)
+builder.addTable(kWasmFuncRef, 16, 17, undefined)
+builder.addActiveElementSegment(0, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3)], kWasmFuncRef);
+builder.addActiveElementSegment(1, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2)], kWasmFuncRef);
+builder.addActiveElementSegment(2, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0)], kWasmFuncRef);
+builder.addActiveElementSegment(3, WasmInitExpr.I32Const(0), [WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3), WasmInitExpr.RefFunc(0), WasmInitExpr.RefFunc(1), WasmInitExpr.RefFunc(2), WasmInitExpr.RefFunc(3)], kWasmFuncRef);
+builder.addTag(makeSig([], []));
+// Generate function 1 (out of 4).
+builder.addFunction(undefined, 1 /* sig */)
+ .addLocals(kWasmI64, 1).addLocals(wasmOptRefType(4), 1).addLocals(kWasmI32, 2).addLocals(kWasmI64, 1).addLocals(wasmOptRefType(4), 1).addLocals(kWasmI32, 1).addLocals(kWasmI64, 3).addLocals(kWasmI32, 1).addLocals(kWasmI64, 1).addLocals(kWasmI32, 1).addLocals(kWasmI64, 1).addLocals(wasmOptRefType(4), 1).addLocals(kWasmI64, 1)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprRefFunc, 0x01, // ref.func
+kExprBlock, 0x06, // block @32 i32 i32 (ref 1) (ref any) i32 (ref 1) i64 (ref null 4) i32 (ref any) (ref null 4) i64 i64 (ref eq) i32
+ kExprDrop, // drop
+ kExprI32Const, 0xf1, 0x00, // i32.const
+ kExprI64Const, 0x00, // i64.const
+ kExprI64Const, 0xe1, 0x00, // i64.const
+ kExprI64Const, 0x00, // i64.const
+ kExprI64Const, 0xef, 0x00, // i64.const
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0xf0, 0x02, // i32.const
+ kSimdPrefix, kExprI64x2ShrU, 0x01, // i64x2.shr_u
+ kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
+ kExprI32Const, 0x00, // i32.const
+ kExprRefFunc, 0x00, // ref.func
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprI32Const, 0x00, // i32.const
+ kExprRefFunc, 0x00, // ref.func
+ kExprI64Const, 0x00, // i64.const
+ kExprRefNull, 0x04, // ref.null
+ kExprI32Const, 0x00, // i32.const
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprRefNull, 0x04, // ref.null
+ kExprI64Const, 0x00, // i64.const
+ kExprI64Const, 0x00, // i64.const
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprI32Const, 0x00, // i32.const
+ kExprRefNull, 0x6e, // ref.null
+ kExprBrOnNull, 0x00, // br_on_null
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprI64ShrU, // i64.shr_u
+ kExprI64Ror, // i64.ror
+ kExprI64ShrS, // i64.shr_s
+ kExprI64Const, 0x01, // i64.const
+ kSimdPrefix, kExprS128Const, 0xff, 0x01, 0x0d, 0x00, 0x70, 0x70, 0x71, 0x3a, 0x00, 0x00, 0x00, 0x73, 0x01, 0x6f, 0x70, 0x71, // s128.const
+ kSimdPrefix, kExprI64x2ExtractLane, 0x01, // i64x2.extract_lane
+ kExprI64ShrS, // i64.shr_s
+ kExprI64Ror, // i64.ror
+ kAtomicPrefix, kExprI64AtomicStore16U, 0x01, 0xef, 0xc2, 0xbd, 0x8b, 0x06, // i64.atomic.store16_u
+ kSimdPrefix, kExprS128Const, 0x71, 0x6f, 0x61, 0x61, 0x6f, 0x70, 0x00, 0x01, 0x70, 0x00, 0x71, 0x70, 0x3a, 0x70, 0x00, 0x00, // s128.const
+ kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
+ kExprRefNull, 0x03, // ref.null
+ kExprRefNull, 0x70, // ref.null
+ kExprRefNull, 0x6f, // ref.null
+ kExprI32Const, 0x01, // i32.const
+ kExprCallIndirect, 0x02, 0x00, // call_indirect sig #2: r_nnn
+ kExprDrop, // drop
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kExprCallIndirect, 0x01, 0x00, // call_indirect sig #1: i_iii
+ kExprNop, // nop
+ kExprI64Const, 0xe1, 0x00, // i64.const
+ kExprI32Const, 0x00, // i32.const
+ kAtomicPrefix, kExprI64AtomicLoad, 0x02, 0xe0, 0x8c, 0xbc, 0x03, // i64.atomic.load64
+ kExprI64ShrU, // i64.shr_u
+ kAtomicPrefix, kExprI64AtomicStore8U, 0x00, 0x80, 0x82, 0x7c, // i64.atomic.store8_u
+ kExprBlock, 0x40, // block @219
+ kExprEnd, // end @221
+ kExprBlock, 0x7f, // block @222 i32
+ kExprI32Const, 0x00, // i32.const
+ kExprEnd, // end @226
+ kExprI32Const, 0xe3, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0xe3, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
+ kSimdPrefix, kExprI64x2ShrS, 0x01, // i64x2.shr_s
+ kSimdPrefix, kExprI32x4BitMask, 0x01, // i32x4.bitmask
+ kExprRefFunc, 0x00, // ref.func
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprI32Const, 0x00, // i32.const
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprRefNull, 0x6e, // ref.null
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kExprBlock, 0x07, // block @268 (ref 1) i64 (ref null 4) i32 (ref any) (ref null 4) i64 i64 (ref eq) i32
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprRefFunc, 0x00, // ref.func
+ kExprI64Const, 0x00, // i64.const
+ kExprRefNull, 0x04, // ref.null
+ kExprI32Const, 0x00, // i32.const
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprRefNull, 0x04, // ref.null
+ kExprI64Const, 0x00, // i64.const
+ kExprI64Const, 0x00, // i64.const
+ kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+ kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+ kExprI32Const, 0x00, // i32.const
+ kExprEnd, // end @302
+ kExprEnd, // end @303
+kExprBlock, 0x08, // block @304 i32
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprNop, // nop
+ kExprEnd, // end @321
+kExprEnd, // end @322
+]);
+// Generate function 2 (out of 4).
+builder.addFunction(undefined, 2 /* sig */)
+ .addBodyWithEnd([
+// signature: r_nnn
+// body:
+kGCPrefix, kExprRttCanon, 0x00, // rtt.canon
+kGCPrefix, kExprStructNewWithRtt, 0x00, // struct.new_with_rtt
+kExprEnd, // end @7
+]);
+// Generate function 3 (out of 4).
+builder.addFunction(undefined, 3 /* sig */)
+ .addBodyWithEnd([
+// signature: rnlnl_lfsi
+// body:
+kExprRefFunc, 0x00, // ref.func
+kExprRefNull, 0x02, // ref.null
+kExprI64Const, 0x00, // i64.const
+kExprRefNull, 0x02, // ref.null
+kExprI64Const, 0x00, // i64.const
+kExprEnd, // end @11
+]);
+// Generate function 4 (out of 4).
+builder.addFunction(undefined, 4 /* sig */)
+ .addBodyWithEnd([
+// signature: nndniniiniilisn_v
+// body:
+kExprRefNull, 0x02, // ref.null
+kExprRefNull, 0x02, // ref.null
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // f64.const
+kExprRefNull, 0x02, // ref.null
+kExprI32Const, 0x00, // i32.const
+kExprRefNull, 0x02, // ref.null
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprRefNull, 0x02, // ref.null
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI64Const, 0x00, // i64.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x00, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprRefNull, 0x02, // ref.null
+kExprEnd, // end @40
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(0, instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1308333.js b/deps/v8/test/mjsunit/regress/wasm/regress-1308333.js
new file mode 100644
index 0000000000..d1b530be99
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1308333.js
@@ -0,0 +1,260 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addMemory(16, 32, false);
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0xe2, 0x80, 0xae, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Xor, // i32.xor
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x78, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x10, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32And, // i32.and
+kExprCallFunction, 0x00, // call function #0: i_iii
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x7c, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprI32Const, 0x73, // i32.const
+kExprUnreachable, // unreachable
+kExprEnd // end @353
+]).exportAs("main");
+let instance = builder.instantiate();
+assertThrows(() => instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1314363.js b/deps/v8/test/mjsunit/regress/wasm/regress-1314363.js
new file mode 100644
index 0000000000..c3bd238aae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1314363.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-enable-sse4-1
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, makeSig([], [kWasmI64]))
+ .addBody([
+ ...wasmF32Const(11.3), // f32.const
+ kExprI64SConvertF32, // i64.trunc_f32_s
+ ])
+ .exportAs('main');
+let instance = builder.instantiate();
+assertEquals(11n, instance.exports.main());
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js
index 12b7d57f8b..fb5fbad616 100644
--- a/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-atomics.js
@@ -11,10 +11,20 @@ let S = new SharedStructType(['field']);
(function TestPrimitivesUsingAtomics() {
// All primitives can be stored in fields.
let s = new S();
- for (let prim of [42, -0, undefined, null, true, false, "foo"]) {
+ const prims = [42, -0, undefined, null, true, false, "foo"];
+
+ for (let prim of prims) {
Atomics.store(s, 'field', prim);
assertEquals(Atomics.load(s, 'field'), prim);
}
+
+ for (let prim1 of prims) {
+ for (let prim2 of prims) {
+ s.field = prim1;
+ assertEquals(Atomics.exchange(s, 'field', prim2), prim1);
+ assertEquals(s.field, prim2);
+ }
+ }
})();
(function TestObjectsUsingAtomics() {
@@ -26,6 +36,10 @@ let S = new SharedStructType(['field']);
let shared_rhs = new S();
Atomics.store(s, 'field', shared_rhs);
assertEquals(Atomics.load(s, 'field'), shared_rhs);
+
+ let shared_rhs2 = new S();
+ assertEquals(Atomics.exchange(s, 'field', shared_rhs2), shared_rhs);
+ assertEquals(s.field, shared_rhs2);
})();
(function TestNotExtensibleUsingAtomics() {
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-without-map-space.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-without-map-space.js
new file mode 100644
index 0000000000..4706629b97
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-without-map-space.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --no-use-map-space --harmony-struct
+
+"use strict";
+
+// Test ensures that deserialization works without map space and
+// that we can allocate maps in the shared heap.
+
+let SomeStruct = new SharedStructType(['field1', 'field2']);
diff --git a/deps/v8/test/mjsunit/smi-ops-inlined.js b/deps/v8/test/mjsunit/smi-ops-inlined.js
index dd753d17b0..9371ccd30f 100644
--- a/deps/v8/test/mjsunit/smi-ops-inlined.js
+++ b/deps/v8/test/mjsunit/smi-ops-inlined.js
@@ -434,7 +434,7 @@ function testShiftNonSmis() {
assertEquals(0x46536000, (neg_32 + neg_smi - 0.5) << 4);
assertEquals(-0x73594000, (neg_32 + neg_smi - 0.5) << 5);
// End block A repeat 1
- // Repeat block A with shift amounts in variables intialized with
+ // Repeat block A with shift amounts in variables initialized with
// a constant.
var zero = 0;
var one = 1;
diff --git a/deps/v8/test/mjsunit/smi-ops.js b/deps/v8/test/mjsunit/smi-ops.js
index 7945855f39..359fa721b2 100644
--- a/deps/v8/test/mjsunit/smi-ops.js
+++ b/deps/v8/test/mjsunit/smi-ops.js
@@ -443,7 +443,7 @@ function testShiftNonSmis() {
assertEquals(0x46536000, (neg_32 + neg_smi - 0.5) << 4);
assertEquals(-0x73594000, (neg_32 + neg_smi - 0.5) << 5);
// End block A repeat 1
- // Repeat block A with shift amounts in variables intialized with
+ // Repeat block A with shift amounts in variables initialized with
// a constant.
var zero = 0;
var one = 1;
diff --git a/deps/v8/test/mjsunit/stack-traces-class-fields.js b/deps/v8/test/mjsunit/stack-traces-class-fields.js
index 1c2a954fdf..aa7edf9fb1 100644
--- a/deps/v8/test/mjsunit/stack-traces-class-fields.js
+++ b/deps/v8/test/mjsunit/stack-traces-class-fields.js
@@ -183,13 +183,13 @@ function testStaticClassFieldCall() {
}
// ReferenceError: FAIL is not defined
-// at Function.thrower [as x]
+// at X.thrower [as x]
// at testStaticClassFieldCall
// at testTrace
testTrace(
"during static class field call",
testStaticClassFieldCall,
- ["Function.thrower"],
+ ["X.thrower"],
["anonymous"]
);
@@ -226,12 +226,12 @@ function testStaticClassFieldCallWithFNI() {
}
// ReferenceError: FAIL is not defined
-// at Function.x
+// at X.x
// at testStaticClassFieldCallWithFNI
// at testTrace
testTrace(
"during static class field call with FNI",
testStaticClassFieldCallWithFNI,
- ["Function.x"],
+ ["X.x"],
["anonymous"]
);
diff --git a/deps/v8/test/mjsunit/temporal/duration-negated.js b/deps/v8/test/mjsunit/temporal/duration-negated.js
index 033f24d5ee..d113f04448 100644
--- a/deps/v8/test/mjsunit/temporal/duration-negated.js
+++ b/deps/v8/test/mjsunit/temporal/duration-negated.js
@@ -6,7 +6,7 @@
d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
let d1 = new Temporal.Duration();
-assertDuration(d1.negated(), -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, 0, true);
+assertDuration(d1.negated(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
assertDuration(d2.negated(), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index faac39847d..6fa631a0a8 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -35,10 +35,6 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
from testrunner.outproc import base as outproc
-try:
- basestring # Python 2
-except NameError: # Python 3
- basestring = str
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
@@ -249,7 +245,7 @@ class CombinedTest(testcase.D8TestCase):
def _is_flag_blocked(self, flag):
for item in MISBEHAVING_COMBINED_TESTS_FLAGS:
- if isinstance(item, basestring):
+ if isinstance(item, str):
if item == flag:
return True
elif item.match(flag):
diff --git a/deps/v8/test/mjsunit/thin-strings.js b/deps/v8/test/mjsunit/thin-strings.js
index 0c50cf6971..b90a196638 100644
--- a/deps/v8/test/mjsunit/thin-strings.js
+++ b/deps/v8/test/mjsunit/thin-strings.js
@@ -94,3 +94,18 @@ cc2(t);
cc2(t);
%OptimizeFunctionOnNextCall(cc2);
cc2(t);
+
+function string_table_lookup_sliced_thin_string(a, b) {
+ // Make a ConsString.
+ var s = a + b;
+ // Slice a substring out of it.
+ var slice = s.substring(0, 20);
+ // Make the original string thin.
+ var o = {};
+ o[s];
+ // Try to internalize the SlicedString.
+ o[slice];
+}
+
+string_table_lookup_sliced_thin_string(
+ 'abcdefghijklmnopqrstuvwxyz', '0123456789');
diff --git a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
index 28a9d42bb0..0b56f49e33 100644
--- a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
@@ -3565,3 +3565,120 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
assertEquals([7, 8, 9, 10, 0, 0], ToNumbers(taFull));
}
})();
+
+(function ObjectDefinePropertyDefineProperties() {
+ for (let helper of
+ [ObjectDefinePropertyHelper, ObjectDefinePropertiesHelper]) {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(
+ gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(
+ gsab, 2 * ctor.BYTES_PER_ELEMENT);
+ const taFull = new ctor(gsab, 0);
+
+ // Orig. array: [0, 0, 0, 0]
+ // [0, 0, 0, 0] << fixedLength
+ // [0, 0] << fixedLengthWithOffset
+ // [0, 0, 0, 0, ...] << lengthTracking
+ // [0, 0, ...] << lengthTrackingWithOffset
+
+ helper(fixedLength, 0, 1);
+ assertEquals([1, 0, 0, 0], ToNumbers(taFull));
+ helper(fixedLengthWithOffset, 0, 2);
+ assertEquals([1, 0, 2, 0], ToNumbers(taFull));
+ helper(lengthTracking, 1, 3);
+ assertEquals([1, 3, 2, 0], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 1, 4);
+ assertEquals([1, 3, 2, 4], ToNumbers(taFull));
+
+ assertThrows(() => { helper(fixedLength, 4, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 2, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTracking, 4, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTrackingWithOffset, 2, 8); },
+ TypeError);
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+
+ helper(fixedLength, 0, 9);
+ assertEquals([9, 3, 2, 4, 0, 0], ToNumbers(taFull));
+ helper(fixedLengthWithOffset, 0, 10);
+ assertEquals([9, 3, 10, 4, 0, 0], ToNumbers(taFull));
+ helper(lengthTracking, 1, 11);
+ assertEquals([9, 11, 10, 4, 0, 0], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 2, 12);
+ assertEquals([9, 11, 10, 4, 12, 0], ToNumbers(taFull));
+
+ // Trying to define properties out of the fixed-length bounds throws.
+ assertThrows(() => { helper(fixedLength, 5, 13); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 3, 13); }, TypeError);
+ assertEquals([9, 11, 10, 4, 12, 0], ToNumbers(taFull));
+
+ helper(lengthTracking, 4, 14);
+ assertEquals([9, 11, 10, 4, 14, 0], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 3, 15);
+ assertEquals([9, 11, 10, 4, 14, 15], ToNumbers(taFull));
+
+ assertThrows(() => { helper(fixedLength, 6, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 4, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTracking, 6, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTrackingWithOffset, 4, 8); },
+ TypeError);
+
+ }
+ }
+})();
+
+(function ObjectDefinePropertyParameterConversionGrows() {
+ const helper = ObjectDefinePropertyHelper;
+ // Length tracking.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab, 0);
+ const evil = {toString: () => {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ return 4; // Index valid after resize.
+ }};
+ helper(lengthTracking, evil, 8);
+ assertEquals([0, 0, 0, 0, 8, 0], ToNumbers(lengthTracking));
+ }
+})();
+
+(function ObjectFreeze() {
+ // Freezing non-OOB non-zero-length TAs throws.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(
+ gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(
+ gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { Object.freeze(fixedLength); }, TypeError);
+ assertThrows(() => { Object.freeze(fixedLengthWithOffset); }, TypeError);
+ assertThrows(() => { Object.freeze(lengthTracking); }, TypeError);
+ assertThrows(() => { Object.freeze(lengthTrackingWithOffset); }, TypeError);
+ }
+ // Freezing zero-length TAs doesn't throw.
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 0);
+ const fixedLengthWithOffset = new ctor(
+ gsab, 2 * ctor.BYTES_PER_ELEMENT, 0);
+ // Zero-length because the offset is at the end:
+ const lengthTrackingWithOffset = new ctor(
+ gsab, 4 * ctor.BYTES_PER_ELEMENT);
+
+ Object.freeze(fixedLength);
+ Object.freeze(fixedLengthWithOffset);
+ Object.freeze(lengthTrackingWithOffset);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/typedarray-helpers.js b/deps/v8/test/mjsunit/typedarray-helpers.js
index 366e094fe7..43fb88634b 100644
--- a/deps/v8/test/mjsunit/typedarray-helpers.js
+++ b/deps/v8/test/mjsunit/typedarray-helpers.js
@@ -234,3 +234,21 @@ function assertAllDataViewMethodsThrow(view, index, errorType) {
assertThrows(() => { getter.call(view, index); }, errorType);
}
}
+
+function ObjectDefinePropertyHelper(ta, index, value) {
+ if (ta instanceof BigInt64Array || ta instanceof BigUint64Array) {
+ Object.defineProperty(ta, index, {value: BigInt(value)});
+ } else {
+ Object.defineProperty(ta, index, {value: value});
+ }
+}
+
+function ObjectDefinePropertiesHelper(ta, index, value) {
+ const values = {};
+ if (ta instanceof BigInt64Array || ta instanceof BigUint64Array) {
+ values[index] = {value: BigInt(value)};
+ } else {
+ values[index] = {value: value};
+ }
+ Object.defineProperties(ta, values);
+}
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
index 9ece49cbe5..e02b570084 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
@@ -38,57 +38,6 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
}
})();
-(function ConstructFromTypedArraySpeciesConstructorDetaches() {
- let rab;
- class MyArrayBuffer extends ArrayBuffer {
- constructor(...params) {
- super(...params);
- }
- static get [Symbol.species]() {
- %ArrayBufferDetach(rab);
- }
- };
-
- function CreateRabForTest(ctor) {
- const rab = new MyArrayBuffer(
- 4 * ctor.BYTES_PER_ELEMENT,
- {maxByteLength: 8 * ctor.BYTES_PER_ELEMENT});
- // Write some data into the array.
- const taWrite = new ctor(rab);
- for (let i = 0; i < 4; ++i) {
- WriteToTypedArray(taWrite, i, 2 * i);
- }
- return rab;
- }
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const fixedLength = new sourceCtor(rab, 0, 4);
- assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const fixedLengthWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
- assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTracking = new sourceCtor(rab, 0);
- assertThrows(() => { new targetCtor(lengthTracking); }, TypeError);
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTrackingWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
- assertThrows(() => { new targetCtor(lengthTrackingWithOffset); },
- TypeError);
- });
-})();
-
(function AccessDetachedTypedArray() {
const rab = CreateResizableArrayBuffer(16, 40);
@@ -1207,32 +1156,29 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
});
}
- // Tests where the length getter returns a non-zero value -> these throw.
+ // Tests where the length getter detaches -> these are no-ops.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const fixedLength = new ctor(rab, 0, 4);
- assertThrows(() => { fixedLength.set(CreateSourceProxy(1)); }, TypeError);
+ fixedLength.set(CreateSourceProxy(1));
}
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
- assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(1)); },
- TypeError);
+ fixedLengthWithOffset.set(CreateSourceProxy(1));
}
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
- assertThrows(() => { lengthTracking.set(CreateSourceProxy(1)); },
- TypeError);
+ lengthTracking.set(CreateSourceProxy(1));
}
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
- assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(1)); },
- TypeError);
+ lengthTrackingWithOffset.set(CreateSourceProxy(1));
}
// Tests where the length getter returns a zero -> these don't throw.
@@ -1302,31 +1248,28 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
rab = CreateRabForTest(ctor);
const fixedLength = new ctor(rab, 0, 4);
detachAt = 2;
- assertThrows(() => { fixedLength.set(CreateSourceProxy(4)); }, TypeError);
+ fixedLength.set(CreateSourceProxy(4));
}
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
detachAt = 2;
- assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(2)); },
- TypeError);
+ fixedLengthWithOffset.set(CreateSourceProxy(2));
}
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
detachAt = 2;
- assertThrows(() => { lengthTracking.set(CreateSourceProxy(2)); },
- TypeError);
+ lengthTracking.set(CreateSourceProxy(2));
}
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
detachAt = 2;
- assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(2)); },
- TypeError);
+ lengthTrackingWithOffset.set(CreateSourceProxy(2));
}
})();
@@ -1458,6 +1401,12 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
return 0;
}
+ function AssertIsDetached(ta) {
+ assertEquals(0, ta.byteLength);
+ assertEquals(0, ta.byteOffset);
+ assertEquals(0, ta.length);
+ }
+
// Fixed length TA.
for (let ctor of ctors) {
rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
@@ -1466,7 +1415,8 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
const taFull = new ctor(rab, 0);
WriteUnsortedData(taFull);
- assertThrows(() => { fixedLength.sort(CustomComparison); });
+ fixedLength.sort(CustomComparison);
+ AssertIsDetached(fixedLength);
}
// Length-tracking TA.
@@ -1477,6 +1427,63 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
const taFull = new ctor(rab, 0);
WriteUnsortedData(taFull);
- assertThrows(() => { lengthTracking.sort(CustomComparison); });
+ lengthTracking.sort(CustomComparison);
+ AssertIsDetached(lengthTracking);
+ }
+})();
+
+(function ObjectDefineProperty() {
+ for (let helper of
+ [ObjectDefinePropertyHelper, ObjectDefinePropertiesHelper]) {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 0, 0, 0]
+ // [0, 0, 0, 0] << fixedLength
+ // [0, 0] << fixedLengthWithOffset
+ // [0, 0, 0, 0, ...] << lengthTracking
+ // [0, 0, ...] << lengthTrackingWithOffset
+
+ %ArrayBufferDetach(rab);
+
+ assertThrows(() => { helper(fixedLength, 0, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 0, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTracking, 0, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTrackingWithOffset, 0, 8); },
+ TypeError);
+ }
+ }
+})();
+
+(function ObjectDefinePropertyParameterConversionDetaches() {
+ const helper = ObjectDefinePropertyHelper;
+ // Fixed length.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const evil = {toString: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertThrows(() => { helper(fixedLength, evil, 8); }, TypeError);
+ }
+ // Length tracking.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab, 0);
+ const evil = {toString: () => {
+ %ArrayBufferDetach(rab);
+ return 0;
+ }};
+ assertThrows(() => { helper(lengthTracking, evil, 8); }, TypeError);
}
})();
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
index acd636cae3..f24f2de46b 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
@@ -174,128 +174,6 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
});
})();
-(function ConstructFromTypedArraySpeciesConstructorShrinks() {
- let rab;
- let resizeTo;
- class MyArrayBuffer extends ArrayBuffer {
- constructor(...params) {
- super(...params);
- }
- static get [Symbol.species]() {
- rab.resize(resizeTo);
- }
- };
-
- function CreateRabForTest(ctor) {
- const rab = new MyArrayBuffer(
- 4 * ctor.BYTES_PER_ELEMENT,
- {maxByteLength: 8 * ctor.BYTES_PER_ELEMENT});
- // Write some data into the array.
- const taWrite = new ctor(rab);
- for (let i = 0; i < 4; ++i) {
- WriteToTypedArray(taWrite, i, 2 * i);
- }
- return rab;
- }
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const fixedLength = new sourceCtor(rab, 0, 4);
- resizeTo = 2 * sourceCtor.BYTES_PER_ELEMENT;
- assertThrows(() => { new targetCtor(fixedLength); }, TypeError);
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const fixedLengthWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
- resizeTo = 2 * sourceCtor.BYTES_PER_ELEMENT;
- assertThrows(() => { new targetCtor(fixedLengthWithOffset); }, TypeError);
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTracking = new sourceCtor(rab, 0);
- resizeTo = 2 * sourceCtor.BYTES_PER_ELEMENT;
- assertEquals([0, 2], ToNumbers(new targetCtor(lengthTracking)));
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTrackingWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
- resizeTo = 3 * sourceCtor.BYTES_PER_ELEMENT;
- assertEquals([4], ToNumbers(new targetCtor(lengthTrackingWithOffset)));
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTrackingWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
- resizeTo = 1 * sourceCtor.BYTES_PER_ELEMENT;
- assertThrows(() => { new targetCtor(lengthTrackingWithOffset); },
- TypeError);
- });
-})();
-
-(function ConstructFromTypedArraySpeciesConstructorGrows() {
- let rab;
- let resizeTo;
- class MyArrayBuffer extends ArrayBuffer {
- constructor(...params) {
- super(...params);
- }
- static get [Symbol.species]() {
- rab.resize(resizeTo);
- }
- };
- function CreateRabForTest(ctor) {
- const rab = new MyArrayBuffer(
- 4 * ctor.BYTES_PER_ELEMENT,
- {maxByteLength: 8 * ctor.BYTES_PER_ELEMENT});
- // Write some data into the array.
- const taWrite = new ctor(rab);
- for (let i = 0; i < 4; ++i) {
- WriteToTypedArray(taWrite, i, 2 * i);
- }
- return rab;
- }
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const fixedLength = new sourceCtor(rab, 0, 4);
- resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
- // Fixed-length TA unaffected by growing.
- assertEquals([0, 2, 4, 6], ToNumbers(new targetCtor(fixedLength)));
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const fixedLengthWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT, 2);
- resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
- // Fixed-length TA unaffected by growing.
- assertEquals([4, 6], ToNumbers(new targetCtor(fixedLengthWithOffset)));
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTracking = new sourceCtor(rab, 0);
- resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
- assertEquals([0, 2, 4, 6, 0, 0],
- ToNumbers(new targetCtor(lengthTracking)));
- });
-
- AllBigIntMatchedCtorCombinations((targetCtor, sourceCtor) => {
- rab = CreateRabForTest(sourceCtor);
- const lengthTrackingWithOffset = new sourceCtor(
- rab, 2 * sourceCtor.BYTES_PER_ELEMENT);
- resizeTo = 6 * sourceCtor.BYTES_PER_ELEMENT;
- assertEquals([4, 6, 0, 0],
- ToNumbers(new targetCtor(lengthTrackingWithOffset)));
- });
-})();
-
(function TypedArrayLengthWhenResizedOutOfBounds1() {
const rab = CreateResizableArrayBuffer(16, 40);
@@ -5570,13 +5448,13 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
});
}
- // Tests where the length getter returns a non-zero value -> these throw if
+ // Tests where the length getter returns a non-zero value -> these are no-ops if
// the TA went OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const fixedLength = new ctor(rab, 0, 4);
resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
- assertThrows(() => { fixedLength.set(CreateSourceProxy(1)); }, TypeError);
+ fixedLength.set(CreateSourceProxy(1));
assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
}
@@ -5584,8 +5462,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
rab = CreateRabForTest(ctor);
const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
- assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(1)); },
- TypeError);
+ fixedLengthWithOffset.set(CreateSourceProxy(1));
assertEquals([0, 2, 4], ToNumbers(new ctor(rab)));
}
@@ -5612,8 +5489,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
resizeTo = 1 * ctor.BYTES_PER_ELEMENT;
- assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(1)); },
- TypeError);
+ lengthTrackingWithOffset.set(CreateSourceProxy(1));
assertEquals([0], ToNumbers(new ctor(rab)));
}
@@ -5757,7 +5633,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const fixedLength = new ctor(rab, 0, 4);
resizeAt = 2;
resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
- assertThrows(() => { fixedLength.set(CreateSourceProxy(4)); }, TypeError);
+ fixedLength.set(CreateSourceProxy(4));
assertEquals([1, 2, 4], ToNumbers(new ctor(rab)));
}
@@ -5766,8 +5642,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
resizeAt = 2;
resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
- assertThrows(() => { fixedLengthWithOffset.set(CreateSourceProxy(2)); },
- TypeError);
+ fixedLengthWithOffset.set(CreateSourceProxy(2));
assertEquals([0, 2, 1], ToNumbers(new ctor(rab)));
}
@@ -5797,8 +5672,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
resizeAt = 1;
resizeTo = 1 * ctor.BYTES_PER_ELEMENT;
- assertThrows(() => { lengthTrackingWithOffset.set(CreateSourceProxy(2)); },
- TypeError);
+ lengthTrackingWithOffset.set(CreateSourceProxy(2));
assertEquals([0], ToNumbers(new ctor(rab)));
}
})();
@@ -6597,7 +6471,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const taFull = new ctor(rab, 0);
WriteUnsortedData(taFull);
- assertThrows(() => { fixedLength.sort(CustomComparison); });
+ fixedLength.sort(CustomComparison);
// The data is unchanged.
assertEquals([10, 9], ToNumbers(taFull));
@@ -6674,3 +6548,206 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
assertEquals([7, 8, 9, 10, 0, 0], ToNumbers(taFull));
}
})();
+
+(function ObjectDefinePropertyDefineProperties() {
+ for (let helper of
+ [ObjectDefinePropertyHelper, ObjectDefinePropertiesHelper]) {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT);
+ const taFull = new ctor(rab, 0);
+
+ // Orig. array: [0, 0, 0, 0]
+ // [0, 0, 0, 0] << fixedLength
+ // [0, 0] << fixedLengthWithOffset
+ // [0, 0, 0, 0, ...] << lengthTracking
+ // [0, 0, ...] << lengthTrackingWithOffset
+
+ helper(fixedLength, 0, 1);
+ assertEquals([1, 0, 0, 0], ToNumbers(taFull));
+ helper(fixedLengthWithOffset, 0, 2);
+ assertEquals([1, 0, 2, 0], ToNumbers(taFull));
+ helper(lengthTracking, 1, 3);
+ assertEquals([1, 3, 2, 0], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 1, 4);
+ assertEquals([1, 3, 2, 4], ToNumbers(taFull));
+
+ assertThrows(() => { helper(fixedLength, 4, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 2, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTracking, 4, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTrackingWithOffset, 2, 8); },
+ TypeError);
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [1, 3, 2]
+ // [1, 3, 2, ...] << lengthTracking
+ // [2, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { helper(fixedLength, 0, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 0, 8); }, TypeError);
+ assertEquals([1, 3, 2], ToNumbers(taFull));
+
+ helper(lengthTracking, 0, 5);
+ assertEquals([5, 3, 2], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 0, 6);
+ assertEquals([5, 3, 6], ToNumbers(taFull));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { helper(fixedLength, 0, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 0, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTrackingWithOffset, 0, 8); },
+ TypeError);
+ assertEquals([5], ToNumbers(taFull));
+
+ helper(lengthTracking, 0, 7);
+ assertEquals([7], ToNumbers(taFull));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { helper(fixedLength, 0, 8); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 0, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTracking, 0, 8); }, TypeError);
+ assertThrows(() => { helper(lengthTrackingWithOffset, 0, 8); },
+ TypeError);
+ assertEquals([], ToNumbers(taFull));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+
+ helper(fixedLength, 0, 9);
+ assertEquals([9, 0, 0, 0, 0, 0], ToNumbers(taFull));
+ helper(fixedLengthWithOffset, 0, 10);
+ assertEquals([9, 0, 10, 0, 0, 0], ToNumbers(taFull));
+ helper(lengthTracking, 1, 11);
+ assertEquals([9, 11, 10, 0, 0, 0], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 2, 12);
+ assertEquals([9, 11, 10, 0, 12, 0], ToNumbers(taFull));
+
+ // Trying to define properties out of the fixed-length bounds throws.
+ assertThrows(() => { helper(fixedLength, 5, 13); }, TypeError);
+ assertThrows(() => { helper(fixedLengthWithOffset, 3, 13); }, TypeError);
+ assertEquals([9, 11, 10, 0, 12, 0], ToNumbers(taFull));
+
+ helper(lengthTracking, 4, 14);
+ assertEquals([9, 11, 10, 0, 14, 0], ToNumbers(taFull));
+ helper(lengthTrackingWithOffset, 3, 15);
+ assertEquals([9, 11, 10, 0, 14, 15], ToNumbers(taFull));
+ }
+ }
+})();
+
+(function ObjectDefinePropertyParameterConversionShrinks() {
+ const helper = ObjectDefinePropertyHelper;
+ // Fixed length.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const evil = {toString: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ assertThrows(() => { helper(fixedLength, evil, 8); }, TypeError);
+ }
+ // Length tracking.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab, 0);
+ const evil = {toString: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 3; // Index too large after resize.
+ }};
+ assertThrows(() => { helper(lengthTracking, evil, 8); }, TypeError);
+ }
+})();
+
+(function ObjectDefinePropertyParameterConversionGrows() {
+ const helper = ObjectDefinePropertyHelper;
+ // Fixed length.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ // Make fixedLength go OOB.
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ const evil = {toString: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return 0;
+ }};
+ helper(fixedLength, evil, 8);
+ assertEquals([8, 0, 0, 0], ToNumbers(fixedLength));
+ }
+ // Length tracking.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab, 0);
+ const evil = {toString: () => {
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ return 4; // Index valid after resize.
+ }};
+ helper(lengthTracking, evil, 8);
+ assertEquals([0, 0, 0, 0, 8, 0], ToNumbers(lengthTracking));
+ }
+})();
+
+(function ObjectFreeze() {
+ // Freezing non-OOB non-zero-length TAs throws.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { Object.freeze(fixedLength); }, TypeError);
+ assertThrows(() => { Object.freeze(fixedLengthWithOffset); }, TypeError);
+ assertThrows(() => { Object.freeze(lengthTracking); }, TypeError);
+ assertThrows(() => { Object.freeze(lengthTrackingWithOffset); }, TypeError);
+ }
+ // Freezing zero-length TAs doesn't throw.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 0);
+ const fixedLengthWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT, 0);
+ // Zero-length because the offset is at the end:
+ const lengthTrackingWithOffset = new ctor(
+ rab, 4 * ctor.BYTES_PER_ELEMENT);
+
+ Object.freeze(fixedLength);
+ Object.freeze(fixedLengthWithOffset);
+ Object.freeze(lengthTrackingWithOffset);
+ }
+ // If the buffer has been resized to make length-tracking TAs zero-length,
+ // freezing them also doesn't throw.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(
+ rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ Object.freeze(lengthTrackingWithOffset);
+
+ rab.resize(0 * ctor.BYTES_PER_ELEMENT);
+ Object.freeze(lengthTracking);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/call-ref.js b/deps/v8/test/mjsunit/wasm/call-ref.js
index 2a38bba178..812a764f7f 100644
--- a/deps/v8/test/mjsunit/wasm/call-ref.js
+++ b/deps/v8/test/mjsunit/wasm/call-ref.js
@@ -96,11 +96,10 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print("--imported function from another module--");
assertEquals(57, instance.exports.test_wasm_import());
- /* TODO(7748): Implement cross-module type canonicalization.
print("--not imported function defined in another module--");
assertEquals(19, instance.exports.main(
exporting_instance.exports.addition, 12, 7));
-*/
+
print("--imported WebAssembly.Function--")
assertEquals(21, instance.exports.test_js_api_import());
print("--not imported WebAssembly.Function--")
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
index 1821a1fef5..35726fedc5 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
@@ -96,7 +96,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
kExprI32Mul])
.setCompilationHint(kCompilationHintStrategyEager,
kCompilationHintTierDefault,
- kCompilationHintTierOptimized)
+ kCompilationHintTierOptimized);
builder.instantiate();
})();
@@ -128,3 +128,35 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
.exportFunc();
builder.instantiate();
})();
+
+(function testDecodeIllegalCompilationHintBaselineTier() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let kIllegalHintTier = 0x03;
+ builder.addFunction('func', kSig_i_i)
+ .addBody([kExprUnreachable])
+ .setCompilationHint(
+ kCompilationHintStrategyDefault, kIllegalHintTier,
+ kCompilationHintTierDefault);
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ new RegExp(
+ 'WebAssembly.Module\\(\\): Invalid compilation hint 0x0c ' +
+ '\\(invalid tier 0x03\\)'));
+})();
+
+(function testDecodeIllegalCompilationHintTopTier() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let kIllegalHintTier = 0x03;
+ builder.addFunction('func', kSig_i_i)
+ .addBody([kExprUnreachable])
+ .setCompilationHint(
+ kCompilationHintStrategyDefault, kCompilationHintTierDefault,
+ kIllegalHintTier);
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ new RegExp(
+ 'WebAssembly.Module\\(\\): Invalid compilation hint 0x30 ' +
+ '\\(invalid tier 0x03\\)'));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/generic-wrapper.js b/deps/v8/test/mjsunit/wasm/generic-wrapper.js
index 72870537e3..891adc8634 100644
--- a/deps/v8/test/mjsunit/wasm/generic-wrapper.js
+++ b/deps/v8/test/mjsunit/wasm/generic-wrapper.js
@@ -787,3 +787,26 @@ let kSig_f_iiliiiffddlifffdi = makeSig([kWasmI32, kWasmI32, kWasmI64, kWasmI32,
}
caller();
})();
+
+(function testGenericWrapper6Ref7F64Param() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_r_ddrrrrrrddddd = builder.addType(makeSig(
+ [kWasmF64, kWasmF64, kWasmExternRef, kWasmExternRef, kWasmExternRef,
+ kWasmExternRef, kWasmExternRef, kWasmExternRef, kWasmF64, kWasmF64,
+ kWasmF64, kWasmF64, kWasmF64],
+ [kWasmExternRef]));
+
+
+ builder.addFunction("func0", sig_r_ddrrrrrrddddd)
+ .addBody([
+ kExprLocalGet, 7,
+ ])
+ .exportAs("func0");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ let res = instance.exports.func0(1, 2, "3", "4", "5", "6", "7",
+ "8", 9, 10, 11, 12, 13);
+ assertEquals("8", res);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/imported-function-types.js b/deps/v8/test/mjsunit/wasm/imported-function-types.js
index 756296cbc1..895644017e 100644
--- a/deps/v8/test/mjsunit/wasm/imported-function-types.js
+++ b/deps/v8/test/mjsunit/wasm/imported-function-types.js
@@ -35,9 +35,8 @@ var importing_module = function(imported_function) {
return builder.instantiate({other: {func: imported_function}});
};
-// TODO(7748): Implement cross-module subtyping.
// Same form/different index should be fine.
-// importing_module(exporting_module.exports.func2);
+importing_module(exporting_module.exports.func2);
// Same index/different form should throw.
assertThrows(
() => importing_module(exporting_module.exports.func1),
diff --git a/deps/v8/test/mjsunit/wasm/reference-globals.js b/deps/v8/test/mjsunit/wasm/reference-globals.js
index 361708d6fe..6ab071f9fa 100644
--- a/deps/v8/test/mjsunit/wasm/reference-globals.js
+++ b/deps/v8/test/mjsunit/wasm/reference-globals.js
@@ -6,7 +6,6 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
-/* TODO(7748): Implement cross-module subtyping.
(function TestReferenceGlobals() {
print(arguments.callee.name);
@@ -106,7 +105,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// The correct function reference has been passed.
assertEquals(66, instance.exports.test_import(42, 24));
})();
-*/
(function TestStructInitExpr() {
print(arguments.callee.name);
diff --git a/deps/v8/test/mjsunit/wasm/reference-tables.js b/deps/v8/test/mjsunit/wasm/reference-tables.js
index 3bbf0ffdac..b31503562a 100644
--- a/deps/v8/test/mjsunit/wasm/reference-tables.js
+++ b/deps/v8/test/mjsunit/wasm/reference-tables.js
@@ -5,7 +5,6 @@
// Flags: --experimental-wasm-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
-/* TODO(7748): Implement cross-module subtyping.
(function TestTables() {
print(arguments.callee.name);
var exporting_instance = (function() {
@@ -100,9 +99,8 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertThrows(
() => instance.exports.table.set(0, exporting_instance.exports.addition),
TypeError,
- /Argument 1 must be null or a WebAssembly function of type compatible to/);
+ /Argument 1 is invalid for table of type \(ref null 0\)/);
})();
-*/
(function TestNonNullableTables() {
print(arguments.callee.name);
diff --git a/deps/v8/test/mjsunit/wasm/resizablearraybuffer-growablesharedarraybuffer-wasm.js b/deps/v8/test/mjsunit/wasm/resizablearraybuffer-growablesharedarraybuffer-wasm.js
new file mode 100644
index 0000000000..9c545e966d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/resizablearraybuffer-growablesharedarraybuffer-wasm.js
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+(function TestMemoryBufferNotResizable() {
+ const m = new WebAssembly.Memory({
+ initial: 128
+ });
+
+ assertFalse(m.buffer.resizable);
+ // For non-resizable buffers, maxByteLength returns byteLength.
+ assertEquals(m.buffer.maxByteLength, m.buffer.byteLength);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js b/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js
new file mode 100644
index 0000000000..dc89fed3c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js
@@ -0,0 +1,67 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --experimental-wasm-gc
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let builder = new WasmModuleBuilder();
+
+let struct_index = builder.addStruct([makeField(kWasmI32, true)]);
+let identical_struct_index = builder.addStruct([makeField(kWasmI32, true)]);
+let distinct_struct_index = builder.addStruct([makeField(kWasmI64, true)]);
+
+let struct_init = builder.addFunction("struct_init",
+ makeSig([], [kWasmDataRef]))
+ .addBody([kGCPrefix, kExprStructNewDefault, struct_index])
+ .exportFunc();
+let test_pass = builder.addFunction("test_pass",
+ makeSig([kWasmDataRef], [kWasmI32]))
+ .addBody([kExprLocalGet, 0,
+ kGCPrefix, kExprRefTestStatic, identical_struct_index])
+ .exportFunc();
+let test_fail = builder.addFunction("test_fail",
+ makeSig([kWasmDataRef], [kWasmI32]))
+ .addBody([kExprLocalGet, 0,
+ kGCPrefix, kExprRefTestStatic, distinct_struct_index])
+ .exportFunc();
+
+(function TestCanonicalizationSameInstance() {
+ print(arguments.callee.name);
+ let instance = builder.instantiate({});
+ assertEquals(1, instance.exports.test_pass(instance.exports.struct_init()));
+ assertEquals(0, instance.exports.test_fail(instance.exports.struct_init()));
+})();
+
+(function TestCanonicalizationSameModuleDifferentInstances() {
+ print(arguments.callee.name);
+ let module = builder.toModule();
+ let instance1 = new WebAssembly.Instance(module, {});
+ let instance2 = new WebAssembly.Instance(module, {});
+ assertEquals(1, instance2.exports.test_pass(instance1.exports.struct_init()));
+ assertEquals(0, instance2.exports.test_fail(instance1.exports.struct_init()));
+})();
+
+// GC between tests so that the type registry is cleared.
+gc();
+
+(function TestCanonicalizationDifferentModules() {
+ print(arguments.callee.name);
+ let instance1 = builder.instantiate({});
+ let instance2 = builder.instantiate({});
+ assertEquals(1, instance2.exports.test_pass(instance1.exports.struct_init()));
+ assertEquals(0, instance2.exports.test_fail(instance1.exports.struct_init()));
+})();
+
+(function TestCanonicalizationDifferentModulesAfterGC() {
+ print(arguments.callee.name);
+ let struct = (function make_struct() {
+ return builder.instantiate({}).exports.struct_init();
+ })();
+ // The live {struct} object keeps the instance alive.
+ gc();
+ let instance = builder.instantiate({});
+ assertEquals(1, instance.exports.test_pass(struct));
+ assertEquals(0, instance.exports.test_fail(struct));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/speculative-inlining.js b/deps/v8/test/mjsunit/wasm/speculative-inlining.js
index 0572e7449f..f675f25390 100644
--- a/deps/v8/test/mjsunit/wasm/speculative-inlining.js
+++ b/deps/v8/test/mjsunit/wasm/speculative-inlining.js
@@ -155,7 +155,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(8, instance.exports.main(10, 0));
})();
-/* TODO(7748): Implement cross-module subtyping.
(function CallRefImportedFunction() {
print(arguments.callee.name);
@@ -196,7 +195,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// The function f1 defined in another module should not be inlined.
assertEquals(1, instance2.exports.main(0, instance1.exports.f1));
})();
-*/
// Check that we handle WasmJSFunctions properly and do not inline them, both
// in the monomorphic and polymorphic case.
diff --git a/deps/v8/test/mjsunit/wasm/worker-memory.js b/deps/v8/test/mjsunit/wasm/worker-memory.js
index 28a6924ea7..60391c6f3f 100644
--- a/deps/v8/test/mjsunit/wasm/worker-memory.js
+++ b/deps/v8/test/mjsunit/wasm/worker-memory.js
@@ -15,7 +15,13 @@
let worker = new Worker('', {type: 'string'});
let memory = new WebAssembly.Memory({initial: 1, maximum: 2});
- assertThrows(() => worker.postMessage(memory.buffer), Error);
+ worker.postMessage(memory.buffer);
+ assertThrows(() => {
+ worker.postMessage(memory.buffer, [memory.buffer])
+ }, Error)
+ assertThrows(() => {
+ worker.postMessage(undefined, [memory.buffer])
+ }, Error)
})();
// Can't use assert in a worker.
diff --git a/deps/v8/test/mjsunit/wasm/worker-running-empty-loop-interruptible.js b/deps/v8/test/mjsunit/wasm/worker-running-empty-loop-interruptible.js
new file mode 100644
index 0000000000..36817a875b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/worker-running-empty-loop-interruptible.js
@@ -0,0 +1,32 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+// void main() { for (;;) {} }
+builder.addFunction('main', kSig_v_v).addBody([
+ kExprLoop, kWasmVoid, kExprBr, 0, kExprEnd
+]).exportFunc();
+const module = builder.toModule();
+
+function workerCode() {
+ onmessage = function(module) {
+ print('[worker] Creating instance.');
+ let instance = new WebAssembly.Instance(module);
+ print('[worker] Reporting start.');
+ postMessage('start');
+ print('[worker] Running main.');
+ instance.exports.main();
+ };
+}
+
+print('[main] Starting worker.');
+const worker = new Worker(workerCode, {type: 'function'});
+print('[main] Sending module.');
+worker.postMessage(module);
+assertEquals('start', worker.getMessage());
+print('[main] Terminating worker and waiting for it to finish.');
+worker.terminateAndWait();
+print('[main] All done.');
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 63f6e3b3ea..c97726f98b 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -19,6 +19,7 @@
namespace v8 {
static const char* kHeader =
+ "#!/usr/bin/env python3\n"
"# Copyright 2019 the V8 project authors. All rights reserved.\n"
"# Use of this source code is governed by a BSD-style license that can\n"
"# be found in the LICENSE file.\n"
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 5fe23925eb..72448938ff 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -25,7 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
from functools import reduce
import os
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index c0b6e15628..ea1ccefb5f 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -312,7 +312,6 @@
'built-ins/Temporal/PlainDate/prototype/since/smallestunit-wrong-type': [FAIL],
'built-ins/Temporal/Duration/prototype/add/argument-string-negative-fractional-units': [FAIL],
'built-ins/Temporal/Duration/prototype/add/balance-negative-result': [FAIL],
- 'built-ins/Temporal/Duration/prototype/negated/subclassing-ignored': [FAIL],
'built-ins/Temporal/TimeZone/from/argument-valid': [SKIP],
@@ -320,10 +319,6 @@
# https://github.com/tc39/test262/pull/3250
'built-ins/Temporal/PlainDate/prototype/since/largestunit-higher-units': [FAIL],
- # Calendar in TemporalTimeString
- # https://github.com/tc39/test262/pull/3257
- 'built-ins/Temporal/PlainTime/from/argument-string-with-calendar': [FAIL],
-
# PlainTime RelativeTime
# https://github.com/tc39/proposal-temporal/pull/1862
'built-ins/Temporal/Duration/compare/calendar-temporal-object': [FAIL],
@@ -364,6 +359,8 @@
'built-ins/Temporal/Duration/prototype/round/timezone-getpossibleinstantsfor-iterable': [FAIL],
'built-ins/Temporal/Duration/prototype/total/timezone-getpossibleinstantsfor-iterable': [FAIL],
+ 'built-ins/Temporal/Instant/from/instant-string-multiple-offsets': [SKIP],
+
# Valid calendar in the test
# https://github.com/tc39/test262/pull/3261
'built-ins/Temporal/Calendar/from/calendar-string-not-builtin': [FAIL],
@@ -392,12 +389,36 @@
'built-ins/Temporal/PlainDateTime/prototype/until/largestunit-undefined': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11544
- 'built-ins/Temporal/Calendar/prototype/mergeFields/arguments-not-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/fields-missing-properties': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/monthcode-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/overflow-constrain': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/overflow-reject': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthDayFromFields/reference-year-1972': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-plaindatetime': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/add/balance-negative-time-units': [FAIL],
'built-ins/Temporal/Duration/prototype/add/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
'built-ins/Temporal/Duration/prototype/add/relativeto-string-zoneddatetime': [SKIP],
'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-negative-epochnanoseconds': [SKIP],
+ 'built-ins/Temporal/Duration/compare/calendar-dateadd-called-with-options-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/round/balance-negative-result': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/calendar-dateadd-called-with-options-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/round/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
'built-ins/Temporal/Duration/prototype/round/read-time-fields-before-datefromfields': [FAIL],
'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-negative-epochnanoseconds': [SKIP],
@@ -407,6 +428,7 @@
'built-ins/Temporal/Duration/prototype/subtract/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
'built-ins/Temporal/Duration/prototype/subtract/read-time-fields-before-datefromfields': [FAIL],
'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-zoneddatetime': [SKIP],
+ 'built-ins/Temporal/Duration/prototype/total/calendar-dateadd-called-with-options-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/total/calendar-dateuntil-called-with-singular-largestunit': [SKIP],
'built-ins/Temporal/Duration/prototype/total/options-wrong-type': [FAIL],
'built-ins/Temporal/Duration/prototype/total/read-time-fields-before-datefromfields': [FAIL],
@@ -445,15 +467,8 @@
'built-ins/Temporal/Calendar/prototype/dateAdd/throw-range-error-from-ToTemporalDate': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateAdd/throw-range-error-from-ToTemporalDuration': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateAdd/throw-type-error-from-GetOptionsObject': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/fields-not-object': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/overflow-invalid-string': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateFromFields/overflow-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateFromFields/overflow-wrong-type': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/throws-range-error': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/throws-type-error': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateFromFields/throw-type-error-from-GetOptionsObject': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-monthCode-day': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-monthCode-day-need-constrain': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateFromFields/with-year-month-day': [FAIL],
@@ -491,76 +506,21 @@
'built-ins/Temporal/Calendar/prototype/day/date-time': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/month-day': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/plain-date': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/plain-date-time': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/string': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/throw-range-error-ToTemporalDate': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/plain-date': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/plain-date-time': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/string': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/throw-range-error-ToTemporalDate': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/plain-date': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/plain-date-time': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/string': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/throw-range-error-ToTemporalDate': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/calendar-temporal-object': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/date': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/date-time': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/string': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/throw-range-error-ToTemporalDate': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/plain-date': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/plain-date-time': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/string': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/throw-range-error-ToTemporalDate': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/string': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/throw-range-error-ToTemporalDate': [FAIL],
'built-ins/Temporal/Calendar/prototype/fields/argument-iterable-not-array': [FAIL],
@@ -571,18 +531,9 @@
'built-ins/Temporal/Calendar/prototype/fields/non-string-element-throws': [FAIL],
'built-ins/Temporal/Calendar/prototype/fields/repeated-throw': [FAIL],
'built-ins/Temporal/Calendar/prototype/fields/reverse': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/mergeFields/arguments-empty-object': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/mergeFields/branding': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
@@ -618,14 +569,7 @@
'built-ins/Temporal/Calendar/prototype/monthDayFromFields/overflow-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/month-day-throw-type-error': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/string': [FAIL],
@@ -642,17 +586,8 @@
'built-ins/Temporal/Calendar/prototype/weekOfYear/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Calendar/prototype/weekOfYear/cross-year': [FAIL],
'built-ins/Temporal/Calendar/prototype/weekOfYear/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/argument-string-with-utc-designator': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/basic': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/branding': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/calendar-temporal-object': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/date': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/date-time': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/branding': [FAIL],
'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/fields-not-object': [FAIL],
@@ -660,9 +595,6 @@
'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-invalid-string': [FAIL],
'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-wrong-type': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/string': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/throw-range-error-ToTemporalDate': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/year-month': [FAIL],
'built-ins/Temporal/Duration/compare/argument-string-negative-fractional-units': [FAIL],
'built-ins/Temporal/Duration/compare/calendar-dateadd-called-with-plaindate-instance': [FAIL],
'built-ins/Temporal/Duration/compare/options-undefined': [FAIL],
@@ -689,8 +621,6 @@
'built-ins/Temporal/Duration/from/order-of-operations': [FAIL],
'built-ins/Temporal/Duration/from/string-with-skipped-units': [FAIL],
'built-ins/Temporal/Duration/from/subclassing-ignored': [FAIL],
- 'built-ins/Temporal/Duration/prototype/abs/branding': [FAIL],
- 'built-ins/Temporal/Duration/prototype/abs/subclassing-ignored': [FAIL],
'built-ins/Temporal/Duration/prototype/add/branding': [FAIL],
'built-ins/Temporal/Duration/prototype/add/calendar-dateadd-called-with-plaindate-instance': [FAIL],
'built-ins/Temporal/Duration/prototype/add/infinity-throws-rangeerror': [FAIL],
@@ -710,7 +640,6 @@
'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/Duration/prototype/add/subclassing-ignored': [FAIL],
- 'built-ins/Temporal/Duration/prototype/negated/branding': [FAIL],
'built-ins/Temporal/Duration/prototype/round/branding': [FAIL],
'built-ins/Temporal/Duration/prototype/round/calendar-dateadd-called-with-plaindate-instance': [FAIL],
'built-ins/Temporal/Duration/prototype/round/dateuntil-field': [FAIL],
@@ -816,18 +745,7 @@
'built-ins/Temporal/Instant/compare/argument-zoneddatetime': [FAIL],
'built-ins/Temporal/Instant/compare/instant-string': [FAIL],
'built-ins/Temporal/Instant/compare/instant-string-multiple-offsets': [FAIL],
- 'built-ins/Temporal/Instant/from/argument-zoneddatetime': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochMicroseconds/basic': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochMicroseconds/subclassing-ignored': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochMilliseconds/basic': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochMilliseconds/subclassing-ignored': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochNanoseconds/basic': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochNanoseconds/subclassing-ignored': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochSeconds/basic': [FAIL],
- 'built-ins/Temporal/Instant/fromEpochSeconds/subclassing-ignored': [FAIL],
'built-ins/Temporal/Instant/from/instant-string': [FAIL],
- 'built-ins/Temporal/Instant/from/instant-string-multiple-offsets': [FAIL],
- 'built-ins/Temporal/Instant/from/subclassing-ignored': [FAIL],
'built-ins/Temporal/Instant/from/timezone-custom': [FAIL],
'built-ins/Temporal/Instant/prototype/add/argument-string': [FAIL],
'built-ins/Temporal/Instant/prototype/add/argument-string-negative-fractional-units': [FAIL],
@@ -894,6 +812,7 @@
'built-ins/Temporal/Instant/prototype/toJSON/branding': [FAIL],
'built-ins/Temporal/Instant/prototype/toJSON/negative-epochnanoseconds': [FAIL],
'built-ins/Temporal/Instant/prototype/toJSON/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toJSON/year-format': [FAIL],
'built-ins/Temporal/Instant/prototype/toLocaleString/branding': [FAIL],
'built-ins/Temporal/Instant/prototype/toLocaleString/return-string': [FAIL],
'built-ins/Temporal/Instant/prototype/toString/basic': [FAIL],
@@ -923,6 +842,7 @@
'built-ins/Temporal/Instant/prototype/toString/timezone-offset': [FAIL],
'built-ins/Temporal/Instant/prototype/toString/timezone-string-datetime': [FAIL],
'built-ins/Temporal/Instant/prototype/toString/timezone-string-multiple-offsets': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/year-format': [FAIL],
'built-ins/Temporal/Instant/prototype/toZonedDateTime/branding': [FAIL],
'built-ins/Temporal/Instant/prototype/toZonedDateTime/calendar-temporal-object': [FAIL],
'built-ins/Temporal/Instant/prototype/toZonedDateTimeISO/branding': [FAIL],
@@ -977,6 +897,7 @@
'built-ins/Temporal/PlainDate/compare/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/PlainDate/compare/basic': [FAIL],
'built-ins/Temporal/PlainDate/compare/calendar': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDate/compare/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainDate/compare/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainDate/compare/infinity-throws-rangeerror': [FAIL],
@@ -984,7 +905,6 @@
'built-ins/Temporal/PlainDate/from/argument-plaindate': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-plaindatetime': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-string': [FAIL],
- 'built-ins/Temporal/PlainDate/from/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainDate/from/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainDate/from/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainDate/from/limits': [FAIL],
@@ -1013,7 +933,6 @@
'built-ins/Temporal/PlainDate/prototype/add/subclassing-ignored': [FAIL],
'built-ins/Temporal/PlainDate/prototype/dayOfWeek/basic': [FAIL],
'built-ins/Temporal/PlainDate/prototype/dayOfYear/basic': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/daysInWeek/basic': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-object-invalid': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-object-valid': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-plaindatetime': [FAIL],
@@ -1028,11 +947,11 @@
'built-ins/Temporal/PlainDate/prototype/equals/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/calendar-call-different': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/calendar-call-same': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/calendar-no-call': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/monthsInYear/basic': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/argument-plaindatetime': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
@@ -1042,6 +961,7 @@
'built-ins/Temporal/PlainDate/prototype/since/basic': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/calendar-id-match': [FAIL],
@@ -1081,6 +1001,7 @@
'built-ins/Temporal/PlainDate/prototype/subtract/overflow-wrong-type': [FAIL],
'built-ins/Temporal/PlainDate/prototype/subtract/subclassing-ignored': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toJSON/year-format': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toLocaleString/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toLocaleString/return-string': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-with-utc-designator': [FAIL],
@@ -1099,9 +1020,11 @@
'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/calendar-arguments': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/calendar-monthdayfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/calendar-arguments': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/basic': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/calendarname-always': [FAIL],
@@ -1111,6 +1034,7 @@
'built-ins/Temporal/PlainDate/prototype/toString/calendarname-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/calendarname-wrong-type': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toString/year-format': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/basic': [FAIL],
@@ -1138,6 +1062,7 @@
'built-ins/Temporal/PlainDate/prototype/until/basic': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/branding': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/calendar-id-match': [FAIL],
@@ -1311,6 +1236,7 @@
'built-ins/Temporal/PlainDateTime/prototype/subtract/overflow-wrong-type': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/subtract/subclassing-ignored': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toJSON/year-format': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toLocaleString/branding': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toLocaleString/return-string': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainDate/branding': [FAIL],
@@ -1318,10 +1244,12 @@
'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/branding': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/calendar-arguments': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/calendar-monthdayfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainTime/branding': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/branding': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/calendar-arguments': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/branding': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/calendarname-invalid-string': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/calendarname-undefined': [FAIL],
@@ -1341,6 +1269,7 @@
'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-valid-units': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/smallestunit-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/year-format': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/balance-negative-time-units': [SKIP],
'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/branding': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/disambiguation-invalid-string': [FAIL],
@@ -1410,6 +1339,7 @@
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/infinity-throws-rangeerror': [FAIL],
@@ -1431,6 +1361,7 @@
'built-ins/Temporal/PlainMonthDay/basic': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/calendar-monthdayfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/fields-leap-day': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/fields-missing-properties': [FAIL],
@@ -1452,12 +1383,14 @@
'built-ins/Temporal/PlainMonthDay/prototype/equals/basic': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/branding': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/calendar-monthdayfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/calendars': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/monthCode/basic': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toJSON/branding': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toJSON/calendarname': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toJSON/year-format': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toLocaleString/branding': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/argument-not-object': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/branding': [FAIL],
@@ -1474,6 +1407,7 @@
'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-undefined': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-wrong-type': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/toString/year-format': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/valueOf/basic': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/with/basic': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/with/branding': [FAIL],
@@ -1498,22 +1432,8 @@
'built-ins/Temporal/PlainTime/compare/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainTime/compare/plaintime-propertybag-no-time-units': [FAIL],
'built-ins/Temporal/PlainTime/compare/use-internal-slots': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-balance-negative-time-units': [FAIL],
'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-negative-epochnanoseconds': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
- 'built-ins/Temporal/PlainTime/from/calendar-temporal-object': [FAIL],
- 'built-ins/Temporal/PlainTime/from/infinity-throws-rangeerror': [FAIL],
- 'built-ins/Temporal/PlainTime/from/options-undefined': [FAIL],
- 'built-ins/Temporal/PlainTime/from/order-of-operations': [FAIL],
- 'built-ins/Temporal/PlainTime/from/overflow-invalid-string': [FAIL],
- 'built-ins/Temporal/PlainTime/from/overflow-undefined': [FAIL],
- 'built-ins/Temporal/PlainTime/from/overflow-wrong-type': [FAIL],
- 'built-ins/Temporal/PlainTime/from/plaintime-propertybag-no-time-units': [FAIL],
- 'built-ins/Temporal/PlainTime/from/subclassing-ignored': [FAIL],
'built-ins/Temporal/PlainTime/prototype/add/argument-not-object': [FAIL],
'built-ins/Temporal/PlainTime/prototype/add/argument-string': [FAIL],
'built-ins/Temporal/PlainTime/prototype/add/argument-string-negative-fractional-units': [FAIL],
@@ -1599,6 +1519,7 @@
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/basic': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/infinity-throws-rangeerror': [FAIL],
@@ -1627,6 +1548,7 @@
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/basic': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/branding': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/calendar-temporal-object': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/infinity-throws-rangeerror': [FAIL],
@@ -1680,12 +1602,14 @@
'built-ins/Temporal/PlainYearMonth/compare/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/use-internal-slots': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/argument-string': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/order-of-operations': [FAIL],
@@ -1699,6 +1623,7 @@
'built-ins/Temporal/PlainYearMonth/prototype/add/branding': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-arguments-extra-options': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-datefromfields-called': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-daysinmonth-wrong-value': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/infinity-throws-rangeerror': [FAIL],
@@ -1715,14 +1640,17 @@
'built-ins/Temporal/PlainYearMonth/prototype/equals/branding': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/arguments-missing-throws': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/branding': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-disallowed-units': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-invalid-string': [FAIL],
@@ -1752,6 +1680,7 @@
'built-ins/Temporal/PlainYearMonth/prototype/subtract/branding': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-arguments-extra-options': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-arguments': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-datefromfields-called': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-daysinmonth-wrong-value': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/subtract/infinity-throws-rangeerror': [FAIL],
@@ -1764,6 +1693,7 @@
'built-ins/Temporal/PlainYearMonth/prototype/subtract/overflow-wrong-type': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/subtract/subclassing-ignored': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toJSON/branding': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toJSON/year-format': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toLocaleString/branding': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/argument-not-object': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/branding': [FAIL],
@@ -1777,12 +1707,15 @@
'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-wrong-type': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toString/options-undefined': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toString/year-format': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/argument-string-with-utc-designator': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/branding': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-dateadd-called-with-plaindate-instance': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/infinity-throws-rangeerror': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-invalid-string': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/largestunit-plurals-accepted': [FAIL],
@@ -2064,6 +1997,7 @@
'built-ins/Temporal/ZonedDateTime/prototype/since/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/balance-negative-time-units': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-dateadd-called-with-options-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-dateuntil-called-with-copy-of-options': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/calendar-fields-iterable': [FAIL],
@@ -2127,6 +2061,7 @@
'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toJSON/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toJSON/year-format': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toLocaleString/branding': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toLocaleString/return-string': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/branding': [FAIL],
@@ -2142,14 +2077,11 @@
'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainDate/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/branding': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-arguments': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-monthdayfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-result': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-yearmonthfromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/balance-negative-time-units': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/branding': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/negative-epochnanoseconds': [FAIL],
@@ -2157,14 +2089,9 @@
'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainTime/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/branding': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-arguments': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-result': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/balance-negative-time-units': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/branding': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/calendarname-invalid-string': [FAIL],
@@ -2197,9 +2124,11 @@
'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-invalid-string': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/year-format': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/argument-propertybag-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/balance-negative-time-units': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-dateadd-called-with-options-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-dateuntil-called-with-copy-of-options': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-dateuntil-called-with-singular-largestunit': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/calendar-fields-iterable': [FAIL],
@@ -2265,6 +2194,7 @@
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/calendar-datefromfields-called-with-options-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/calendar-fields-iterable': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/calendar-temporal-object': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/infinity-throws-rangeerror': [FAIL],
@@ -2324,10 +2254,12 @@
'intl402/Temporal/Calendar/prototype/era/argument-string-with-utc-designator': [FAIL],
'intl402/Temporal/Calendar/prototype/era/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'intl402/Temporal/Calendar/prototype/era/branding': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/calendar-datefromfields-called-with-options-undefined': [FAIL],
'intl402/Temporal/Calendar/prototype/era/infinity-throws-rangeerror': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/argument-string-with-utc-designator': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/branding': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/calendar-datefromfields-called-with-options-undefined': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/infinity-throws-rangeerror': [FAIL],
'intl402/Temporal/Calendar/prototype/inLeapYear/infinity-throws-rangeerror': [FAIL],
'intl402/Temporal/Calendar/prototype/monthCode/infinity-throws-rangeerror': [FAIL],
@@ -2481,7 +2413,6 @@
'built-ins/Temporal/PlainDate/from/argument-number': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-object-invalid': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-object-valid': [FAIL],
- 'built-ins/Temporal/PlainDate/from/argument-string-overflow': [FAIL],
'built-ins/Temporal/PlainDate/from/year-zero': [FAIL],
'built-ins/Temporal/PlainDate/prototype/add/argument-invalid-duration': [FAIL],
'built-ins/Temporal/PlainDate/prototype/add/argument-missing-properties': [FAIL],
@@ -2554,21 +2485,10 @@
'built-ins/Temporal/PlainTime/compare/argument-string-with-time-designator': [FAIL],
'built-ins/Temporal/PlainTime/compare/basic': [FAIL],
'built-ins/Temporal/PlainTime/compare/year-zero': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-number': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-object': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-object-leap-second': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-plaindatetime': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-plaintime': [FAIL],
'built-ins/Temporal/PlainTime/from/argument-string': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-string-invalid': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-string-leap-second': [FAIL],
'built-ins/Temporal/PlainTime/from/argument-string-no-implicit-midnight': [FAIL],
'built-ins/Temporal/PlainTime/from/argument-string-time-designator-required-for-disambiguation': [FAIL],
- 'built-ins/Temporal/PlainTime/from/argument-string-trailing-junk': [FAIL],
'built-ins/Temporal/PlainTime/from/argument-string-with-time-designator': [FAIL],
- 'built-ins/Temporal/PlainTime/from/options-invalid': [FAIL],
- 'built-ins/Temporal/PlainTime/from/overflow-constrain': [FAIL],
- 'built-ins/Temporal/PlainTime/from/overflow-reject': [FAIL],
'built-ins/Temporal/PlainTime/from/year-zero': [FAIL],
'built-ins/Temporal/PlainTime/prototype/add/argument-duration': [FAIL],
'built-ins/Temporal/PlainTime/prototype/add/argument-higher-units': [FAIL],
@@ -2654,7 +2574,6 @@
'built-ins/Temporal/PlainTime/prototype/until/roundingmode-halfExpand': [FAIL],
'built-ins/Temporal/PlainTime/prototype/until/roundingmode-trunc': [FAIL],
'built-ins/Temporal/PlainTime/prototype/until/year-zero': [FAIL],
- 'built-ins/Temporal/PlainTime/prototype/valueOf/basic': [FAIL],
'built-ins/Temporal/PlainTime/prototype/with/copy-properties-not-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/basic': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/argument-cast': [FAIL],
@@ -2682,15 +2601,12 @@
'built-ins/Temporal/PlainYearMonth/prototype/add/limits': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/month-length': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/add/options-invalid': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/daysInMonth/basic': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/daysInYear/basic': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-cast': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/basic': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/compare-calendar': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/compare-reference-day': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/use-internal-slots': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/year-zero': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/monthsInYear/basic': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/argument-casting': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-auto': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/largestunit-months': [FAIL],
@@ -2766,15 +2682,116 @@
'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-with-time-designator': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/fields-missing-properties': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/monthcode-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/options-not-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-constrain': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearMonthFromFields/overflow-reject': [FAIL],
+ 'built-ins/Temporal/Duration/compare/twenty-five-hour-day': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-object-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-string': [FAIL],
+ 'built-ins/Temporal/Duration/from/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toJSON/basic': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/balance': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/all-negative': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/all-positive': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/argument-object-wrong-shape': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/partial-positive': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/with/sign-conflict-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/basic': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/add/disallowed-duration-units': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/basic': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/subtract/disallowed-duration-units': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/observable-get-overflow-argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-object-insufficient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/cast': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/constructor-full': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/datetime-math': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/hour-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/microsecond-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/millisecond-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/minute-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/nanosecond-undefined': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/ambiguous-date': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-object-insufficient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/argument-plain-object-mixed-signs': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/hour-overflow': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/negative-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/options-empty': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/add/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-object-insufficient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/cast': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/ambiguous-date': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-object-insufficient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/argument-plain-object-mixed-signs': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/hour-overflow': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/negative-duration': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/options-empty': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/subtract/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/casts-argument': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/different-calendars-throws': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/inverse': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/no-unnecessary-units': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/returns-days': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/subseconds': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/units-changed': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/weeks-months-mutually-exclusive': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/weekOfYear/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/argument-object-insufficient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/calendar-throws': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/month-and-monthcode-must-agree': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/multiple-unrecognized-properties-ignored': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/options-empty': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/options-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-object-insuffcient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-plaindate': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string-iso-calendar': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/non-compatible-calendars-throw': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-object-insufficient-data': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-without-time-designator': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-time': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/no-argument-default-to-midnight': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/string-throws': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/timezone-throws': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/second-undefined': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/calendar-always': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/calendar-always': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-string-invalid': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/argument-string-invalid': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/argument-string-invalid': [FAIL],
+
+ 'harness/temporalHelpers-one-shift-time-zone': [SKIP],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=10776
'intl402/NumberFormat/constructor-roundingIncrement': [FAIL],
+ 'intl402/NumberFormat/constructor-roundingIncrement-invalid': [FAIL],
# NumberFormat.prototype.formatRange
'intl402/NumberFormat/prototype/formatRange/en-US': [FAIL],
'intl402/NumberFormat/prototype/formatRange/pt-PT': [FAIL],
-
- # https://github.com/tc39/test262/pull/3425
- 'intl402/NumberFormat/prototype/formatRange/x-greater-than-y-throws': [FAIL],
- 'intl402/NumberFormat/prototype/formatRangeToParts/x-greater-than-y-throws': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-max-min-fraction-significant-digits': [FAIL],
# String handling
'intl402/NumberFormat/prototype/format/format-rounding-increment-1000': [FAIL],
@@ -2839,10 +2856,6 @@
'built-ins/ShadowRealm/prototype/importValue/throws-typeerror-import-syntax-error': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/throws-typeerror-import-throws': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/validates-realm-object': [FAIL],
- 'built-ins/ShadowRealm/WrappedFunction/length': [FAIL],
- 'built-ins/ShadowRealm/WrappedFunction/length-throws-typeerror': [FAIL],
- 'built-ins/ShadowRealm/WrappedFunction/name': [FAIL],
- 'built-ins/ShadowRealm/WrappedFunction/name-throws-typeerror': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=12085
'language/statements/class/subclass/derived-class-return-override-catch-finally': [FAIL],
@@ -2861,15 +2874,8 @@
'built-ins/Date/prototype/setUTCMonth/arg-coercion-order': [FAIL],
'built-ins/Date/prototype/setUTCSeconds/arg-coercion-order': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=12680
- 'language/expressions/logical-assignment/left-hand-side-private-reference-method-short-circuit-nullish': [FAIL],
- 'language/expressions/logical-assignment/left-hand-side-private-reference-method-short-circuit-or': [FAIL],
- 'language/expressions/logical-assignment/left-hand-side-private-reference-readonly-accessor-property-short-circuit-and': [FAIL],
- 'language/expressions/logical-assignment/left-hand-side-private-reference-readonly-accessor-property-short-circuit-nullish': [FAIL],
- 'language/expressions/logical-assignment/left-hand-side-private-reference-readonly-accessor-property-short-circuit-or': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=12044
- 'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
+ 'built-ins/Array/prototype/Symbol.unscopables/array-grouping': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=12681
'built-ins/Array/prototype/push/set-length-zero-array-length-is-non-writable': [FAIL],
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index af4788f6a3..3ab9dac446 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -25,9 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import imp
import itertools
import os
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index d20d3bf0df..3682617a8d 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -313,7 +313,6 @@ v8_source_set("unittests_sources") {
"diagnostics/gdb-jit-unittest.cc",
"execution/microtask-queue-unittest.cc",
"heap/allocation-observer-unittest.cc",
- "heap/barrier-unittest.cc",
"heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",
"heap/code-object-registry-unittest.cc",
@@ -330,6 +329,7 @@ v8_source_set("unittests_sources") {
"heap/heap-utils.cc",
"heap/heap-utils.h",
"heap/index-generator-unittest.cc",
+ "heap/lab-unittest.cc",
"heap/list-unittest.cc",
"heap/local-factory-unittest.cc",
"heap/local-heap-unittest.cc",
@@ -361,6 +361,7 @@ v8_source_set("unittests_sources") {
"libplatform/default-job-unittest.cc",
"libplatform/default-platform-unittest.cc",
"libplatform/default-worker-threads-task-runner-unittest.cc",
+ "libplatform/single-threaded-default-platform-unittest.cc",
"libplatform/task-queue-unittest.cc",
"libplatform/worker-thread-unittest.cc",
"logging/counters-unittest.cc",
@@ -368,6 +369,7 @@ v8_source_set("unittests_sources") {
"numbers/conversions-unittest.cc",
"objects/object-unittest.cc",
"objects/osr-optimized-code-cache-unittest.cc",
+ "objects/swiss-hash-table-helpers-unittest.cc",
"objects/value-serializer-unittest.cc",
"objects/weakarraylist-unittest.cc",
"parser/ast-value-unittest.cc",
diff --git a/deps/v8/test/unittests/api/deserialize-unittest.cc b/deps/v8/test/unittests/api/deserialize-unittest.cc
index 5e6edcff6b..6e3731ecad 100644
--- a/deps/v8/test/unittests/api/deserialize-unittest.cc
+++ b/deps/v8/test/unittests/api/deserialize-unittest.cc
@@ -15,7 +15,7 @@
namespace v8 {
-class DeserializeTest : public testing::Test {
+class DeserializeTest : public TestWithPlatform {
public:
class IsolateAndContextScope {
public:
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index cc4ee6188f..a8ad3c0027 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -4,6 +4,8 @@
#include "src/base/platform/platform.h"
+#include <cstring>
+
#include "testing/gtest/include/gtest/gtest.h"
#if V8_OS_WIN
@@ -24,6 +26,29 @@ TEST(OS, GetCurrentProcessId) {
#endif
}
+TEST(OS, RemapPages) {
+ if constexpr (OS::IsRemapPageSupported()) {
+ size_t size = base::OS::AllocatePageSize();
+ // Data to be remapped, filled with data.
+ void* data = OS::Allocate(nullptr, size, base::OS::AllocatePageSize(),
+ OS::MemoryPermission::kReadWrite);
+ ASSERT_TRUE(data);
+ memset(data, 0xab, size);
+
+ // Target mapping.
+ void* remapped_data =
+ OS::Allocate(nullptr, size, base::OS::AllocatePageSize(),
+ OS::MemoryPermission::kReadWrite);
+ ASSERT_TRUE(remapped_data);
+
+ EXPECT_TRUE(OS::RemapPages(data, size, remapped_data,
+ OS::MemoryPermission::kReadExecute));
+ EXPECT_EQ(0, memcmp(remapped_data, data, size));
+
+ OS::Free(data, size);
+ OS::Free(remapped_data, size);
+ }
+}
namespace {
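The new RemapPages test above is the only usage of OS::RemapPages in this change, so here is a compact sketch of the remap-or-copy pattern it implies. It reuses only the calls visible in the test (OS::IsRemapPageSupported, OS::Allocate, OS::AllocatePageSize, OS::RemapPages); the DuplicateAsExecutable name and the memcpy fallback for unsupported platforms are assumptions for illustration, not part of the commit.

#include <cstring>

#include "src/base/platform/platform.h"

// Illustrative sketch only: remap the pages backing |data| as read+execute,
// or fall back to a plain copy when remapping is not supported.
void* DuplicateAsExecutable(void* data, size_t size) {
  using v8::base::OS;
  void* target = OS::Allocate(nullptr, size, OS::AllocatePageSize(),
                              OS::MemoryPermission::kReadWrite);
  if (target == nullptr) return nullptr;
  if constexpr (OS::IsRemapPageSupported()) {
    // On success the target region aliases |data|, mapped read+execute.
    if (OS::RemapPages(data, size, target, OS::MemoryPermission::kReadExecute))
      return target;
  }
  std::memcpy(target, data, size);  // Assumed fallback; stays read+write.
  return target;
}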
diff --git a/deps/v8/test/unittests/base/template-utils-unittest.cc b/deps/v8/test/unittests/base/template-utils-unittest.cc
index 4b1f3b834b..8b090f034c 100644
--- a/deps/v8/test/unittests/base/template-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/template-utils-unittest.cc
@@ -107,63 +107,6 @@ static_assert(has_output_operator<TestClass3>::value,
static_assert(has_output_operator<const TestClass3>::value,
"const TestClass3 can be output");
-//////////////////////////////
-// Test fold.
-//////////////////////////////
-
-struct FoldAllSameType {
- constexpr uint32_t operator()(uint32_t a, uint32_t b) const { return a | b; }
-};
-static_assert(base::fold(FoldAllSameType{}, 3, 6) == 7, "check fold");
-// Test that it works if implicit conversion is needed for one of the
-// parameters.
-static_assert(base::fold(FoldAllSameType{}, uint8_t{1}, 256) == 257,
- "check correct type inference");
-// Test a single parameter.
-static_assert(base::fold(FoldAllSameType{}, 25) == 25,
- "check folding a single argument");
-
-TEST(TemplateUtilsTest, FoldDifferentType) {
- auto fn = [](std::string str, char c) {
- str.push_back(c);
- return str;
- };
- CHECK_EQ(base::fold(fn, std::string("foo"), 'b', 'a', 'r'), "foobar");
-}
-
-TEST(TemplateUtilsTest, FoldMoveOnlyType) {
- auto fn = [](std::unique_ptr<std::string> str, char c) {
- str->push_back(c);
- return str;
- };
- std::unique_ptr<std::string> str = std::make_unique<std::string>("foo");
- std::unique_ptr<std::string> folded =
- base::fold(fn, std::move(str), 'b', 'a', 'r');
- CHECK_NULL(str);
- CHECK_NOT_NULL(folded);
- CHECK_EQ(*folded, "foobar");
-}
-
-struct TemplatizedFoldFunctor {
- template <typename T, typename... Tup>
- std::tuple<Tup..., typename std::decay<T>::type> operator()(
- std::tuple<Tup...> tup, T&& val) {
- return std::tuple_cat(std::move(tup),
- std::make_tuple(std::forward<T>(val)));
- }
-};
-TEST(TemplateUtilsTest, FoldToTuple) {
- auto input = std::make_tuple(char{'x'}, int{4}, double{3.2},
- std::unique_ptr<uint8_t>{}, std::string{"foo"});
- auto result =
- base::fold(TemplatizedFoldFunctor{}, std::make_tuple(),
- std::get<0>(input), std::get<1>(input), std::get<2>(input),
- std::unique_ptr<uint8_t>{}, std::get<4>(input));
- static_assert(std::is_same<decltype(result), decltype(input)>::value,
- "the resulting tuple should have the same type as the input");
- DCHECK_EQ(input, result);
-}
-
} // namespace template_utils_unittest
} // namespace base
} // namespace v8
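The block deleted above removes the unit tests for base::fold along with the helper's usage examples. For reference, the left-fold behaviour those tests exercised (type-changing accumulation over a parameter pack) can be sketched as below; this is an illustrative stand-in written for this note, not the helper that was removed from V8.

#include <cassert>
#include <string>
#include <utility>

// Base case: a single value folds to itself.
template <typename Fn, typename T>
auto fold(Fn&&, T&& init) {
  return std::forward<T>(init);
}

// Recursive case: combine the accumulator with the next value, then recurse.
template <typename Fn, typename T, typename U, typename... Rest>
auto fold(Fn&& fn, T&& init, U&& next, Rest&&... rest) {
  return fold(std::forward<Fn>(fn),
              fn(std::forward<T>(init), std::forward<U>(next)),
              std::forward<Rest>(rest)...);
}

int main() {
  // Same shapes as the deleted tests: same-type fold and type-changing fold.
  assert(fold([](int a, int b) { return a | b; }, 3, 6) == 7);
  auto append = [](std::string str, char c) {
    str.push_back(c);
    return str;
  };
  assert(fold(append, std::string("foo"), 'b', 'a', 'r') == "foobar");
  return 0;
}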
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 02bf80876a..5d57cf5614 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -43,9 +43,8 @@ class LazyCompileDispatcherTestFlags {
static void SetFlagsForTest() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
- FLAG_single_threaded = true;
- FlagList::EnforceFlagImplications();
FLAG_lazy_compile_dispatcher = true;
+ FlagList::EnforceFlagImplications();
}
static void RestoreFlags() {
@@ -68,13 +67,13 @@ class LazyCompileDispatcherTest : public TestWithNativeContext {
LazyCompileDispatcherTest& operator=(const LazyCompileDispatcherTest&) =
delete;
- static void SetUpTestCase() {
+ static void SetUpTestSuite() {
LazyCompileDispatcherTestFlags::SetFlagsForTest();
- TestWithNativeContext::SetUpTestCase();
+ TestWithNativeContext::SetUpTestSuite();
}
- static void TearDownTestCase() {
- TestWithNativeContext::TearDownTestCase();
+ static void TearDownTestSuite() {
+ TestWithNativeContext::TearDownTestSuite();
LazyCompileDispatcherTestFlags::RestoreFlags();
}
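The SetUpTestCase/TearDownTestCase renames in this and the following test files track googletest's current naming, where the per-suite hooks are spelled SetUpTestSuite/TearDownTestSuite and the old *TestCase spellings are deprecated aliases. A minimal fixture showing the pattern (illustrative, not V8 code):

#include "testing/gtest/include/gtest/gtest.h"

class SuiteScopedResourceTest : public ::testing::Test {
 protected:
  // Runs once before the first test in this suite.
  static void SetUpTestSuite() { shared_counter_ = new int(0); }
  // Runs once after the last test in this suite.
  static void TearDownTestSuite() {
    delete shared_counter_;
    shared_counter_ = nullptr;
  }
  static int* shared_counter_;
};

int* SuiteScopedResourceTest::shared_counter_ = nullptr;

TEST_F(SuiteScopedResourceTest, SuiteStateIsAvailable) {
  ASSERT_NE(nullptr, shared_counter_);
  ++*shared_counter_;
}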
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index 0e323f6afa..ec27b2fb58 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -26,11 +26,10 @@ using OptimizingCompileDispatcherTest = TestWithNativeContext;
namespace {
-class BlockingCompilationJob : public OptimizedCompilationJob {
+class BlockingCompilationJob : public TurbofanCompilationJob {
public:
BlockingCompilationJob(Isolate* isolate, Handle<JSFunction> function)
- : OptimizedCompilationJob(&info_, "BlockingCompilationJob",
- State::kReadyToExecute),
+ : TurbofanCompilationJob(&info_, State::kReadyToExecute),
shared_(function->shared(), isolate),
zone_(isolate->allocator(), ZONE_NAME),
info_(&zone_, isolate, shared_, function, CodeKind::TURBOFAN),
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 021c88374b..4cd7b4427f 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -28,17 +28,17 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
BytecodeAnalysisTest(const BytecodeAnalysisTest&) = delete;
BytecodeAnalysisTest& operator=(const BytecodeAnalysisTest&) = delete;
- static void SetUpTestCase() {
+ static void SetUpTestSuite() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
i::FLAG_ignition_elide_noneffectful_bytecodes = false;
i::FLAG_ignition_reo = false;
- TestWithIsolateAndZone::SetUpTestCase();
+ TestWithIsolateAndZone::SetUpTestSuite();
}
- static void TearDownTestCase() {
- TestWithIsolateAndZone::TearDownTestCase();
+ static void TearDownTestSuite() {
+ TestWithIsolateAndZone::TearDownTestSuite();
delete save_flags_;
save_flags_ = nullptr;
}
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index c97bb96b49..ef8d4beb68 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -28,19 +28,21 @@ class CommonOperatorReducerTest : public GraphTest {
protected:
Reduction Reduce(
AdvancedReducer::Editor* editor, Node* node,
+ BranchSemantics branch_semantics,
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
JSHeapBroker broker(isolate(), zone());
MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
flags);
CommonOperatorReducer reducer(editor, graph(), &broker, common(), &machine,
- zone());
+ zone(), branch_semantics);
return reducer.Reduce(node);
}
- Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kNoFlags) {
+ Reduction Reduce(
+ Node* node, BranchSemantics branch_semantics,
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
StrictMock<MockAdvancedReducerEditor> editor;
- return Reduce(&editor, node, flags);
+ return Reduce(&editor, node, branch_semantics, flags);
}
MachineOperatorBuilder* machine() { return &machine_; }
@@ -84,7 +86,7 @@ TEST_F(CommonOperatorReducerTest, BranchWithInt32ZeroConstant) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_true, IsDead()));
EXPECT_CALL(editor, Replace(if_false, control));
- Reduction const r = Reduce(&editor, branch);
+ Reduction const r = Reduce(&editor, branch, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
@@ -101,7 +103,7 @@ TEST_F(CommonOperatorReducerTest, BranchWithInt32OneConstant) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_true, control));
EXPECT_CALL(editor, Replace(if_false, IsDead()));
- Reduction const r = Reduce(&editor, branch);
+ Reduction const r = Reduce(&editor, branch, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
@@ -118,7 +120,7 @@ TEST_F(CommonOperatorReducerTest, BranchWithFalseConstant) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_true, IsDead()));
EXPECT_CALL(editor, Replace(if_false, control));
- Reduction const r = Reduce(&editor, branch);
+ Reduction const r = Reduce(&editor, branch, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
@@ -135,7 +137,7 @@ TEST_F(CommonOperatorReducerTest, BranchWithTrueConstant) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_true, control));
EXPECT_CALL(editor, Replace(if_false, IsDead()));
- Reduction const r = Reduce(&editor, branch);
+ Reduction const r = Reduce(&editor, branch, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
@@ -151,7 +153,7 @@ TEST_F(CommonOperatorReducerTest, BranchWithBooleanNot) {
graph()->NewNode(simplified()->BooleanNot(), value), control);
Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
- Reduction const r = Reduce(branch);
+ Reduction const r = Reduce(branch, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(branch, r.replacement());
EXPECT_THAT(branch, IsBranch(value, control));
@@ -172,7 +174,7 @@ TEST_F(CommonOperatorReducerTest, BranchWithSelect) {
control);
Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
- Reduction const r = Reduce(branch);
+ Reduction const r = Reduce(branch, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(branch, r.replacement());
EXPECT_THAT(branch, IsBranch(value, control));
@@ -193,7 +195,8 @@ TEST_F(CommonOperatorReducerTest, MergeOfUnusedDiamond0) {
Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
Reduction const r =
- Reduce(graph()->NewNode(common()->Merge(2), if_true, if_false));
+ Reduce(graph()->NewNode(common()->Merge(2), if_true, if_false),
+ BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(control, r.replacement());
EXPECT_THAT(branch, IsDead());
@@ -207,7 +210,8 @@ TEST_F(CommonOperatorReducerTest, MergeOfUnusedDiamond1) {
Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
Reduction const r =
- Reduce(graph()->NewNode(common()->Merge(2), if_false, if_true));
+ Reduce(graph()->NewNode(common()->Merge(2), if_false, if_true),
+ BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(control, r.replacement());
EXPECT_THAT(branch, IsDead());
@@ -236,8 +240,10 @@ TEST_F(CommonOperatorReducerTest, EffectPhiWithMerge) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r =
- Reduce(&editor, graph()->NewNode(common()->EffectPhi(value_input_count),
- input_count, inputs));
+ Reduce(&editor,
+ graph()->NewNode(common()->EffectPhi(value_input_count),
+ input_count, inputs),
+ BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -253,7 +259,7 @@ TEST_F(CommonOperatorReducerTest, EffectPhiWithLoop) {
ephi->ReplaceInput(1, ephi);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(loop));
- Reduction const r = Reduce(&editor, ephi);
+ Reduction const r = Reduce(&editor, ephi, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(e0, r.replacement());
}
@@ -281,9 +287,11 @@ TEST_F(CommonOperatorReducerTest, PhiWithMerge) {
inputs[value_input_count] = merge;
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(
- &editor, graph()->NewNode(common()->Phi(rep, value_input_count),
- input_count, inputs));
+ Reduction r =
+ Reduce(&editor,
+ graph()->NewNode(common()->Phi(rep, value_input_count),
+ input_count, inputs),
+ BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -301,7 +309,7 @@ TEST_F(CommonOperatorReducerTest, PhiWithLoop) {
phi->ReplaceInput(1, phi);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(loop));
- Reduction const r = Reduce(&editor, phi);
+ Reduction const r = Reduce(&editor, phi, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
@@ -321,7 +329,7 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat32Abs) {
common()->Phi(MachineRepresentation::kFloat32, 2), vtrue, vfalse, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi);
+ Reduction r = Reduce(&editor, phi, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
}
@@ -341,7 +349,7 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi);
+ Reduction r = Reduce(&editor, phi, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
}
@@ -370,7 +378,7 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) {
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(merge, IsDead()));
- Reduction const r = Reduce(&editor, ret);
+ Reduction const r = Reduce(&editor, ret, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
EXPECT_THAT(graph()->end(), IsEnd(ret, IsReturn(vtrue, etrue, if_true),
@@ -400,7 +408,7 @@ TEST_F(CommonOperatorReducerTest, MultiReturnWithPhiAndEffectPhiAndMerge) {
graph()->NewNode(common()->Return(2), zero, phi1, phi2, ephi, merge);
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
StrictMock<MockAdvancedReducerEditor> editor;
- Reduction const r = Reduce(&editor, ret);
+ Reduction const r = Reduce(&editor, ret, BranchSemantics::kJS);
// For now a return with multiple return values should not be reduced.
ASSERT_TRUE(!r.Changed());
}
@@ -414,7 +422,8 @@ TEST_F(CommonOperatorReducerTest, SelectWithSameThenAndElse) {
TRACED_FOREACH(BranchHint, hint, kBranchHints) {
TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
Reduction r = Reduce(
- graph()->NewNode(common()->Select(rep, hint), input, input, input));
+ graph()->NewNode(common()->Select(rep, hint), input, input, input),
+ BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -428,7 +437,7 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt32ZeroConstant) {
Node* select =
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
Int32Constant(0), p0, p1);
- Reduction r = Reduce(select);
+ Reduction r = Reduce(select, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p1, r.replacement());
}
@@ -440,7 +449,7 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt32OneConstant) {
Node* select =
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
Int32Constant(1), p0, p1);
- Reduction r = Reduce(select);
+ Reduction r = Reduce(select, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
@@ -452,7 +461,7 @@ TEST_F(CommonOperatorReducerTest, SelectWithFalseConstant) {
Node* select =
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
FalseConstant(), p0, p1);
- Reduction r = Reduce(select);
+ Reduction r = Reduce(select, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p1, r.replacement());
}
@@ -463,7 +472,7 @@ TEST_F(CommonOperatorReducerTest, SelectWithTrueConstant) {
Node* p1 = Parameter(1);
Node* select = graph()->NewNode(
common()->Select(MachineRepresentation::kTagged), TrueConstant(), p0, p1);
- Reduction r = Reduce(select);
+ Reduction r = Reduce(select, BranchSemantics::kJS);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
@@ -476,7 +485,7 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat32Abs) {
Node* select =
graph()->NewNode(common()->Select(MachineRepresentation::kFloat32), check,
p0, graph()->NewNode(machine()->Float32Sub(), c0, p0));
- Reduction r = Reduce(select);
+ Reduction r = Reduce(select, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
}
@@ -489,7 +498,7 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
Node* select =
graph()->NewNode(common()->Select(MachineRepresentation::kFloat64), check,
p0, graph()->NewNode(machine()->Float64Sub(), c0, p0));
- Reduction r = Reduce(select);
+ Reduction r = Reduce(select, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
}
@@ -506,7 +515,7 @@ TEST_F(CommonOperatorReducerTest, SwitchInputMatchesCaseWithDefault) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_1, control));
- Reduction r = Reduce(&editor, sw);
+ Reduction r = Reduce(&editor, sw, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
@@ -520,7 +529,7 @@ TEST_F(CommonOperatorReducerTest, SwitchInputMatchesDefaultWithCase) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_default, control));
- Reduction r = Reduce(&editor, sw);
+ Reduction r = Reduce(&editor, sw, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
@@ -535,7 +544,7 @@ TEST_F(CommonOperatorReducerTest, SwitchInputMatchesCaseExtraCaseWithDefault) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(if_0, control));
- Reduction r = Reduce(&editor, sw);
+ Reduction r = Reduce(&editor, sw, BranchSemantics::kMachine);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index 98de4c7f7c..ba777a8cac 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -413,14 +413,14 @@ class GraphReducerTest : public TestWithZone {
public:
GraphReducerTest() : TestWithZone(kCompressGraphZone), graph_(zone()) {}
- static void SetUpTestCase() {
- TestWithZone::SetUpTestCase();
+ static void SetUpTestSuite() {
+ TestWithZone::SetUpTestSuite();
DefaultValue<Reduction>::Set(Reducer::NoChange());
}
- static void TearDownTestCase() {
+ static void TearDownTestSuite() {
DefaultValue<Reduction>::Clear();
- TestWithZone::TearDownTestCase();
+ TestWithZone::TearDownTestSuite();
}
protected:
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index b6376ff280..d126c4d7d0 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -901,6 +901,31 @@ TEST_F(InstructionSelectorTest, SIMDSplatZero) {
}
}
+TEST_F(InstructionSelectorTest, Int32AddMinNegativeDisplacement) {
+ // This test case is simplified from a Wasm fuzz test in
+ // https://crbug.com/1091892. The key here is that we match on a
+ // sequence like: Int32Add(Int32Sub(-524288, -2147483648), -26048), which
+ // matches on an EmitLea, with -2147483648 as the displacement. Since we
+ // have an Int32Sub node, it sets kNegativeDisplacement, and later we try to
+ // negate -2147483648, which overflows.
+ StreamBuilder m(this, MachineType::Int32());
+ Node* const c0 = m.Int32Constant(-524288);
+ Node* const c1 = m.Int32Constant(std::numeric_limits<int32_t>::min());
+ Node* const c2 = m.Int32Constant(-26048);
+ Node* const a0 = m.Int32Sub(c0, c1);
+ Node* const a1 = m.Int32Add(a0, c2);
+ m.Return(a1);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+
+ EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(2147457600,
+ ImmediateOperand::cast(s[0]->InputAt(1))->inline_int32_value());
+}
+
struct SwizzleConstants {
uint8_t shuffle[kSimd128Size];
bool omit_add;
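As a sanity check on the constant that the new Int32AddMinNegativeDisplacement test expects: negating -2147483648 overflows 32-bit signed arithmetic (that is the bug being regression-tested), while performing the same negation and the -26048 addition with unsigned 32-bit wrap-around lands exactly on the 2147457600 immediate asserted above. The snippet below only reproduces that arithmetic; it is not the selector's folding code.

#include <cstdint>
#include <cstdio>

int main() {
  // -INT32_MIN is undefined in signed math, so wrap explicitly via uint32_t.
  uint32_t displacement = static_cast<uint32_t>(INT32_MIN);   // 0x80000000
  uint32_t negated = 0u - displacement;                       // wraps to 0x80000000
  uint32_t folded = negated + static_cast<uint32_t>(-26048);  // 0x7FFF9A40
  // Prints 2147457600, matching the immediate checked on the kIA32Lea input.
  std::printf("%d\n", static_cast<int32_t>(folded));
  return 0;
}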
diff --git a/deps/v8/test/unittests/compiler/persistent-unittest.cc b/deps/v8/test/unittests/compiler/persistent-unittest.cc
index 4c5a1974c7..89c65c2579 100644
--- a/deps/v8/test/unittests/compiler/persistent-unittest.cc
+++ b/deps/v8/test/unittests/compiler/persistent-unittest.cc
@@ -17,7 +17,9 @@ static int small_big_distr(base::RandomNumberGenerator* rand) {
return rand->NextInt() / std::max(1, rand->NextInt() / 100);
}
-TEST(PersistentMap, RefTest) {
+class PersistentMapTest : public TestWithPlatform {};
+
+TEST_F(PersistentMapTest, RefTest) {
base::RandomNumberGenerator rand(92834738);
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -76,7 +78,7 @@ TEST(PersistentMap, RefTest) {
}
}
-TEST(PersistentMap, Zip) {
+TEST_F(PersistentMapTest, Zip) {
base::RandomNumberGenerator rand(92834738);
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 6eddb961ca..5c85cdd1bf 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -50,7 +50,8 @@ class SimplifiedLoweringTest : public GraphTest {
Linkage* linkage = zone()->New<Linkage>(Linkage::GetJSCallDescriptor(
zone(), false, num_parameters_ + 1, CallDescriptor::kCanUseRoots));
SimplifiedLowering lowering(jsgraph(), broker(), zone(), source_positions(),
- node_origins(), tick_counter(), linkage);
+ node_origins(), tick_counter(), linkage,
+ nullptr);
lowering.LowerAllNodes();
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index b73207ad98..6e839a3304 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -35,7 +35,8 @@ class SimplifiedOperatorReducerTest : public GraphTest {
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
GraphReducer graph_reducer(zone(), graph(), tick_counter(), broker());
- SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph, broker());
+ SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph, broker(),
+ BranchSemantics::kJS);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 1ba4a29ceb..439c7742ba 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1847,6 +1847,151 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xFF) {
}
}
+TEST_F(InstructionSelectorTest, Word64AndWith0xFFFFFFFF) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(p0, m.Int32Constant(0xFFFFFFFF));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(m.Int32Constant(0xFFFFFFFF), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64AndWith0xFFFF) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(p0, m.Int32Constant(0xFFFF));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwq, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(m.Int32Constant(0xFFFF), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwq, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64AndWith0xFF) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(p0, m.Int32Constant(0xFF));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbq, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(m.Int32Constant(0xFF), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbq, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64AndWithInt64FitsUint32) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(p0, m.Int64Constant(15));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64And32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(m.Int64Constant(15), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64And32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64AndWithInt64DontFitsUint32) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(p0, m.Int64Constant(0x100000000));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64And, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64And(m.Int64Constant(0x100000000), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64And, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
TEST_F(InstructionSelectorTest, Word32AndWith0xFFFF) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
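The Word64And tests added above lean on the identity that masking a 64-bit value with 0xFFFFFFFF, 0xFFFF, or 0xFF is the same as zero-extending its low 32, 16, or 8 bits, which is why a single zero-extending move (kX64Movl, kX64Movzxwq, kX64Movzxbq) can stand in for the and instruction. A quick portable check of those identities (illustrative only, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t values[] = {0x123456789ABCDEF0ull, ~0ull, 0ull,
                             0x8000000000000001ull};
  for (uint64_t x : values) {
    // x & 0xFFFFFFFF == zero-extended low 32 bits (movl clears the high half).
    assert((x & 0xFFFFFFFFull) == static_cast<uint32_t>(x));
    // x & 0xFFFF == zero-extended low 16 bits (movzxwq).
    assert((x & 0xFFFFull) == static_cast<uint16_t>(x));
    // x & 0xFF == zero-extended low 8 bits (movzxbq).
    assert((x & 0xFFull) == static_cast<uint8_t>(x));
  }
  return 0;
}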
diff --git a/deps/v8/test/unittests/compiler/zone-stats-unittest.cc b/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
index c75ba1eff7..7187d35000 100644
--- a/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
+++ b/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
@@ -10,7 +10,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class ZoneStatsTest : public ::testing::Test {
+class ZoneStatsTest : public TestWithPlatform {
public:
ZoneStatsTest() : zone_stats_(&allocator_) {}
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 736de80762..55e36c9f44 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -39,16 +39,16 @@ class WithFinalizationRegistryMixin : public TMixin {
WithFinalizationRegistryMixin& operator=(
const WithFinalizationRegistryMixin&) = delete;
- static void SetUpTestCase() {
+ static void SetUpTestSuite() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
FLAG_expose_gc = true;
FLAG_allow_natives_syntax = true;
- TMixin::SetUpTestCase();
+ TMixin::SetUpTestSuite();
}
- static void TearDownTestCase() {
- TMixin::TearDownTestCase();
+ static void TearDownTestSuite() {
+ TMixin::TearDownTestSuite();
CHECK_NOT_NULL(save_flags_);
delete save_flags_;
save_flags_ = nullptr;
@@ -67,7 +67,8 @@ using TestWithNativeContextAndFinalizationRegistry = //
WithFinalizationRegistryMixin< //
WithIsolateScopeMixin< //
WithIsolateMixin< //
- ::testing::Test>>>>>;
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>>>>;
namespace {
diff --git a/deps/v8/test/unittests/heap/barrier-unittest.cc b/deps/v8/test/unittests/heap/barrier-unittest.cc
deleted file mode 100644
index 99cf5d8978..0000000000
--- a/deps/v8/test/unittests/heap/barrier-unittest.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/barrier.h"
-#include "src/base/platform/platform.h"
-#include "src/base/platform/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-namespace heap {
-
-namespace {
-
-// Large timeout that will not trigger in tests.
-constexpr base::TimeDelta test_timeout = base::TimeDelta::FromHours(3);
-
-} // namespace
-
-TEST(OneshotBarrier, InitializeNotDone) {
- OneshotBarrier barrier(test_timeout);
- EXPECT_FALSE(barrier.DoneForTesting());
-}
-
-TEST(OneshotBarrier, DoneAfterWait_Sequential) {
- OneshotBarrier barrier(test_timeout);
- barrier.Start();
- barrier.Wait();
- EXPECT_TRUE(barrier.DoneForTesting());
-}
-
-namespace {
-
-class ThreadWaitingOnBarrier final : public base::Thread {
- public:
- ThreadWaitingOnBarrier()
- : base::Thread(Options("ThreadWaitingOnBarrier")), barrier_(nullptr) {}
-
- void Initialize(OneshotBarrier* barrier) { barrier_ = barrier; }
-
- void Run() final { barrier_->Wait(); }
-
- private:
- OneshotBarrier* barrier_;
-};
-
-} // namespace
-
-TEST(OneshotBarrier, DoneAfterWait_Concurrent) {
- const int kThreadCount = 2;
- OneshotBarrier barrier(test_timeout);
- ThreadWaitingOnBarrier threads[kThreadCount];
- for (int i = 0; i < kThreadCount; i++) {
- threads[i].Initialize(&barrier);
- // All threads need to call Wait() to be done.
- barrier.Start();
- }
- for (int i = 0; i < kThreadCount; i++) {
- CHECK(threads[i].Start());
- }
- for (int i = 0; i < kThreadCount; i++) {
- threads[i].Join();
- }
- EXPECT_TRUE(barrier.DoneForTesting());
-}
-
-TEST(OneshotBarrier, EarlyFinish_Concurrent) {
- const int kThreadCount = 2;
- OneshotBarrier barrier(test_timeout);
- ThreadWaitingOnBarrier threads[kThreadCount];
- // Test that one thread that actually finishes processing work before other
- // threads call Start() will move the barrier in Done state.
- barrier.Start();
- barrier.Wait();
- EXPECT_TRUE(barrier.DoneForTesting());
- for (int i = 0; i < kThreadCount; i++) {
- threads[i].Initialize(&barrier);
- // All threads need to call Wait() to be done.
- barrier.Start();
- }
- for (int i = 0; i < kThreadCount; i++) {
- CHECK(threads[i].Start());
- }
- for (int i = 0; i < kThreadCount; i++) {
- threads[i].Join();
- }
- EXPECT_TRUE(barrier.DoneForTesting());
-}
-
-namespace {
-
-class CountingThread final : public base::Thread {
- public:
- CountingThread(OneshotBarrier* barrier, base::Mutex* mutex, size_t* work)
- : base::Thread(Options("CountingThread")),
- barrier_(barrier),
- mutex_(mutex),
- work_(work),
- processed_work_(0) {}
-
- void Run() final {
- do {
- ProcessWork();
- } while (!barrier_->Wait());
- // Main thread is not processing work, so we need one last step.
- ProcessWork();
- }
-
- size_t processed_work() const { return processed_work_; }
-
- private:
- void ProcessWork() {
- base::MutexGuard guard(mutex_);
- processed_work_ += *work_;
- *work_ = 0;
- }
-
- OneshotBarrier* const barrier_;
- base::Mutex* const mutex_;
- size_t* const work_;
- size_t processed_work_;
-};
-
-} // namespace
-
-TEST(OneshotBarrier, Processing_Concurrent) {
- const size_t kWorkCounter = 173173;
- OneshotBarrier barrier(test_timeout);
- base::Mutex mutex;
- size_t work = 0;
- CountingThread counting_thread(&barrier, &mutex, &work);
- barrier.Start();
- barrier.Start();
- EXPECT_FALSE(barrier.DoneForTesting());
- CHECK(counting_thread.Start());
-
- for (size_t i = 0; i < kWorkCounter; i++) {
- {
- base::MutexGuard guard(&mutex);
- work++;
- }
- barrier.NotifyAll();
- }
- barrier.Wait();
- counting_thread.Join();
- EXPECT_TRUE(barrier.DoneForTesting());
- EXPECT_EQ(kWorkCounter, counting_thread.processed_work());
-}
-
-} // namespace heap
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc b/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
index a7b2be0b11..9a18c49a2c 100644
--- a/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
@@ -184,11 +184,9 @@ TEST_F(CppgcAllocationTest, LargeDoubleWordAlignedAllocation) {
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromUnaligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* padding_object =
- MakeGarbageCollected<CustomPadding<16>>(GetAllocationHandle());
- // First allocation is not aligned.
- ASSERT_EQ(kWord,
- reinterpret_cast<uintptr_t>(padding_object) & kAlignmentMask);
- // The end should also not be properly aligned.
+ MakeGarbageCollected<CustomPadding<kWord>>(GetAllocationHandle());
+ // The address from which the next object can be allocated, i.e. the end of
+ // |padding_object|, should not be properly aligned.
ASSERT_EQ(kWord, (reinterpret_cast<uintptr_t>(padding_object) +
sizeof(*padding_object)) &
kAlignmentMask);
@@ -204,11 +202,9 @@ TEST_F(CppgcAllocationTest, AlignToDoubleWordFromUnaligned) {
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromAligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* padding_object =
- MakeGarbageCollected<CustomPadding<kWord>>(GetAllocationHandle());
- // First allocation is not aligned.
- ASSERT_EQ(kWord,
- reinterpret_cast<uintptr_t>(padding_object) & kAlignmentMask);
- // The end should be properly aligned.
+ MakeGarbageCollected<CustomPadding<16>>(GetAllocationHandle());
+ // The address from which the next object can be allocated, i.e. the end of
+ // |padding_object|, should be properly aligned.
ASSERT_EQ(0u, (reinterpret_cast<uintptr_t>(padding_object) +
sizeof(*padding_object)) &
kAlignmentMask);
diff --git a/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc b/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
index 0bfad3b2f0..c80baa48ea 100644
--- a/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
@@ -142,8 +142,7 @@ struct PostConstructionCallbackTrait<
template <typename T>
struct PostConstructionCallbackTrait<
- T,
- internal::void_t<typename T::MarkerForMixinWithPostConstructionCallback>> {
+ T, std::void_t<typename T::MarkerForMixinWithPostConstructionCallback>> {
// The parameter could just be T*.
static void Call(
internal::GCedWithMixinWithPostConstructionCallback* object) {
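The hunk above swaps V8's internal void_t helper for std::void_t, which has been part of the standard library since C++17 and drives the same detection idiom: a partial specialization is chosen only when the probed nested type actually exists. A minimal sketch of that idiom, reusing the marker name from the trait above (the HasPostConstructionMarker name is made up for this example):

#include <type_traits>

// Primary template: matches any T and reports "no marker".
template <typename T, typename = void>
struct HasPostConstructionMarker : std::false_type {};

// Selected only when the nested marker type exists; otherwise SFINAE
// discards this specialization and the primary template is used.
template <typename T>
struct HasPostConstructionMarker<
    T, std::void_t<typename T::MarkerForMixinWithPostConstructionCallback>>
    : std::true_type {};

struct Plain {};
struct Mixin {
  using MarkerForMixinWithPostConstructionCallback = void;
};

static_assert(!HasPostConstructionMarker<Plain>::value, "no marker");
static_assert(HasPostConstructionMarker<Mixin>::value, "marker detected");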
diff --git a/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc b/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc
index a21e985498..32e0802b2e 100644
--- a/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc
@@ -13,9 +13,9 @@ namespace internal {
namespace {
class MetricRecorderImpl final : public MetricRecorder {
public:
- void AddMainThreadEvent(const FullCycle& event) final {
- FullCycle_event = event;
- FullCycle_callcount++;
+ void AddMainThreadEvent(const GCCycle& event) final {
+ GCCycle_event = event;
+ GCCycle_callcount++;
}
void AddMainThreadEvent(const MainThreadIncrementalMark& event) final {
MainThreadIncrementalMark_event = event;
@@ -26,8 +26,8 @@ class MetricRecorderImpl final : public MetricRecorder {
MainThreadIncrementalSweep_callcount++;
}
- static size_t FullCycle_callcount;
- static FullCycle FullCycle_event;
+ static size_t GCCycle_callcount;
+ static GCCycle GCCycle_event;
static size_t MainThreadIncrementalMark_callcount;
static MainThreadIncrementalMark MainThreadIncrementalMark_event;
static size_t MainThreadIncrementalSweep_callcount;
@@ -35,8 +35,8 @@ class MetricRecorderImpl final : public MetricRecorder {
};
// static
-size_t MetricRecorderImpl::FullCycle_callcount = 0u;
-MetricRecorderImpl::FullCycle MetricRecorderImpl::FullCycle_event;
+size_t MetricRecorderImpl::GCCycle_callcount = 0u;
+MetricRecorderImpl::GCCycle MetricRecorderImpl::GCCycle_event;
size_t MetricRecorderImpl::MainThreadIncrementalMark_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalMark
MetricRecorderImpl::MainThreadIncrementalMark_event;
@@ -65,7 +65,7 @@ class MetricRecorderTest : public testing::TestWithHeap {
} // namespace
TEST_F(MetricRecorderTest, IncrementalScopesReportedImmediately) {
- MetricRecorderImpl::FullCycle_callcount = 0u;
+ MetricRecorderImpl::GCCycle_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalMark_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalSweep_callcount = 0u;
StartGC();
@@ -95,12 +95,12 @@ TEST_F(MetricRecorderTest, IncrementalScopesReportedImmediately) {
EXPECT_LT(0u,
MetricRecorderImpl::MainThreadIncrementalSweep_event.duration_us);
}
- EXPECT_EQ(0u, MetricRecorderImpl::FullCycle_callcount);
+ EXPECT_EQ(0u, MetricRecorderImpl::GCCycle_callcount);
EndGC(0);
}
TEST_F(MetricRecorderTest, NonIncrementlaScopesNotReportedImmediately) {
- MetricRecorderImpl::FullCycle_callcount = 0u;
+ MetricRecorderImpl::GCCycle_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalMark_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalSweep_callcount = 0u;
StartGC();
@@ -132,19 +132,19 @@ TEST_F(MetricRecorderTest, NonIncrementlaScopesNotReportedImmediately) {
}
EXPECT_EQ(0u, MetricRecorderImpl::MainThreadIncrementalMark_callcount);
EXPECT_EQ(0u, MetricRecorderImpl::MainThreadIncrementalSweep_callcount);
- EXPECT_EQ(0u, MetricRecorderImpl::FullCycle_callcount);
+ EXPECT_EQ(0u, MetricRecorderImpl::GCCycle_callcount);
EndGC(0);
}
TEST_F(MetricRecorderTest, CycleEndMetricsReportedOnGcEnd) {
- MetricRecorderImpl::FullCycle_callcount = 0u;
+ MetricRecorderImpl::GCCycle_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalMark_callcount = 0u;
MetricRecorderImpl::MainThreadIncrementalSweep_callcount = 0u;
StartGC();
EndGC(0);
EXPECT_EQ(0u, MetricRecorderImpl::MainThreadIncrementalMark_callcount);
EXPECT_EQ(0u, MetricRecorderImpl::MainThreadIncrementalSweep_callcount);
- EXPECT_EQ(1u, MetricRecorderImpl::FullCycle_callcount);
+ EXPECT_EQ(1u, MetricRecorderImpl::GCCycle_callcount);
}
TEST_F(MetricRecorderTest, CycleEndHistogramReportsCorrectValues) {
@@ -210,75 +210,69 @@ TEST_F(MetricRecorderTest, CycleEndHistogramReportsCorrectValues) {
EndGC(300);
// Check durations.
static constexpr int64_t kDurationComparisonTolerance = 5000;
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event.main_thread_incremental
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.main_thread_incremental
.mark_duration_us -
10000),
kDurationComparisonTolerance);
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event.main_thread_incremental
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.main_thread_incremental
.sweep_duration_us -
20000),
kDurationComparisonTolerance);
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event.main_thread_atomic
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.main_thread_atomic
.mark_duration_us -
30000),
kDurationComparisonTolerance);
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event.main_thread_atomic
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.main_thread_atomic
.weak_duration_us -
50000),
kDurationComparisonTolerance);
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event.main_thread_atomic
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.main_thread_atomic
.compact_duration_us -
60000),
kDurationComparisonTolerance);
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event.main_thread_atomic
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.main_thread_atomic
.sweep_duration_us -
70000),
kDurationComparisonTolerance);
EXPECT_LT(
- std::abs(
- MetricRecorderImpl::FullCycle_event.main_thread.mark_duration_us -
- 40000),
+ std::abs(MetricRecorderImpl::GCCycle_event.main_thread.mark_duration_us -
+ 40000),
kDurationComparisonTolerance);
EXPECT_LT(
- std::abs(
- MetricRecorderImpl::FullCycle_event.main_thread.weak_duration_us -
- 50000),
+ std::abs(MetricRecorderImpl::GCCycle_event.main_thread.weak_duration_us -
+ 50000),
kDurationComparisonTolerance);
EXPECT_LT(
std::abs(
- MetricRecorderImpl::FullCycle_event.main_thread.compact_duration_us -
+ MetricRecorderImpl::GCCycle_event.main_thread.compact_duration_us -
60000),
kDurationComparisonTolerance);
EXPECT_LT(
- std::abs(
- MetricRecorderImpl::FullCycle_event.main_thread.sweep_duration_us -
- 90000),
- kDurationComparisonTolerance);
- EXPECT_LT(
- std::abs(MetricRecorderImpl::FullCycle_event.total.mark_duration_us -
- 120000),
- kDurationComparisonTolerance);
- EXPECT_LT(
- std::abs(MetricRecorderImpl::FullCycle_event.total.weak_duration_us -
- 50000),
+ std::abs(MetricRecorderImpl::GCCycle_event.main_thread.sweep_duration_us -
+ 90000),
kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.total.mark_duration_us -
+ 120000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.total.weak_duration_us -
+ 50000),
+ kDurationComparisonTolerance);
EXPECT_LT(
- std::abs(MetricRecorderImpl::FullCycle_event.total.compact_duration_us -
+ std::abs(MetricRecorderImpl::GCCycle_event.total.compact_duration_us -
60000),
kDurationComparisonTolerance);
- EXPECT_LT(
- std::abs(MetricRecorderImpl::FullCycle_event.total.sweep_duration_us -
- 190000),
- kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event.total.sweep_duration_us -
+ 190000),
+ kDurationComparisonTolerance);
// Check collection rate and efficiency.
EXPECT_DOUBLE_EQ(
- 0.3, MetricRecorderImpl::FullCycle_event.collection_rate_in_percent);
+ 0.3, MetricRecorderImpl::GCCycle_event.collection_rate_in_percent);
static constexpr double kEfficiencyComparisonTolerance = 0.0005;
EXPECT_LT(
- std::abs(MetricRecorderImpl::FullCycle_event.efficiency_in_bytes_per_us -
+ std::abs(MetricRecorderImpl::GCCycle_event.efficiency_in_bytes_per_us -
(700.0 / (120000 + 50000 + 60000 + 190000))),
kEfficiencyComparisonTolerance);
- EXPECT_LT(std::abs(MetricRecorderImpl::FullCycle_event
+ EXPECT_LT(std::abs(MetricRecorderImpl::GCCycle_event
.main_thread_efficiency_in_bytes_per_us -
(700.0 / (40000 + 50000 + 60000 + 90000))),
kEfficiencyComparisonTolerance);
@@ -291,12 +285,12 @@ TEST_F(MetricRecorderTest, ObjectSizeMetricsNoAllocations) {
// Populate current event.
StartGC();
EndGC(800);
- EXPECT_EQ(1000u, MetricRecorderImpl::FullCycle_event.objects.before_bytes);
- EXPECT_EQ(800u, MetricRecorderImpl::FullCycle_event.objects.after_bytes);
- EXPECT_EQ(200u, MetricRecorderImpl::FullCycle_event.objects.freed_bytes);
- EXPECT_EQ(0u, MetricRecorderImpl::FullCycle_event.memory.before_bytes);
- EXPECT_EQ(0u, MetricRecorderImpl::FullCycle_event.memory.after_bytes);
- EXPECT_EQ(0u, MetricRecorderImpl::FullCycle_event.memory.freed_bytes);
+ EXPECT_EQ(1000u, MetricRecorderImpl::GCCycle_event.objects.before_bytes);
+ EXPECT_EQ(800u, MetricRecorderImpl::GCCycle_event.objects.after_bytes);
+ EXPECT_EQ(200u, MetricRecorderImpl::GCCycle_event.objects.freed_bytes);
+ EXPECT_EQ(0u, MetricRecorderImpl::GCCycle_event.memory.before_bytes);
+ EXPECT_EQ(0u, MetricRecorderImpl::GCCycle_event.memory.after_bytes);
+ EXPECT_EQ(0u, MetricRecorderImpl::GCCycle_event.memory.freed_bytes);
}
TEST_F(MetricRecorderTest, ObjectSizeMetricsWithAllocations) {
@@ -313,12 +307,12 @@ TEST_F(MetricRecorderTest, ObjectSizeMetricsWithAllocations) {
stats->NotifyAllocatedMemory(1000);
stats->NotifyFreedMemory(400);
stats->NotifySweepingCompleted();
- EXPECT_EQ(1300u, MetricRecorderImpl::FullCycle_event.objects.before_bytes);
- EXPECT_EQ(800, MetricRecorderImpl::FullCycle_event.objects.after_bytes);
- EXPECT_EQ(500u, MetricRecorderImpl::FullCycle_event.objects.freed_bytes);
- EXPECT_EQ(700u, MetricRecorderImpl::FullCycle_event.memory.before_bytes);
- EXPECT_EQ(300u, MetricRecorderImpl::FullCycle_event.memory.after_bytes);
- EXPECT_EQ(400u, MetricRecorderImpl::FullCycle_event.memory.freed_bytes);
+ EXPECT_EQ(1300u, MetricRecorderImpl::GCCycle_event.objects.before_bytes);
+ EXPECT_EQ(800, MetricRecorderImpl::GCCycle_event.objects.after_bytes);
+ EXPECT_EQ(500u, MetricRecorderImpl::GCCycle_event.objects.freed_bytes);
+ EXPECT_EQ(700u, MetricRecorderImpl::GCCycle_event.memory.before_bytes);
+ EXPECT_EQ(300u, MetricRecorderImpl::GCCycle_event.memory.after_bytes);
+ EXPECT_EQ(400u, MetricRecorderImpl::GCCycle_event.memory.freed_bytes);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc b/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
index 0b742a16c0..3290cd954b 100644
--- a/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
@@ -262,8 +262,9 @@ void InterGenerationalPointerTest(MinorGCTest* test, cppgc::Heap* heap) {
const uintptr_t offset =
internal_heap->caged_heap().OffsetFromAddress(young);
// Age may be young or unknown.
- EXPECT_NE(AgeTable::Age::kOld,
- Heap::From(heap)->caged_heap().local_data().age_table[offset]);
+ EXPECT_NE(
+ AgeTable::Age::kOld,
+ Heap::From(heap)->caged_heap().local_data().age_table.GetAge(offset));
}
}
diff --git a/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc b/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
index dc30e750cd..a920c14f2f 100644
--- a/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
@@ -3,25 +3,22 @@
// found in the LICENSE file.
#include "include/cppgc/platform.h"
+#include "src/base/page-allocator.h"
#include "test/unittests/heap/cppgc/test-platform.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace {
-class DefaultPlatformEnvironment final : public ::testing::Environment {
+class CppGCEnvironment final : public ::testing::Environment {
public:
- DefaultPlatformEnvironment() = default;
-
void SetUp() override {
- platform_ =
- std::make_unique<cppgc::internal::testing::TestPlatform>(nullptr);
- cppgc::InitializeProcess(platform_->GetPageAllocator());
+ // Initialize the process for cppgc with an arbitrary page allocator. This
+ // has to survive as long as the process, so it's ok to leak the allocator
+ // here.
+ cppgc::InitializeProcess(new v8::base::PageAllocator());
}
void TearDown() override { cppgc::ShutdownProcess(); }
-
- private:
- std::shared_ptr<cppgc::internal::testing::TestPlatform> platform_;
};
} // namespace
@@ -35,6 +32,6 @@ int main(int argc, char** argv) {
testing::FLAGS_gtest_death_test_style = "threadsafe";
testing::InitGoogleMock(&argc, argv);
- testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
+ testing::AddGlobalTestEnvironment(new CppGCEnvironment);
return RUN_ALL_TESTS();
}
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.cc b/deps/v8/test/unittests/heap/cppgc/tests.cc
index b2bed85f1d..60c47c9537 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/tests.cc
@@ -9,6 +9,10 @@
#include "src/heap/cppgc/object-allocator.h"
#include "test/unittests/heap/cppgc/test-platform.h"
+#if !CPPGC_IS_STANDALONE
+#include "include/v8-initialization.h"
+#endif // !CPPGC_IS_STANDALONE
+
namespace cppgc {
namespace internal {
namespace testing {
@@ -18,12 +22,26 @@ std::shared_ptr<TestPlatform> TestWithPlatform::platform_;
// static
void TestWithPlatform::SetUpTestSuite() {
- platform_ = std::make_unique<TestPlatform>(
+ platform_ = std::make_shared<TestPlatform>(
std::make_unique<DelegatingTracingController>());
+
+#if !CPPGC_IS_STANDALONE
+ // For non-standalone builds, we need to initialize V8's platform so that it
+  // can be looked up by trace-event.h.
+ v8::V8::InitializePlatform(platform_->GetV8Platform());
+#ifdef V8_SANDBOX
+ CHECK(v8::V8::InitializeSandbox());
+#endif // V8_SANDBOX
+ v8::V8::Initialize();
+#endif // !CPPGC_IS_STANDALONE
}
// static
void TestWithPlatform::TearDownTestSuite() {
+#if !CPPGC_IS_STANDALONE
+ v8::V8::Dispose();
+ v8::V8::DisposePlatform();
+#endif // !CPPGC_IS_STANDALONE
platform_.reset();
}
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index df4ad206f5..b436cf9ef5 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -3,7 +3,14 @@
// found in the LICENSE file.
#include "src/heap/embedder-tracing.h"
+
+#include "include/v8-function.h"
+#include "include/v8-template.h"
+#include "src/common/allow-deprecated.h"
+#include "src/handles/global-handles.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
+#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -11,7 +18,7 @@
namespace v8 {
namespace internal {
-using LocalEmbedderHeapTracerWithIsolate = TestWithIsolate;
+using LocalEmbedderHeapTracerWithIsolate = TestWithHeapInternals;
namespace heap {
@@ -235,6 +242,997 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, DestructorClearsIsolate) {
EXPECT_EQ(nullptr, remote_tracer.isolate());
}
+namespace {
+
+v8::Local<v8::Object> ConstructTraceableJSApiObject(
+ v8::Local<v8::Context> context, void* first_field, void* second_field) {
+ v8::EscapableHandleScope scope(context->GetIsolate());
+ v8::Local<v8::FunctionTemplate> function_t =
+ v8::FunctionTemplate::New(context->GetIsolate());
+ v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
+ instance_t->SetInternalFieldCount(2);
+ v8::Local<v8::Function> function =
+ function_t->GetFunction(context).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ function->NewInstance(context).ToLocalChecked();
+ instance->SetAlignedPointerInInternalField(0, first_field);
+ instance->SetAlignedPointerInInternalField(1, second_field);
+ EXPECT_FALSE(instance.IsEmpty());
+ i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
+ EXPECT_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
+ return scope.Escape(instance);
+}
+
+START_ALLOW_USE_DEPRECATED()
+
+enum class TracePrologueBehavior { kNoop, kCallV8WriteBarrier };
+
+class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
+ public:
+ TestEmbedderHeapTracer() = default;
+ TestEmbedderHeapTracer(TracePrologueBehavior prologue_behavior,
+ v8::Global<v8::Array> array)
+ : prologue_behavior_(prologue_behavior), array_(std::move(array)) {}
+
+ void RegisterV8References(
+ const std::vector<std::pair<void*, void*>>& embedder_fields) final {
+ registered_from_v8_.insert(registered_from_v8_.end(),
+ embedder_fields.begin(), embedder_fields.end());
+ }
+
+ void AddReferenceForTracing(v8::TracedReference<v8::Value>* ref) {
+ to_register_with_v8_references_.push_back(ref);
+ }
+
+ bool AdvanceTracing(double deadline_in_ms) final {
+ for (auto ref : to_register_with_v8_references_) {
+ RegisterEmbedderReference(ref->As<v8::Data>());
+ }
+ to_register_with_v8_references_.clear();
+ return true;
+ }
+
+ bool IsTracingDone() final { return to_register_with_v8_references_.empty(); }
+
+ void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
+ if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
+ auto local = array_.Get(isolate());
+ local
+ ->Set(local->GetCreationContext().ToLocalChecked(), 0,
+ v8::Object::New(isolate()))
+ .Check();
+ }
+ }
+
+ void TraceEpilogue(TraceSummary*) final {}
+ void EnterFinalPause(EmbedderStackState) final {}
+
+ bool IsRegisteredFromV8(void* first_field) const {
+ for (auto pair : registered_from_v8_) {
+ if (pair.first == first_field) return true;
+ }
+ return false;
+ }
+
+ void DoNotConsiderAsRootForScavenge(v8::TracedReference<v8::Value>* handle) {
+ handle->SetWrapperClassId(17);
+ non_root_handles_.push_back(handle);
+ }
+
+ bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ return handle.WrapperClassId() != 17;
+ }
+
+ void ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ for (auto* non_root_handle : non_root_handles_) {
+ if (*non_root_handle == handle) {
+ non_root_handle->Reset();
+ }
+ }
+ }
+
+ private:
+ std::vector<std::pair<void*, void*>> registered_from_v8_;
+ std::vector<v8::TracedReference<v8::Value>*> to_register_with_v8_references_;
+ TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop;
+ v8::Global<v8::Array> array_;
+ std::vector<v8::TracedReference<v8::Value>*> non_root_handles_;
+};
+
+class V8_NODISCARD TemporaryEmbedderHeapTracerScope final {
+ public:
+ TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
+ v8::EmbedderHeapTracer* tracer)
+ : isolate_(isolate) {
+ isolate_->SetEmbedderHeapTracer(tracer);
+ }
+
+ ~TemporaryEmbedderHeapTracerScope() {
+ isolate_->SetEmbedderHeapTracer(nullptr);
+ }
+
+ private:
+ v8::Isolate* const isolate_;
+};
+
+} // namespace
+
+using EmbedderTracingTest = TestWithHeapInternalsAndContext;
+
+TEST_F(EmbedderTracingTest, V8RegisterEmbedderReference) {
+ // Tests that wrappers are properly registered with the embedder heap
+ // tracer.
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ void* first_and_second_field = reinterpret_cast<void*>(0x2);
+ v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ context, first_and_second_field, first_and_second_field);
+ ASSERT_FALSE(api_object.IsEmpty());
+ i_isolate()->heap()->CollectGarbage(i::OLD_SPACE,
+ GarbageCollectionReason::kTesting);
+ EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
+}
+
+TEST_F(EmbedderTracingTest, EmbedderRegisteringV8Reference) {
+ // Tests that references that are registered by the embedder heap tracer are
+ // considered live by V8.
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ auto handle = std::make_unique<v8::TracedReference<v8::Value>>();
+ {
+ v8::HandleScope inner_scope(v8_isolate());
+ v8::Local<v8::Value> o =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ handle->Reset(v8_isolate(), o);
+ }
+ tracer.AddReferenceForTracing(handle.get());
+ i_isolate()->heap()->CollectGarbage(i::OLD_SPACE,
+ GarbageCollectionReason::kTesting);
+ EXPECT_FALSE(handle->IsEmpty());
+}
+
+namespace {
+
+void ResurrectingFinalizer(
+ const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->ClearWeak();
+}
+
+} // namespace
+
+TEST_F(EmbedderTracingTest, TracingInRevivedSubgraph) {
+  // Tests that wrappers are traced when they are contained within a subgraph
+ // that is revived by a finalizer.
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ v8::Global<v8::Object> g;
+ void* first_and_second_field = reinterpret_cast<void*>(0x4);
+ {
+ v8::HandleScope inner_scope(v8_isolate());
+ v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ context, first_and_second_field, first_and_second_field);
+ EXPECT_FALSE(api_object.IsEmpty());
+ v8::Local<v8::Object> o =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ o->Set(context,
+ v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), "link")
+ .ToLocalChecked(),
+ api_object)
+ .FromJust();
+ g.Reset(v8_isolate(), o);
+ g.SetWeak(&g, ResurrectingFinalizer, v8::WeakCallbackType::kFinalizer);
+ }
+ i_isolate()->heap()->CollectGarbage(i::OLD_SPACE,
+ GarbageCollectionReason::kTesting);
+ EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
+}
+
+TEST_F(EmbedderTracingTest, TracingInEphemerons) {
+ // Tests that wrappers that are part of ephemerons are traced.
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Object> key =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ void* first_and_second_field = reinterpret_cast<void*>(0x8);
+ Handle<JSWeakMap> weak_map = i_isolate()->factory()->NewJSWeakMap();
+ {
+ v8::HandleScope inner_scope(v8_isolate());
+ v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ context, first_and_second_field, first_and_second_field);
+ EXPECT_FALSE(api_object.IsEmpty());
+ Handle<JSObject> js_key =
+ handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate());
+ Handle<JSReceiver> js_api_object = v8::Utils::OpenHandle(*api_object);
+ int32_t hash = js_key->GetOrCreateHash(i_isolate()).value();
+ JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
+ }
+ i_isolate()->heap()->CollectGarbage(i::OLD_SPACE,
+ GarbageCollectionReason::kTesting);
+ EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
+}
+
+TEST_F(EmbedderTracingTest, FinalizeTracingIsNoopWhenNotMarking) {
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+
+ // Finalize a potentially running garbage collection.
+ i_isolate()->heap()->CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kTesting);
+ EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped());
+
+ int gc_counter = i_isolate()->heap()->gc_count();
+ tracer.FinalizeTracing();
+ EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped());
+ EXPECT_EQ(gc_counter, i_isolate()->heap()->gc_count());
+}
+
+TEST_F(EmbedderTracingTest, FinalizeTracingWhenMarking) {
+ if (!FLAG_incremental_marking) return;
+ ManualGCScope manual_gc(i_isolate());
+ Heap* heap = i_isolate()->heap();
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+
+ // Finalize a potentially running garbage collection.
+ heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+ if (heap->mark_compact_collector()->sweeping_in_progress()) {
+ heap->mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
+ }
+ heap->tracer()->StopFullCycleIfNeeded();
+ EXPECT_TRUE(heap->incremental_marking()->IsStopped());
+
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ {
+ SafepointScope scope(heap);
+ heap->tracer()->StartCycle(
+ GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector cctest", GCTracer::MarkingType::kIncremental);
+ marking->Start(GarbageCollectionReason::kTesting);
+ }
+
+  // Sweeping is not running, so we should immediately start marking.
+ EXPECT_TRUE(marking->IsMarking());
+ tracer.FinalizeTracing();
+ EXPECT_TRUE(marking->IsStopped());
+}
+
+namespace {
+
+void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ v8::TracedReference<v8::Object>* handle) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ EXPECT_FALSE(object.IsEmpty());
+ *handle = v8::TracedReference<v8::Object>(isolate, object);
+ EXPECT_FALSE(handle->IsEmpty());
+}
+
+template <typename T>
+void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ T* global) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(
+ ConstructTraceableJSApiObject(context, nullptr, nullptr));
+ EXPECT_FALSE(object.IsEmpty());
+ *global = T(isolate, object);
+ EXPECT_FALSE(global->IsEmpty());
+}
+
+namespace {
+
+bool InCorrectGeneration(HeapObject object) {
+ return FLAG_single_generation ? !i::Heap::InYoungGeneration(object)
+ : i::Heap::InYoungGeneration(object);
+}
+
+template <typename GlobalOrPersistent>
+bool InCorrectGeneration(v8::Isolate* isolate,
+ const GlobalOrPersistent& global) {
+ v8::HandleScope scope(isolate);
+ auto tmp = global.Get(isolate);
+ return InCorrectGeneration(*v8::Utils::OpenHandle(*tmp));
+}
+
+} // namespace
+
+enum class SurvivalMode { kSurvives, kDies };
+
+template <typename ModifierFunction, typename ConstructTracedReferenceFunction,
+ typename GCFunction>
+void TracedReferenceTest(v8::Isolate* isolate,
+ ConstructTracedReferenceFunction construct_function,
+ ModifierFunction modifier_function,
+ GCFunction gc_function, SurvivalMode survives) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ auto* global_handles =
+ reinterpret_cast<i::Isolate*>(isolate)->global_handles();
+
+ const size_t initial_count = global_handles->handles_count();
+ auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
+ construct_function(isolate, context, handle.get());
+ ASSERT_TRUE(InCorrectGeneration(isolate, *handle));
+ modifier_function(*handle);
+ const size_t after_modification_count = global_handles->handles_count();
+ gc_function();
+ // Cannot check the handle as it is not explicitly cleared by the GC. Instead
+ // check the handles count.
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
+ after_modification_count == global_handles->handles_count());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies,
+ initial_count == global_handles->handles_count());
+}
+
+} // namespace
+
+TEST_F(EmbedderTracingTest, TracedReferenceReset) {
+ v8::HandleScope scope(v8_isolate());
+ v8::TracedReference<v8::Object> handle;
+ ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &handle);
+ EXPECT_FALSE(handle.IsEmpty());
+ handle.Reset();
+ EXPECT_TRUE(handle.IsEmpty());
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceCopyReferences) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope outer_scope(v8_isolate());
+ i::GlobalHandles* global_handles = i_isolate()->global_handles();
+
+ const size_t initial_count = global_handles->handles_count();
+ auto handle1 = std::make_unique<v8::TracedReference<v8::Value>>();
+ {
+ v8::HandleScope scope(v8_isolate());
+ handle1->Reset(v8_isolate(), v8::Object::New(v8_isolate()));
+ }
+ auto handle2 = std::make_unique<v8::TracedReference<v8::Value>>(*handle1);
+ auto handle3 = std::make_unique<v8::TracedReference<v8::Value>>();
+ *handle3 = *handle2;
+ EXPECT_EQ(initial_count + 3, global_handles->handles_count());
+ EXPECT_FALSE(handle1->IsEmpty());
+ EXPECT_EQ(*handle1, *handle2);
+ EXPECT_EQ(*handle2, *handle3);
+ {
+ v8::HandleScope scope(v8_isolate());
+ auto tmp = v8::Local<v8::Value>::New(v8_isolate(), *handle3);
+ EXPECT_FALSE(tmp.IsEmpty());
+ FullGC();
+ }
+ EXPECT_EQ(initial_count + 3, global_handles->handles_count());
+ EXPECT_FALSE(handle1->IsEmpty());
+ EXPECT_EQ(*handle1, *handle2);
+ EXPECT_EQ(*handle2, *handle3);
+ FullGC();
+ EXPECT_EQ(initial_count, global_handles->handles_count());
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceToUnmodifiedJSObjectDiesOnFullGC) {
+ // When stressing incremental marking, a write barrier may keep the object
+ // alive.
+ if (FLAG_stress_incremental_marking) return;
+
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](const TracedReference<v8::Object>&) {}, [this]() { FullGC(); },
+ SurvivalMode::kDies);
+}
+
+TEST_F(EmbedderTracingTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesFullGCWhenHeldAlive) {
+ v8::Global<v8::Object> strong_global;
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [this, &strong_global](const TracedReference<v8::Object>& handle) {
+ v8::HandleScope scope(v8_isolate());
+ strong_global =
+ v8::Global<v8::Object>(v8_isolate(), handle.Get(v8_isolate()));
+ },
+ [this]() { FullGC(); }, SurvivalMode::kSurvives);
+}
+
+TEST_F(EmbedderTracingTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesYoungGC) {
+ if (FLAG_single_generation) return;
+ ManualGCScope manual_gc(i_isolate());
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
+ SurvivalMode::kSurvives);
+}
+
+TEST_F(
+ EmbedderTracingTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesYoungGCWhenExcludedFromRoots) {
+ if (FLAG_single_generation) return;
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [&tracer](const TracedReference<v8::Object>& handle) {
+ tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
+ },
+ [this]() { YoungGC(); }, SurvivalMode::kSurvives);
+}
+
+TEST_F(EmbedderTracingTest,
+ TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
+ if (FLAG_single_generation) return;
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
+ SurvivalMode::kSurvives);
+}
+
+TEST_F(
+ EmbedderTracingTest,
+ TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
+ if (FLAG_single_generation) return;
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [&tracer](const TracedReference<v8::Object>& handle) {
+ tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
+ },
+ [this]() { YoungGC(); }, SurvivalMode::kDies);
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceWrapperClassId) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+
+ v8::TracedReference<v8::Object> traced;
+ ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &traced);
+ EXPECT_EQ(0, traced.WrapperClassId());
+ traced.SetWrapperClassId(17);
+ EXPECT_EQ(17, traced.WrapperClassId());
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceHandlesMarking) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ auto live = std::make_unique<v8::TracedReference<v8::Value>>();
+ auto dead = std::make_unique<v8::TracedReference<v8::Value>>();
+ live->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
+ dead->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
+ i::GlobalHandles* global_handles = i_isolate()->global_handles();
+ {
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.AddReferenceForTracing(live.get());
+ const size_t initial_count = global_handles->handles_count();
+ FullGC();
+ const size_t final_count = global_handles->handles_count();
+ // Handles are black allocated, so the first GC does not collect them.
+ EXPECT_EQ(initial_count, final_count);
+ }
+
+ {
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.AddReferenceForTracing(live.get());
+ const size_t initial_count = global_handles->handles_count();
+ FullGC();
+ const size_t final_count = global_handles->handles_count();
+ EXPECT_EQ(initial_count, final_count + 1);
+ }
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceHandlesDoNotLeak) {
+ // TracedReference handles are not cleared by the destructor of the embedder
+ // object. To avoid leaks we need to mark these handles during GC.
+ // This test checks that unmarked handles do not leak.
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ auto ref = std::make_unique<v8::TracedReference<v8::Value>>();
+ ref->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
+ i::GlobalHandles* global_handles = i_isolate()->global_handles();
+ const size_t initial_count = global_handles->handles_count();
+ // We need two GCs because handles are black allocated.
+ FullGC();
+ FullGC();
+ const size_t final_count = global_handles->handles_count();
+ EXPECT_EQ(initial_count, final_count + 1);
+}
+
+namespace {
+
+class TracedReferenceVisitor final
+ : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
+ public:
+ ~TracedReferenceVisitor() override = default;
+
+ void VisitTracedReference(const TracedReference<Value>& value) final {
+ if (value.WrapperClassId() == 57) {
+ count_++;
+ }
+ }
+
+ size_t count() const { return count_; }
+
+ private:
+ size_t count_ = 0;
+};
+
+} // namespace
+
+TEST_F(EmbedderTracingTest, TracedReferenceIteration) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+
+ auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
+ ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(),
+ handle.get());
+ EXPECT_FALSE(handle->IsEmpty());
+ handle->SetWrapperClassId(57);
+ TracedReferenceVisitor visitor;
+ {
+ v8::HandleScope new_scope(v8_isolate());
+ tracer.IterateTracedGlobalHandles(&visitor);
+ }
+ EXPECT_EQ(1u, visitor.count());
+}
+
+TEST_F(EmbedderTracingTest, TracePrologueCallingIntoV8WriteBarrier) {
+ // Regression test: https://crbug.com/940003
+ if (!FLAG_incremental_marking) return;
+ ManualGCScope manual_gc(isolate());
+ v8::HandleScope scope(v8_isolate());
+ v8::Global<v8::Array> global;
+ {
+ v8::HandleScope new_scope(v8_isolate());
+ auto local = v8::Array::New(v8_isolate(), 10);
+ global.Reset(v8_isolate(), local);
+ }
+ TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier,
+ std::move(global));
+ TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ SimulateIncrementalMarking();
+  // Finish GC to avoid removing the tracer while GC is running, which may
+  // end up in an infinite loop because of unprocessed objects.
+ FullGC();
+}
+
+TEST_F(EmbedderTracingTest, BasicTracedReference) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ i::GlobalHandles* global_handles = i_isolate()->global_handles();
+
+ const size_t initial_count = global_handles->handles_count();
+ char* memory = new char[sizeof(v8::TracedReference<v8::Value>)];
+ auto* traced = new (memory) v8::TracedReference<v8::Value>();
+ {
+ v8::HandleScope new_scope(v8_isolate());
+ v8::Local<v8::Value> object(ConstructTraceableJSApiObject(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr));
+ EXPECT_TRUE(traced->IsEmpty());
+ *traced = v8::TracedReference<v8::Value>(v8_isolate(), object);
+ EXPECT_FALSE(traced->IsEmpty());
+ EXPECT_EQ(initial_count + 1, global_handles->handles_count());
+ }
+ traced->~TracedReference<v8::Value>();
+ EXPECT_EQ(initial_count + 1, global_handles->handles_count());
+ // GC should clear the handle.
+ FullGC();
+ EXPECT_EQ(initial_count, global_handles->handles_count());
+ delete[] memory;
+}
+
+namespace {
+
+class EmptyEmbedderHeapTracer : public v8::EmbedderHeapTracer {
+ public:
+ void RegisterV8References(
+ const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
+
+ bool AdvanceTracing(double deadline_in_ms) final { return true; }
+ bool IsTracingDone() final { return true; }
+ void TracePrologue(EmbedderHeapTracer::TraceFlags) final {}
+ void TraceEpilogue(TraceSummary*) final {}
+ void EnterFinalPause(EmbedderStackState) final {}
+};
+
+// EmbedderHeapTracer that can optimize Scavenger handling when used with
+// TracedReference.
+class EmbedderHeapTracerNoDestructorNonTracingClearing final
+ : public EmptyEmbedderHeapTracer {
+ public:
+ explicit EmbedderHeapTracerNoDestructorNonTracingClearing(
+ uint16_t class_id_to_optimize)
+ : class_id_to_optimize_(class_id_to_optimize) {}
+
+ bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ return handle.WrapperClassId() != class_id_to_optimize_;
+ }
+
+ void ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) final {
+ if (handle.WrapperClassId() != class_id_to_optimize_) return;
+
+ // Convention (for test): Objects that are optimized have their first field
+ // set as a back pointer.
+ BasicTracedReference<v8::Value>* original_handle =
+ reinterpret_cast<BasicTracedReference<v8::Value>*>(
+ v8::Object::GetAlignedPointerFromInternalField(
+ handle.As<v8::Object>(), 0));
+ original_handle->Reset();
+ }
+
+ private:
+ uint16_t class_id_to_optimize_;
+};
+
+template <typename T>
+void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
+ uint16_t optimized_class_id,
+ T* optimized_handle,
+ T* non_optimized_handle) {
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> optimized_object(ConstructTraceableJSApiObject(
+ isolate->GetCurrentContext(), optimized_handle, nullptr));
+ EXPECT_TRUE(optimized_handle->IsEmpty());
+ *optimized_handle = T(isolate, optimized_object);
+ EXPECT_FALSE(optimized_handle->IsEmpty());
+ optimized_handle->SetWrapperClassId(optimized_class_id);
+
+ v8::Local<v8::Object> non_optimized_object(ConstructTraceableJSApiObject(
+ isolate->GetCurrentContext(), nullptr, nullptr));
+ EXPECT_TRUE(non_optimized_handle->IsEmpty());
+ *non_optimized_handle = T(isolate, non_optimized_object);
+ EXPECT_FALSE(non_optimized_handle->IsEmpty());
+}
+
+} // namespace
+
+TEST_F(EmbedderTracingTest, TracedReferenceNoDestructorReclaimedOnScavenge) {
+ if (FLAG_single_generation) return;
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ constexpr uint16_t kClassIdToOptimize = 23;
+ EmbedderHeapTracerNoDestructorNonTracingClearing tracer(kClassIdToOptimize);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ i::GlobalHandles* global_handles = i_isolate()->global_handles();
+
+ const size_t initial_count = global_handles->handles_count();
+ auto* optimized_handle = new v8::TracedReference<v8::Value>();
+ auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
+ SetupOptimizedAndNonOptimizedHandle(v8_isolate(), kClassIdToOptimize,
+ optimized_handle, non_optimized_handle);
+ EXPECT_EQ(initial_count + 2, global_handles->handles_count());
+ YoungGC();
+ EXPECT_EQ(initial_count + 1, global_handles->handles_count());
+ EXPECT_TRUE(optimized_handle->IsEmpty());
+ delete optimized_handle;
+ EXPECT_FALSE(non_optimized_handle->IsEmpty());
+ non_optimized_handle->Reset();
+ delete non_optimized_handle;
+ EXPECT_EQ(initial_count, global_handles->handles_count());
+}
+
+namespace {
+
+template <typename T>
+V8_NOINLINE void OnStackTest(v8::Isolate* v8_isolate,
+ TestEmbedderHeapTracer* tracer) {
+ v8::Global<v8::Object> observer;
+ T stack_ref;
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ stack_ref.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+}
+
+V8_NOINLINE void CreateTracedReferenceInDeepStack(
+ v8::Isolate* isolate, v8::Global<v8::Object>* observer) {
+ v8::TracedReference<v8::Value> stack_ref;
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+ isolate->GetCurrentContext(), nullptr, nullptr));
+ stack_ref.Reset(isolate, object);
+ observer->Reset(isolate, object);
+ observer->SetWeak();
+}
+
+V8_NOINLINE void TracedReferenceNotifyEmptyStackTest(
+ v8::Isolate* v8_isolate, TestEmbedderHeapTracer* tracer) {
+ v8::Global<v8::Object> observer;
+ CreateTracedReferenceInDeepStack(v8_isolate, &observer);
+ EXPECT_FALSE(observer.IsEmpty());
+ reinterpret_cast<i::Isolate*>(v8_isolate)
+ ->heap()
+ ->local_embedder_heap_tracer()
+ ->NotifyEmptyEmbedderStack();
+ FullGC(v8_isolate);
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+enum class Operation {
+ kCopy,
+ kMove,
+};
+
+template <typename T>
+void PerformOperation(Operation op, T* lhs, T* rhs) {
+ switch (op) {
+ case Operation::kMove:
+ *lhs = std::move(*rhs);
+ break;
+ case Operation::kCopy:
+ *lhs = *rhs;
+ rhs->Reset();
+ break;
+ }
+}
+
+enum class TargetHandling {
+ kNonInitialized,
+ kInitializedYoungGen,
+ kInitializedOldGen
+};
+
+V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate,
+ TestEmbedderHeapTracer* tracer, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::TracedReference<v8::Value>* heap_handle =
+ new v8::TracedReference<v8::Value>();
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ EXPECT_TRUE(InCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!FLAG_single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ YoungGC(v8_isolate);
+ YoungGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ heap_handle->Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ stack_handle.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ tracer->AddReferenceForTracing(heap_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ tracer->AddReferenceForTracing(heap_handle);
+ PerformOperation(op, heap_handle, &stack_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_TRUE(observer.IsEmpty());
+ delete heap_handle;
+}
+
+V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate,
+ TestEmbedderHeapTracer* tracer, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::TracedReference<v8::Value>* heap_handle =
+ new v8::TracedReference<v8::Value>();
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ EXPECT_TRUE(InCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!FLAG_single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ YoungGC(v8_isolate);
+ YoungGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ stack_handle.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ heap_handle->Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ tracer->AddReferenceForTracing(heap_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &stack_handle, heap_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ stack_handle.Reset();
+ FullGC(v8_isolate);
+ EXPECT_TRUE(observer.IsEmpty());
+ delete heap_handle;
+}
+
+V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate,
+ TestEmbedderHeapTracer* tracer, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle1;
+ v8::TracedReference<v8::Value> stack_handle2;
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ EXPECT_TRUE(InCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!FLAG_single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ YoungGC(v8_isolate);
+ YoungGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ stack_handle2.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ stack_handle1.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &stack_handle2, &stack_handle1);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ stack_handle2.Reset();
+ FullGC(v8_isolate);
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+V8_NOINLINE void TracedReferenceCleanedTest(v8::Isolate* v8_isolate,
+ TestEmbedderHeapTracer* tracer) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr));
+ const size_t before = reinterpret_cast<Isolate*>(v8_isolate)
+ ->global_handles()
+ ->NumberOfOnStackHandlesForTesting();
+ for (int i = 0; i < 100; i++) {
+ v8::TracedReference<v8::Value> stack_handle;
+ stack_handle.Reset(v8_isolate, object);
+ }
+ EXPECT_EQ(before + 1, reinterpret_cast<Isolate*>(v8_isolate)
+ ->global_handles()
+ ->NumberOfOnStackHandlesForTesting());
+}
+
+} // namespace
+
+TEST_F(EmbedderTracingTest, TracedReferenceOnStack) {
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.SetStackStart(&manual_gc);
+ OnStackTest<v8::TracedReference<v8::Value>>(v8_isolate(), &tracer);
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceCleaned) {
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.SetStackStart(&manual_gc);
+ TracedReferenceCleanedTest(v8_isolate(), &tracer);
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceMove) {
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.SetStackStart(&manual_gc);
+ StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kNonInitialized);
+ StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kNonInitialized);
+ HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kNonInitialized);
+ StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+}
+
+TEST_F(EmbedderTracingTest, TracedReferenceCopy) {
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.SetStackStart(&manual_gc);
+ StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+}
+
+TEST_F(EmbedderTracingTest, NotifyEmptyStack) {
+ ManualGCScope manual_gc(i_isolate());
+ TestEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
+ tracer.SetStackStart(&manual_gc);
+ TracedReferenceNotifyEmptyStackTest(v8_isolate(), &tracer);
+}
+
+END_ALLOW_USE_DEPRECATED()
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 19c8e37585..c0f1997e3d 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/heap/gc-tracer.h"
+
#include <cmath>
#include <limits>
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
-#include "src/heap/gc-tracer.h"
+#include "src/heap/gc-tracer-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -102,7 +104,7 @@ void StopTracing(GCTracer* tracer, GarbageCollector collector) {
tracer->StopObservablePause();
tracer->UpdateStatistics(collector);
if (Heap::IsYoungGenerationCollector(collector)) {
- tracer->StopCycle(collector);
+ tracer->StopYoungCycleIfNeeded();
} else {
tracer->NotifySweepingCompleted();
}
@@ -263,20 +265,18 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
- 100,
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .longest_step);
- EXPECT_EQ(
- 2,
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .steps);
+ 100, tracer->current_
+ .incremental_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .longest_step);
+ EXPECT_EQ(2, tracer->current_
+ .incremental_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .steps);
EXPECT_DOUBLE_EQ(
- 150,
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .duration);
+ 150, tracer->current_
+ .incremental_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .duration);
+ EXPECT_DOUBLE_EQ(
+ 150, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
// Round 2. Numbers should be reset.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 13);
@@ -286,20 +286,18 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 122);
StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
- 122,
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .longest_step);
- EXPECT_EQ(
- 3,
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .steps);
+ 122, tracer->current_
+ .incremental_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .longest_step);
+ EXPECT_EQ(3, tracer->current_
+ .incremental_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .steps);
+ EXPECT_DOUBLE_EQ(
+ 150, tracer->current_
+ .incremental_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .duration);
EXPECT_DOUBLE_EQ(
- 150,
- tracer->current_
- .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .duration);
+ 150, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
}
TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
@@ -392,9 +390,9 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
- tracer->AddScopeSampleBackground(
+ tracer->AddScopeSample(
GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10);
- tracer->AddScopeSampleBackground(
+ tracer->AddScopeSample(
GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1);
StopTracing(tracer, GarbageCollector::SCAVENGER);
EXPECT_DOUBLE_EQ(
@@ -407,17 +405,14 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
tracer->ResetForTesting();
StartTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR,
StartTracingMode::kAtomic);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
- 10);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
- 1);
- tracer->AddScopeSampleBackground(
- GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 20);
- tracer->AddScopeSampleBackground(
- GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2);
- tracer->AddScopeSampleBackground(
+ tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING, 10);
+ tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING, 1);
+ tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY,
+ 20);
+ tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2);
+ tracer->AddScopeSample(
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 30);
- tracer->AddScopeSampleBackground(
+ tracer->AddScopeSample(
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 3);
StopTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
@@ -434,25 +429,22 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
TEST_F(GCTracerTest, BackgroundMajorMCScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 100);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING,
- 200);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 10);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_MARKING, 100);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 200);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_MARKING, 10);
// Scavenger should not affect the major mark-compact scopes.
StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
StopTracing(tracer, GarbageCollector::SCAVENGER);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 20);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 1);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 2);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 20);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_MARKING, 1);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 2);
StartTracing(tracer, GarbageCollector::MARK_COMPACTOR,
StartTracingMode::kAtomic);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
- 30);
- tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
- 3);
- tracer->AddScopeSampleBackground(
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY, 30);
+ tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY, 3);
+ tracer->AddScopeSample(
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40);
- tracer->AddScopeSampleBackground(
+ tracer->AddScopeSample(
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4);
StopTracing(tracer, GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
diff --git a/deps/v8/test/unittests/heap/heap-utils.h b/deps/v8/test/unittests/heap/heap-utils.h
index 2cd123c827..4e8a97ef8f 100644
--- a/deps/v8/test/unittests/heap/heap-utils.h
+++ b/deps/v8/test/unittests/heap/heap-utils.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
+#include "src/heap/heap.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -29,6 +30,14 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
+ void FullGC() {
+ heap()->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting);
+ }
+
+ void YoungGC() {
+ heap()->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
+ }
+
Heap* heap() const { return this->i_isolate()->heap(); }
void SimulateIncrementalMarking(bool force_completion = true) {
@@ -37,12 +46,27 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
}
};
-using TestWithHeapInternals = //
- WithHeapInternals< //
- WithInternalIsolateMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
- ::testing::Test>>>>;
+using TestWithHeapInternals = //
+ WithHeapInternals< //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>>>;
+
+using TestWithHeapInternalsAndContext = //
+ WithContextMixin< //
+ TestWithHeapInternals>;
+
+inline void FullGC(v8::Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
+}
+
+inline void YoungGC(v8::Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectGarbage(
+ i::NEW_SPACE, i::GarbageCollectionReason::kTesting);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/unittests/heap/lab-unittest.cc
index 1ca4699c1c..98451854b0 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/unittests/heap/lab-unittest.cc
@@ -1,46 +1,30 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <vector>
-
-#include "src/common/globals.h"
-#include "src/heap/heap-inl.h"
+#include "include/v8-internal.h"
+#include "src/heap/heap.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/objects/objects.h"
-#include "test/cctest/cctest.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
namespace heap {
-static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
+namespace {
+
+Address AllocateLabBackingStore(Heap* heap, size_t size_in_bytes) {
AllocationResult result = heap->old_space()->AllocateRawAligned(
static_cast<int>(size_in_bytes), kDoubleAligned);
Address adr = result.ToObjectChecked().address();
return adr;
}
-
-static void VerifyIterable(v8::internal::Address base,
- v8::internal::Address limit,
- std::vector<intptr_t> expected_size) {
- CHECK_LE(base, limit);
- HeapObject object;
- size_t counter = 0;
- while (base < limit) {
- object = HeapObject::FromAddress(base);
- CHECK(object.IsFreeSpaceOrFiller());
- CHECK_LT(counter, expected_size.size());
- CHECK_EQ(expected_size[counter], object.Size());
- base += object.Size();
- counter++;
- }
-}
-
-static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
- intptr_t size_in_bytes,
- AllocationAlignment alignment = kTaggedAligned) {
+bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
+ size_t size_in_bytes,
+ AllocationAlignment alignment = kTaggedAligned) {
HeapObject obj;
AllocationResult result =
lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
@@ -52,49 +36,64 @@ static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
return false;
}
-TEST(InvalidLab) {
- LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
- CHECK(!lab.IsValid());
+void VerifyIterable(Address base, Address limit,
+ std::vector<size_t> expected_size) {
+ EXPECT_LE(base, limit);
+ HeapObject object;
+ size_t counter = 0;
+ while (base < limit) {
+ object = HeapObject::FromAddress(base);
+ EXPECT_TRUE(object.IsFreeSpaceOrFiller());
+ EXPECT_LT(counter, expected_size.size());
+ EXPECT_EQ(expected_size[counter], static_cast<size_t>(object.Size()));
+ base += object.Size();
+ counter++;
+ }
}
+} // namespace
+
+using LabTest = TestWithIsolate;
-TEST(UnusedLabImplicitClose) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 4 * KB;
+TEST_F(LabTest, InvalidLab) {
+ LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
+ EXPECT_FALSE(lab.IsValid());
+}
+
+TEST_F(LabTest, UnusedLabImplicitClose) {
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 4 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
- intptr_t expected_sizes_raw[1] = {kLabSize};
- std::vector<intptr_t> expected_sizes(expected_sizes_raw,
- expected_sizes_raw + 1);
+ size_t expected_sizes_raw[1] = {kLabSize};
+ std::vector<size_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 1);
{
AllocationResult lab_backing_store =
AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
- CHECK(lab.IsValid());
+ EXPECT_TRUE(lab.IsValid());
}
VerifyIterable(base, limit, expected_sizes);
}
-
-TEST(SimpleAllocate) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 4 * KB;
+TEST_F(LabTest, SimpleAllocate) {
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 4 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
- intptr_t sizes_raw[1] = {128};
- intptr_t expected_sizes_raw[2] = {128, kLabSize - 128};
- std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 1);
- std::vector<intptr_t> expected_sizes(expected_sizes_raw,
- expected_sizes_raw + 2);
+ size_t sizes_raw[1] = {128};
+ size_t expected_sizes_raw[2] = {128, kLabSize - 128};
+ std::vector<size_t> sizes(sizes_raw, sizes_raw + 1);
+ std::vector<size_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 2);
{
AllocationResult lab_backing_store =
AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
- CHECK(lab.IsValid());
+ EXPECT_TRUE(lab.IsValid());
for (auto size : sizes) {
AllocateFromLab(heap, &lab, size);
}
@@ -102,55 +101,51 @@ TEST(SimpleAllocate) {
VerifyIterable(base, limit, expected_sizes);
}
-
-TEST(AllocateUntilLabOOM) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 2 * KB;
+TEST_F(LabTest, AllocateUntilLabOOM) {
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
// The following objects won't fit in {kLabSize}.
- intptr_t sizes_raw[5] = {512, 512, 128, 512, 512};
- intptr_t expected_sizes_raw[5] = {512, 512, 128, 512, 384 /* left over */};
- std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 5);
- std::vector<intptr_t> expected_sizes(expected_sizes_raw,
- expected_sizes_raw + 5);
- intptr_t sum = 0;
+ size_t sizes_raw[5] = {512, 512, 128, 512, 512};
+ size_t expected_sizes_raw[5] = {512, 512, 128, 512, 384 /* left over */};
+ std::vector<size_t> sizes(sizes_raw, sizes_raw + 5);
+ std::vector<size_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 5);
+ size_t sum = 0;
{
AllocationResult lab_backing_store =
AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
- CHECK(lab.IsValid());
+ EXPECT_TRUE(lab.IsValid());
for (auto size : sizes) {
if (AllocateFromLab(heap, &lab, size)) {
sum += size;
}
}
- CHECK_EQ(kLabSize - sum, 384);
+ EXPECT_EQ(kLabSize - sum, 384u);
}
VerifyIterable(base, limit, expected_sizes);
}
-
-TEST(AllocateExactlyUntilLimit) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 2 * KB;
+TEST_F(LabTest, AllocateExactlyUntilLimit) {
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
- intptr_t sizes_raw[4] = {512, 512, 512, 512};
- intptr_t expected_sizes_raw[5] = {512, 512, 512, 512, 0};
- std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 4);
- std::vector<intptr_t> expected_sizes(expected_sizes_raw,
- expected_sizes_raw + 5);
+ size_t sizes_raw[4] = {512, 512, 512, 512};
+ size_t expected_sizes_raw[5] = {512, 512, 512, 512, 0};
+ std::vector<size_t> sizes(sizes_raw, sizes_raw + 4);
+ std::vector<size_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 5);
{
AllocationResult lab_backing_store =
AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
- CHECK(lab.IsValid());
- intptr_t sum = 0;
+ EXPECT_TRUE(lab.IsValid());
+ size_t sum = 0;
for (auto size : sizes) {
if (AllocateFromLab(heap, &lab, size)) {
sum += size;
@@ -158,41 +153,39 @@ TEST(AllocateExactlyUntilLimit) {
break;
}
}
- CHECK_EQ(kLabSize - sum, 0);
+ EXPECT_EQ(kLabSize - sum, 0u);
}
VerifyIterable(base, limit, expected_sizes);
}
-
-TEST(MergeSuccessful) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 2 * KB;
+TEST_F(LabTest, MergeSuccessful) {
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 2 * KB;
Address base1 = AllocateLabBackingStore(heap, 2 * kLabSize);
Address limit1 = base1 + kLabSize;
Address base2 = limit1;
Address limit2 = base2 + kLabSize;
- intptr_t sizes1_raw[4] = {512, 512, 512, 256};
- intptr_t expected_sizes1_raw[5] = {512, 512, 512, 256, 256};
- std::vector<intptr_t> sizes1(sizes1_raw, sizes1_raw + 4);
- std::vector<intptr_t> expected_sizes1(expected_sizes1_raw,
- expected_sizes1_raw + 5);
+ size_t sizes1_raw[4] = {512, 512, 512, 256};
+ size_t expected_sizes1_raw[5] = {512, 512, 512, 256, 256};
+ std::vector<size_t> sizes1(sizes1_raw, sizes1_raw + 4);
+ std::vector<size_t> expected_sizes1(expected_sizes1_raw,
+ expected_sizes1_raw + 5);
- intptr_t sizes2_raw[5] = {256, 512, 512, 512, 512};
- intptr_t expected_sizes2_raw[10] = {512, 512, 512, 256, 256,
- 512, 512, 512, 512, 0};
- std::vector<intptr_t> sizes2(sizes2_raw, sizes2_raw + 5);
- std::vector<intptr_t> expected_sizes2(expected_sizes2_raw,
- expected_sizes2_raw + 10);
+ size_t sizes2_raw[5] = {256, 512, 512, 512, 512};
+ size_t expected_sizes2_raw[10] = {512, 512, 512, 256, 256,
+ 512, 512, 512, 512, 0};
+ std::vector<size_t> sizes2(sizes2_raw, sizes2_raw + 5);
+ std::vector<size_t> expected_sizes2(expected_sizes2_raw,
+ expected_sizes2_raw + 10);
{
AllocationResult lab_backing_store1 =
AllocationResult::FromObject(HeapObject::FromAddress(base1));
LocalAllocationBuffer lab1 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
- CHECK(lab1.IsValid());
- intptr_t sum = 0;
+ EXPECT_TRUE(lab1.IsValid());
+ size_t sum = 0;
for (auto size : sizes1) {
if (AllocateFromLab(heap, &lab1, size)) {
sum += size;
@@ -205,9 +198,9 @@ TEST(MergeSuccessful) {
AllocationResult::FromObject(HeapObject::FromAddress(base2));
LocalAllocationBuffer lab2 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
- CHECK(lab2.IsValid());
- CHECK(lab2.TryMerge(&lab1));
- CHECK(!lab1.IsValid());
+ EXPECT_TRUE(lab2.IsValid());
+ EXPECT_TRUE(lab2.TryMerge(&lab1));
+ EXPECT_FALSE(lab1.IsValid());
for (auto size : sizes2) {
if (AllocateFromLab(heap, &lab2, size)) {
sum += size;
@@ -215,17 +208,15 @@ TEST(MergeSuccessful) {
break;
}
}
- CHECK_EQ(2 * kLabSize - sum, 0);
+ EXPECT_EQ(2 * kLabSize - sum, 0u);
}
VerifyIterable(base1, limit1, expected_sizes1);
VerifyIterable(base1, limit2, expected_sizes2);
}
-
-TEST(MergeFailed) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 2 * KB;
+TEST_F(LabTest, MergeFailed) {
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 2 * KB;
Address base1 = AllocateLabBackingStore(heap, 3 * kLabSize);
Address base2 = base1 + kLabSize;
Address base3 = base2 + kLabSize;
@@ -235,46 +226,46 @@ TEST(MergeFailed) {
AllocationResult::FromObject(HeapObject::FromAddress(base1));
LocalAllocationBuffer lab1 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
- CHECK(lab1.IsValid());
+ EXPECT_TRUE(lab1.IsValid());
AllocationResult lab_backing_store2 =
AllocationResult::FromObject(HeapObject::FromAddress(base2));
LocalAllocationBuffer lab2 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
- CHECK(lab2.IsValid());
+ EXPECT_TRUE(lab2.IsValid());
AllocationResult lab_backing_store3 =
AllocationResult::FromObject(HeapObject::FromAddress(base3));
LocalAllocationBuffer lab3 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store3, kLabSize);
- CHECK(lab3.IsValid());
+ EXPECT_TRUE(lab3.IsValid());
- CHECK(!lab3.TryMerge(&lab1));
+ EXPECT_FALSE(lab3.TryMerge(&lab1));
}
}
-TEST(AllocateAligned) {
- // The test works only for configurations with 32-bit tagged values.
- if (kTaggedSize != kUInt32Size) return;
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- const int kLabSize = 2 * KB;
+TEST_F(LabTest, AllocateAligned) {
+ if (kTaggedSize != kUInt32Size)
+ GTEST_SKIP() << "Test only works with 32-bit tagged values.";
+
+ Heap* heap = isolate()->heap();
+ const size_t kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
- std::pair<intptr_t, AllocationAlignment> sizes_raw[2] = {
+ std::pair<size_t, AllocationAlignment> sizes_raw[2] = {
std::make_pair(116, kTaggedAligned), std::make_pair(64, kDoubleAligned)};
std::vector<std::pair<intptr_t, AllocationAlignment>> sizes(sizes_raw,
sizes_raw + 2);
- intptr_t expected_sizes_raw[4] = {116, 4, 64, 1864};
- std::vector<intptr_t> expected_sizes(expected_sizes_raw,
- expected_sizes_raw + 4);
+ size_t expected_sizes_raw[4] = {116, 4, 64, 1864};
+ std::vector<size_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 4);
{
AllocationResult lab_backing_store =
AllocationResult::FromObject(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
- CHECK(lab.IsValid());
+ EXPECT_TRUE(lab.IsValid());
for (auto pair : sizes) {
if (!AllocateFromLab(heap, &lab, pair.first, pair.second)) {
break;
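
The hunks above migrate the local-allocation-buffer tests from cctest-style TEST() macros to gtest fixtures. A minimal sketch of the resulting pattern, assuming the TestWithIsolate mixin stack from test-utils.h (the test body below is illustrative and not taken from the patch; the heap.h include path is assumed):

#include "src/heap/heap.h"  // assumed location of Heap/LocalAllocationBuffer
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {
namespace {

// The fixture alias gives each TEST_F its own isolate; fixture accessors
// replace the CcTest globals and gtest EXPECT_* replaces cctest CHECK().
using LabTest = TestWithIsolate;

TEST_F(LabTest, IllustrativeSketch) {
  Heap* heap = isolate()->heap();
  EXPECT_NE(nullptr, heap);
  LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
  EXPECT_FALSE(lab.IsValid());
}

}  // namespace
}  // namespace internal
}  // namespace v8
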
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 1519a1a8c1..effd54290f 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -278,14 +278,14 @@ TEST(TypedSlotSet, ClearInvalidSlots) {
set.Insert(type, i * kHostDelta);
}
- std::map<uint32_t, uint32_t> invalid_ranges;
+ TypedSlotSet::FreeRangesMap invalid_ranges;
for (uint32_t i = 1; i < entries; i += 2) {
invalid_ranges.insert(
std::pair<uint32_t, uint32_t>(i * kHostDelta, i * kHostDelta + 1));
}
set.ClearInvalidSlots(invalid_ranges);
- for (std::map<uint32_t, uint32_t>::iterator it = invalid_ranges.begin();
+ for (TypedSlotSet::FreeRangesMap::iterator it = invalid_ranges.begin();
it != invalid_ranges.end(); ++it) {
uint32_t start = it->first;
uint32_t end = it->second;
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index aba0bdb964..9f6dc506de 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -236,14 +236,32 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
// This test is currently incompatible with the sandbox. Enable it
// once the VirtualAddressSpace interface is stable.
#if !V8_OS_FUCHSIA && !V8_SANDBOX
-class SequentialUnmapperTest : public TestWithIsolate {
+
+template <typename TMixin>
+class SequentialUnmapperTestMixin : public TMixin {
+ public:
+ SequentialUnmapperTestMixin();
+ ~SequentialUnmapperTestMixin() override;
+};
+
+class SequentialUnmapperTest : public //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ SequentialUnmapperTestMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>>> {
public:
SequentialUnmapperTest() = default;
~SequentialUnmapperTest() override = default;
SequentialUnmapperTest(const SequentialUnmapperTest&) = delete;
SequentialUnmapperTest& operator=(const SequentialUnmapperTest&) = delete;
- static void SetUpTestCase() {
+ static void FreeProcessWidePtrComprCageForTesting() {
+ IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
+ }
+
+ static void DoMixinSetUp() {
CHECK_NULL(tracking_page_allocator_);
old_page_allocator_ = GetPlatformPageAllocator();
tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
@@ -266,11 +284,9 @@ class SequentialUnmapperTest : public TestWithIsolate {
#endif
IsolateAllocator::InitializeOncePerProcess();
#endif
- TestWithIsolate::SetUpTestCase();
}
- static void TearDownTestCase() {
- TestWithIsolate::TearDownTestCase();
+ static void DoMixinTearDown() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Free the process-wide cage reservation, otherwise the pages won't be
// freed until process teardown.
@@ -308,19 +324,27 @@ TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;
+template <typename TMixin>
+SequentialUnmapperTestMixin<TMixin>::SequentialUnmapperTestMixin() {
+ SequentialUnmapperTest::DoMixinSetUp();
+}
+template <typename TMixin>
+SequentialUnmapperTestMixin<TMixin>::~SequentialUnmapperTestMixin() {
+ SequentialUnmapperTest::DoMixinTearDown();
+}
+
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
if (FLAG_enable_third_party_heap) return;
- Page* page = allocator()->AllocatePage(
- MemoryAllocator::kRegular,
- MemoryChunkLayout::AllocatableMemoryInDataPage(),
- static_cast<PagedSpace*>(heap()->old_space()),
- Executability::NOT_EXECUTABLE);
+ Page* page =
+ allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
+ static_cast<PagedSpace*>(heap()->old_space()),
+ Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
- allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
+ allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
@@ -341,17 +365,16 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
if (FLAG_enable_third_party_heap) return;
- Page* page = allocator()->AllocatePage(
- MemoryAllocator::kRegular,
- MemoryChunkLayout::AllocatableMemoryInDataPage(),
- static_cast<PagedSpace*>(heap()->old_space()),
- Executability::NOT_EXECUTABLE);
+ Page* page =
+ allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
+ static_cast<PagedSpace*>(heap()->old_space()),
+ Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
- allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
+ allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();
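
The mixin above replaces the suite-level SetUpTestCase/TearDownTestCase hooks with per-test construction: SequentialUnmapperTestMixin sits below the isolate mixins in the inheritance chain, so its constructor runs after the platform mixin but before any isolate exists, and its destructor runs once the isolate is gone. A hedged sketch of that ordering, using a hypothetical mixin name that is not part of the V8 sources:

// Hypothetical mixin, shown only to illustrate the construction order the
// patch relies on.
template <typename TMixin>
class WithTrackedResourceMixin : public TMixin {
 public:
  WithTrackedResourceMixin() {
    // Runs after TMixin (e.g. WithDefaultPlatformMixin) is constructed and
    // before any isolate mixin layered on top creates its isolate.
  }
  ~WithTrackedResourceMixin() override {
    // Runs after the isolate mixins above have torn their isolate down and
    // before TMixin disposes the platform.
  }
};

// Layered the same way as SequentialUnmapperTest in the hunk above:
//   WithInternalIsolateMixin<WithIsolateScopeMixin<WithIsolateMixin<
//       WithTrackedResourceMixin<WithDefaultPlatformMixin<::testing::Test>>>>>
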
diff --git a/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
new file mode 100644
index 0000000000..7b43acb8bf
--- /dev/null
+++ b/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
@@ -0,0 +1,82 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+
+template <typename TMixin>
+class WithSingleThreadedDefaultPlatformMixin : public TMixin {
+ public:
+ WithSingleThreadedDefaultPlatformMixin() {
+ platform_ = v8::platform::NewSingleThreadedDefaultPlatform();
+ CHECK_NOT_NULL(platform_.get());
+ v8::V8::InitializePlatform(platform_.get());
+#ifdef V8_SANDBOX
+ CHECK(v8::V8::InitializeSandbox());
+#endif // V8_SANDBOX
+ v8::V8::Initialize();
+ }
+
+ ~WithSingleThreadedDefaultPlatformMixin() override {
+ CHECK_NOT_NULL(platform_.get());
+ v8::V8::Dispose();
+ v8::V8::DisposePlatform();
+ }
+
+ v8::Platform* platform() const { return platform_.get(); }
+
+ private:
+ std::unique_ptr<v8::Platform> platform_;
+};
+
+class SingleThreadedDefaultPlatformTest
+ : public WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithSingleThreadedDefaultPlatformMixin< //
+ ::testing::Test>>> {
+ public:
+ static void SetUpTestSuite() {
+ CHECK_NULL(save_flags_);
+ save_flags_ = new i::SaveFlags();
+ v8::V8::SetFlagsFromString("--single-threaded");
+ WithIsolateScopeMixin::SetUpTestSuite();
+ }
+
+ static void TearDownTestSuite() {
+ WithIsolateScopeMixin::TearDownTestSuite();
+ CHECK_NOT_NULL(save_flags_);
+ delete save_flags_;
+ save_flags_ = nullptr;
+ }
+
+ private:
+ static i::SaveFlags* save_flags_;
+};
+
+// static
+i::SaveFlags* SingleThreadedDefaultPlatformTest::save_flags_;
+
+TEST_F(SingleThreadedDefaultPlatformTest, SingleThreadedDefaultPlatform) {
+ {
+ i::HandleScope scope(i_isolate());
+ v8::Local<Context> env = Context::New(isolate());
+ v8::Context::Scope context_scope(env);
+
+ RunJS(
+ "function f() {"
+ " for (let i = 0; i < 10; i++)"
+ " (new Array(10)).fill(0);"
+ " return 0;"
+ "}"
+ "f();");
+ }
+
+ CollectGarbage(i::NEW_SPACE);
+ CollectAllAvailableGarbage();
+}
+
+} // namespace v8
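
For context, an embedder-style sketch of the initialization sequence the new mixin wraps; the calls are the public ones already used in the file above, while the wrapper function itself is illustrative:

#include <memory>

#include "include/libplatform/libplatform.h"
#include "include/v8-initialization.h"

// Illustrative helper (not a V8 API): bring V8 up on the single-threaded
// platform, do some work, and tear it down again.
void RunWithSingleThreadedPlatform() {
  std::unique_ptr<v8::Platform> platform =
      v8::platform::NewSingleThreadedDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
  // ... create an isolate and run JavaScript; no worker threads are spawned,
  // so GC and compilation happen on the calling thread ...
  v8::V8::Dispose();
  v8::V8::DisposePlatform();
  // |platform| must stay alive until DisposePlatform() has returned.
}
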
diff --git a/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc b/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
index 2f03b17327..f69127bf9e 100644
--- a/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
+++ b/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
@@ -4,6 +4,8 @@
#include "src/logging/runtime-call-stats.h"
+#include <atomic>
+
#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/base/atomic-utils.h"
@@ -21,7 +23,8 @@ namespace internal {
namespace {
-static base::TimeTicks runtime_call_stats_test_time_ = base::TimeTicks();
+static std::atomic<base::TimeTicks> runtime_call_stats_test_time_ =
+ base::TimeTicks();
// Time source used for the RuntimeCallTimer during tests. We cannot rely on
// the native timer since it's too unpredictable on the build bots.
static base::TimeTicks RuntimeCallStatsTestNow() {
@@ -47,14 +50,14 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
TracingFlags::runtime_stats.store(0, std::memory_order_relaxed);
}
- static void SetUpTestCase() {
- TestWithIsolate::SetUpTestCase();
+ static void SetUpTestSuite() {
+ TestWithIsolate::SetUpTestSuite();
 // Use a custom time source to precisely emulate system time.
RuntimeCallTimer::Now = &RuntimeCallStatsTestNow;
}
- static void TearDownTestCase() {
- TestWithIsolate::TearDownTestCase();
+ static void TearDownTestSuite() {
+ TestWithIsolate::TearDownTestSuite();
// Restore the original time source.
RuntimeCallTimer::Now = &base::TimeTicks::Now;
}
diff --git a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
index a7e3ce7980..8150a55ae2 100644
--- a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
+++ b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
@@ -52,23 +52,24 @@ TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Handle<CodeT> code(function->code(), isolate);
BytecodeOffset bailout_id(1);
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- bailout_id);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ bailout_id);
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
EXPECT_EQ(osr_cache->length(), kInitialLength);
HeapObject sfi_entry;
- osr_cache->Get(OSROptimizedCodeCache::kSharedOffset)
+ osr_cache->RawGetForTesting(OSROptimizedCodeCache::kSharedOffset)
->GetHeapObject(&sfi_entry);
EXPECT_EQ(sfi_entry, *shared);
HeapObject code_entry;
- osr_cache->Get(OSROptimizedCodeCache::kCachedCodeOffset)
+ osr_cache->RawGetForTesting(OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&code_entry);
EXPECT_EQ(code_entry, *code);
Smi osr_offset_entry;
- osr_cache->Get(OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&osr_offset_entry);
+ osr_cache->RawGetForTesting(OSROptimizedCodeCache::kOsrIdOffset)
+ ->ToSmi(&osr_offset_entry);
EXPECT_EQ(osr_offset_entry.value(), bailout_id.ToInt());
}
@@ -87,30 +88,30 @@ TEST_F(TestWithNativeContext, GrowCodeCache) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
}
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
EXPECT_EQ(osr_cache->length(), kInitialLength);
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), kInitialLength * 2);
int index = kInitialLength;
HeapObject sfi_entry;
- osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
->GetHeapObject(&sfi_entry);
EXPECT_EQ(sfi_entry, *shared);
HeapObject code_entry;
- osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&code_entry);
EXPECT_EQ(code_entry, *code);
Smi osr_offset_entry;
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
->ToSmi(&osr_offset_entry);
EXPECT_EQ(osr_offset_entry.value(), bailout_id);
}
@@ -130,8 +131,8 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
}
base::ScopedVector<char> source1(1024);
@@ -139,26 +140,22 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
Handle<CodeT> code1(function1->code(), isolate);
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
+ BytecodeOffset(bailout_id));
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
- EXPECT_EQ(osr_cache->GetOptimizedCode(shared, BytecodeOffset(0), isolate),
- *code);
- EXPECT_EQ(
- osr_cache->GetOptimizedCode(shared1, BytecodeOffset(bailout_id), isolate),
- *code1);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
+ EXPECT_EQ(osr_cache->TryGet(*shared, BytecodeOffset(0), isolate), *code);
+ EXPECT_EQ(osr_cache->TryGet(*shared1, BytecodeOffset(bailout_id), isolate),
+ *code1);
RunJS("%DeoptimizeFunction(f1)");
- EXPECT_TRUE(
- osr_cache->GetOptimizedCode(shared1, BytecodeOffset(bailout_id), isolate)
- .is_null());
-
- osr_cache->Set(OSROptimizedCodeCache::kCachedCodeOffset,
- HeapObjectReference::ClearedValue(isolate));
- EXPECT_TRUE(osr_cache->GetOptimizedCode(shared, BytecodeOffset(0), isolate)
+ EXPECT_TRUE(osr_cache->TryGet(*shared1, BytecodeOffset(bailout_id), isolate)
.is_null());
+
+ osr_cache->RawSetForTesting(OSROptimizedCodeCache::kCachedCodeOffset,
+ HeapObjectReference::ClearedValue(isolate));
+ EXPECT_TRUE(osr_cache->TryGet(*shared, BytecodeOffset(0), isolate).is_null());
}
TEST_F(TestWithNativeContext, MaxCapacityCache) {
@@ -177,11 +174,11 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
int bailout_id = 0;
// Add max_capacity - 1 entries.
for (bailout_id = 0; bailout_id < kMaxEntries - 1; bailout_id++) {
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
}
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
EXPECT_EQ(osr_cache->length(), kMaxLength);
// Add an entry to reach max capacity.
@@ -190,22 +187,23 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
Handle<CodeT> code1(function1->code(), isolate);
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
- BytecodeOffset(bailout_id));
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
+ BytecodeOffset(bailout_id));
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), kMaxLength);
int index = (kMaxEntries - 1) * OSROptimizedCodeCache::kEntryLength;
HeapObject object;
Smi smi;
- osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *shared1);
- osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *code1);
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
// Add an entry beyond max capacity.
@@ -215,20 +213,21 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
Handle<CodeT> code2(function2->code(), isolate);
bailout_id++;
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
- BytecodeOffset(bailout_id));
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared2, code2,
+ BytecodeOffset(bailout_id));
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), kMaxLength);
index = 0;
- osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *shared2);
- osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *code2);
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
}
@@ -249,41 +248,44 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
int expected_length = kInitialLength * 2;
int bailout_id = 0;
for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
}
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
int clear_index1 = 0;
int clear_index2 = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
- osr_cache->Set(clear_index1 + OSROptimizedCodeCache::kSharedOffset,
- HeapObjectReference::ClearedValue(isolate));
- osr_cache->Set(clear_index2 + OSROptimizedCodeCache::kCachedCodeOffset,
- HeapObjectReference::ClearedValue(isolate));
+ osr_cache->RawSetForTesting(
+ clear_index1 + OSROptimizedCodeCache::kSharedOffset,
+ HeapObjectReference::ClearedValue(isolate));
+ osr_cache->RawSetForTesting(
+ clear_index2 + OSROptimizedCodeCache::kCachedCodeOffset,
+ HeapObjectReference::ClearedValue(isolate));
base::ScopedVector<char> source1(1024);
GetSource(&source1, 1);
Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
Handle<CodeT> code1(function1->code(), isolate);
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
- BytecodeOffset(bailout_id));
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
+ BytecodeOffset(bailout_id));
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
int index = clear_index1;
HeapObject object;
Smi smi;
- osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *shared1);
- osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *code1);
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
base::ScopedVector<char> source2(1024);
@@ -292,20 +294,21 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
Handle<CodeT> code2(function2->code(), isolate);
bailout_id++;
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
- BytecodeOffset(bailout_id));
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared2, code2,
+ BytecodeOffset(bailout_id));
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
index = clear_index2;
- osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *shared2);
- osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
->GetHeapObject(&object);
EXPECT_EQ(object, *code2);
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->ToSmi(&smi);
EXPECT_EQ(smi.value(), bailout_id);
}
@@ -335,37 +338,45 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesNoCompact) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
if (bailout_id == deopt_id1 || bailout_id == deopt_id2) {
- OSROptimizedCodeCache::AddOptimizedCode(
- native_context, deopt_shared, deopt_code, BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, deopt_shared,
+ deopt_code, BytecodeOffset(bailout_id));
} else {
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
}
}
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
RunJS("%DeoptimizeFunction(f1)");
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
int index = (num_entries - 2) * OSROptimizedCodeCache::kEntryLength;
- EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
- ->IsCleared());
- EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
- ->IsCleared());
EXPECT_TRUE(
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->IsCleared());
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
+ ->IsCleared());
+ EXPECT_TRUE(
+ osr_cache
+ ->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->IsCleared());
+ EXPECT_TRUE(
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->IsCleared());
index = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
- EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
- ->IsCleared());
- EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
- ->IsCleared());
EXPECT_TRUE(
- osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->IsCleared());
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
+ ->IsCleared());
+ EXPECT_TRUE(
+ osr_cache
+ ->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
+ ->IsCleared());
+ EXPECT_TRUE(
+ osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+ ->IsCleared());
}
TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
@@ -392,20 +403,20 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
if (bailout_id % 2 == 0) {
- OSROptimizedCodeCache::AddOptimizedCode(
- native_context, deopt_shared, deopt_code, BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, deopt_shared,
+ deopt_code, BytecodeOffset(bailout_id));
} else {
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BytecodeOffset(bailout_id));
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ BytecodeOffset(bailout_id));
}
}
- Handle<OSROptimizedCodeCache> osr_cache(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+ isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
RunJS("%DeoptimizeFunction(f1)");
- osr_cache = Handle<OSROptimizedCodeCache>(
- native_context->GetOSROptimizedCodeCache(), isolate);
+ osr_cache =
+ Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
EXPECT_EQ(osr_cache->length(), kInitialLength);
}
diff --git a/deps/v8/test/unittests/objects/swiss-hash-table-helpers-unittest.cc b/deps/v8/test/unittests/objects/swiss-hash-table-helpers-unittest.cc
new file mode 100644
index 0000000000..850b2571a3
--- /dev/null
+++ b/deps/v8/test/unittests/objects/swiss-hash-table-helpers-unittest.cc
@@ -0,0 +1,109 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/swiss-hash-table-helpers.h"
+
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock-matchers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::ElementsAre;
+
+namespace v8 {
+namespace internal {
+namespace swiss_table {
+
+template <typename T>
+class SwissTableGroupTest : public testing::Test {};
+
+using GroupTypes = testing::Types<
+#if V8_SWISS_TABLE_HAVE_SSE2_HOST
+ GroupSse2Impl,
+#endif
+ GroupSse2Polyfill, GroupPortableImpl>;
+TYPED_TEST_SUITE(SwissTableGroupTest, GroupTypes);
+
+// Tests imported from Abseil's raw_hash_set_test.cc, modified to be
+// parameterized.
+
+TYPED_TEST(SwissTableGroupTest, EmptyGroup) {
+ const ctrl_t kEmptyGroup[16] = {
+ kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
+ kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
+ };
+ for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(TypeParam{kEmptyGroup}.Match(h));
+}
+
+TYPED_TEST(SwissTableGroupTest, Match) {
+ if (TypeParam::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(TypeParam{group}.Match(0), ElementsAre());
+ EXPECT_THAT(TypeParam{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
+ EXPECT_THAT(TypeParam{group}.Match(3), ElementsAre(3, 10));
+ EXPECT_THAT(TypeParam{group}.Match(5), ElementsAre(5, 9));
+ EXPECT_THAT(TypeParam{group}.Match(7), ElementsAre(7, 8));
+ } else if (TypeParam::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(TypeParam{group}.Match(0), ElementsAre());
+ EXPECT_THAT(TypeParam{group}.Match(1), ElementsAre(1, 5, 7));
+ EXPECT_THAT(TypeParam{group}.Match(2), ElementsAre(2, 4));
+ } else {
+ FAIL() << "No test coverage for kWidth==" << TypeParam::kWidth;
+ }
+}
+
+TYPED_TEST(SwissTableGroupTest, MatchEmpty) {
+ if (TypeParam::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(TypeParam{group}.MatchEmpty(), ElementsAre(0, 4));
+ } else if (TypeParam::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(TypeParam{group}.MatchEmpty(), ElementsAre(0));
+ } else {
+ FAIL() << "No test coverage for kWidth==" << TypeParam::kWidth;
+ }
+}
+
+TYPED_TEST(SwissTableGroupTest, MatchEmptyOrDeleted) {
+ if (TypeParam::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(TypeParam{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+ } else if (TypeParam::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(TypeParam{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+ } else {
+ FAIL() << "No test coverage for kWidth==" << TypeParam::kWidth;
+ }
+}
+
+TYPED_TEST(SwissTableGroupTest, CountLeadingEmptyOrDeleted) {
+ const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
+ const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+
+ for (ctrl_t empty : empty_examples) {
+ std::vector<ctrl_t> e(TypeParam::kWidth, empty);
+ EXPECT_EQ(TypeParam::kWidth,
+ TypeParam{e.data()}.CountLeadingEmptyOrDeleted());
+ for (ctrl_t full : full_examples) {
+ for (size_t i = 0; i != TypeParam::kWidth; ++i) {
+ std::vector<ctrl_t> f(TypeParam::kWidth, empty);
+ f[i] = full;
+ EXPECT_EQ(i, TypeParam{f.data()}.CountLeadingEmptyOrDeleted());
+ }
+ std::vector<ctrl_t> f(TypeParam::kWidth, empty);
+ f[TypeParam::kWidth * 2 / 3] = full;
+ f[TypeParam::kWidth / 2] = full;
+ EXPECT_EQ(TypeParam::kWidth / 2,
+ TypeParam{f.data()}.CountLeadingEmptyOrDeleted());
+ }
+ }
+}
+
+} // namespace swiss_table
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index d3bec38c40..41c4a5fb33 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -2334,6 +2334,23 @@ TEST_F(ValueSerializerTest, DecodeTypedArrayBackwardsCompatiblity) {
});
}
+TEST_F(ValueSerializerTest, DecodeTypedArrayBrokenData) {
+ // Test decoding the broken data where the version is 13 but the
+ // JSArrayBufferView flags are present.
+
+ // The data below is produced by the following code + changing the version
+ // to 13:
+ // std::vector<uint8_t> encoded =
+ // EncodeTest("({ a: new Uint8Array(), b: 13 })");
+
+ Local<Value> value = DecodeTest({0xFF, 0xD, 0x6F, 0x22, 0x1, 0x61, 0x42,
+ 0x0, 0x56, 0x42, 0x0, 0x0, 0xE8, 0x47,
+ 0x22, 0x1, 0x62, 0x49, 0x1A, 0x7B, 0x2});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("Object.getPrototypeOf(result.a) === Uint8Array.prototype");
+ ExpectScriptTrue("result.b === 13");
+}
+
TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
// Byte offset out of range.
InvalidDecodeTest(
@@ -2466,14 +2483,14 @@ class ValueSerializerTestWithSharedArrayBufferClone
return sab;
}
- static void SetUpTestCase() {
+ static void SetUpTestSuite() {
flag_was_enabled_ = i::FLAG_harmony_sharedarraybuffer;
i::FLAG_harmony_sharedarraybuffer = true;
- ValueSerializerTest::SetUpTestCase();
+ ValueSerializerTest::SetUpTestSuite();
}
- static void TearDownTestCase() {
- ValueSerializerTest::TearDownTestCase();
+ static void TearDownTestSuite() {
+ ValueSerializerTest::TearDownTestSuite();
i::FLAG_harmony_sharedarraybuffer = flag_was_enabled_;
flag_was_enabled_ = false;
}
@@ -2923,14 +2940,14 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
}
protected:
- static void SetUpTestCase() {
+ static void SetUpTestSuite() {
g_saved_flag = i::FLAG_expose_wasm;
i::FLAG_expose_wasm = true;
- ValueSerializerTest::SetUpTestCase();
+ ValueSerializerTest::SetUpTestSuite();
}
- static void TearDownTestCase() {
- ValueSerializerTest::TearDownTestCase();
+ static void TearDownTestSuite() {
+ ValueSerializerTest::TearDownTestSuite();
i::FLAG_expose_wasm = g_saved_flag;
g_saved_flag = false;
}
@@ -3281,5 +3298,20 @@ TEST_F(ValueSerializerTest, NonStringErrorStack) {
EXPECT_TRUE(stack->IsUndefined());
}
+TEST_F(ValueSerializerTest, InvalidLegacyFormatData) {
+ std::vector<uint8_t> data = {0xFF, 0x0, 0xDE, 0xAD, 0xDA, 0xDA};
+ Local<Context> context = deserialization_context();
+ Context::Scope scope(context);
+ TryCatch try_catch(isolate());
+ ValueDeserializer deserializer(isolate(), &data[0],
+ static_cast<int>(data.size()),
+ GetDeserializerDelegate());
+ deserializer.SetSupportsLegacyWireFormat(true);
+ BeforeDecode(&deserializer);
+ CHECK(deserializer.ReadHeader(context).FromMaybe(false));
+ CHECK(deserializer.ReadValue(context).IsEmpty());
+ CHECK(try_catch.HasCaught());
+}
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 8437ac0acb..fba903688d 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -8,34 +8,21 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-initialization.h"
#include "src/base/compiler-specific.h"
+#include "src/base/page-allocator.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace {
-class DefaultPlatformEnvironment final : public ::testing::Environment {
+class CppGCEnvironment final : public ::testing::Environment {
public:
- DefaultPlatformEnvironment() = default;
-
void SetUp() override {
- platform_ = v8::platform::NewDefaultPlatform(
- 0, v8::platform::IdleTaskSupport::kEnabled);
- ASSERT_TRUE(platform_.get() != nullptr);
- v8::V8::InitializePlatform(platform_.get());
-#ifdef V8_SANDBOX
- ASSERT_TRUE(v8::V8::InitializeSandbox());
-#endif
- cppgc::InitializeProcess(platform_->GetPageAllocator());
- v8::V8::Initialize();
- }
-
- void TearDown() override {
- ASSERT_TRUE(platform_.get() != nullptr);
- v8::V8::Dispose();
- v8::V8::DisposePlatform();
+ // Initialize the process for cppgc with an arbitrary page allocator. This
+ // has to survive as long as the process, so it's ok to leak the allocator
+ // here.
+ cppgc::InitializeProcess(new v8::base::PageAllocator());
}
- private:
- std::unique_ptr<v8::Platform> platform_;
+ void TearDown() override { cppgc::ShutdownProcess(); }
};
} // namespace
@@ -50,7 +37,7 @@ int main(int argc, char** argv) {
testing::FLAGS_gtest_death_test_style = "threadsafe";
testing::InitGoogleMock(&argc, argv);
- testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
+ testing::AddGlobalTestEnvironment(new CppGCEnvironment);
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::V8::InitializeICUDefaultLocation(argv[0]);
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index 0bdf866ae0..5ad5279a72 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -35,14 +35,14 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
AccountingAllocator* allocator() { return allocator_; }
- static void SetUpTestCase() {
+ static void SetUpTestSuite() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
- TestWithNativeContext::SetUpTestCase();
+ TestWithNativeContext::SetUpTestSuite();
}
- static void TearDownTestCase() {
- TestWithNativeContext::TearDownTestCase();
+ static void TearDownTestSuite() {
+ TestWithNativeContext::TearDownTestSuite();
CHECK_NOT_NULL(save_flags_);
delete save_flags_;
save_flags_ = nullptr;
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index eef418113a..2f6fc83f7a 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -83,5 +83,23 @@ SaveFlags::~SaveFlags() {
#undef FLAG_MODE_APPLY
}
+ManualGCScope::ManualGCScope(i::Isolate* isolate) {
+ // Some tests run threaded (back-to-back) and thus the GC may already be
+ // running by the time a ManualGCScope is created. Finalizing existing marking
+ // prevents any undefined/unexpected behavior.
+ if (isolate && isolate->heap()->incremental_marking()->IsMarking()) {
+ isolate->heap()->CollectGarbage(i::OLD_SPACE,
+ i::GarbageCollectionReason::kTesting);
+ }
+
+ i::FLAG_concurrent_marking = false;
+ i::FLAG_concurrent_sweeping = false;
+ i::FLAG_stress_incremental_marking = false;
+ i::FLAG_stress_concurrent_allocation = false;
+ // Parallel marking has a dependency on concurrent marking.
+ i::FLAG_parallel_marking = false;
+ i::FLAG_detect_ineffective_gcs_near_heap_limit = false;
+}
+
} // namespace internal
} // namespace v8
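
The new ManualGCScope constructor above finalizes any in-flight marking and then disables the concurrent/stress GC flags; the flags are restored by the SaveFlags base when the scope ends. A usage sketch with a hypothetical test name, assuming a fixture that exposes i_isolate():

// Hypothetical test; only ManualGCScope and the fixture accessors are real.
TEST_F(TestWithIsolate, DeterministicGCSketch) {
  ManualGCScope manual_gc_scope(i_isolate());
  // Within the scope, GCs triggered by the test are single-threaded and not
  // perturbed by stress flags, so object liveness is predictable.
  // ... allocate objects, trigger explicit GCs, assert on the results ...
}
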
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index cac8980b68..dec5c25209 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,6 +8,7 @@
#include <memory>
#include <vector>
+#include "include/libplatform/libplatform.h"
#include "include/v8-array-buffer.h"
#include "include/v8-context.h"
#include "include/v8-local-handle.h"
@@ -26,6 +27,32 @@ namespace v8 {
class ArrayBufferAllocator;
+template <typename TMixin>
+class WithDefaultPlatformMixin : public TMixin {
+ public:
+ WithDefaultPlatformMixin() {
+ platform_ = v8::platform::NewDefaultPlatform(
+ 0, v8::platform::IdleTaskSupport::kEnabled);
+ CHECK_NOT_NULL(platform_.get());
+ v8::V8::InitializePlatform(platform_.get());
+#ifdef V8_SANDBOX
+ CHECK(v8::V8::InitializeSandbox());
+#endif // V8_SANDBOX
+ v8::V8::Initialize();
+ }
+
+ ~WithDefaultPlatformMixin() {
+ CHECK_NOT_NULL(platform_.get());
+ v8::V8::Dispose();
+ v8::V8::DisposePlatform();
+ }
+
+ v8::Platform* platform() const { return platform_.get(); }
+
+ private:
+ std::unique_ptr<v8::Platform> platform_;
+};
+
using CounterMap = std::map<std::string, int>;
enum CountersMode { kNoCounters, kEnableCounters };
@@ -74,7 +101,48 @@ class WithIsolateScopeMixin : public TMixin {
return reinterpret_cast<v8::internal::Isolate*>(this->v8_isolate());
}
+ Local<Value> RunJS(const char* source) {
+ return RunJS(
+ v8::String::NewFromUtf8(this->v8_isolate(), source).ToLocalChecked());
+ }
+
+ Local<Value> RunJS(v8::String::ExternalOneByteStringResource* source) {
+ return RunJS(v8::String::NewExternalOneByte(this->v8_isolate(), source)
+ .ToLocalChecked());
+ }
+
+ void CollectGarbage(i::AllocationSpace space) {
+ i_isolate()->heap()->CollectGarbage(space,
+ i::GarbageCollectionReason::kTesting);
+ }
+
+ void CollectAllGarbage() {
+ i_isolate()->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
+ }
+
+ void CollectAllAvailableGarbage() {
+ i_isolate()->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kTesting);
+ }
+
+ void PreciseCollectAllGarbage() {
+ i_isolate()->heap()->PreciseCollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
+ }
+
+ v8::Local<v8::String> NewString(const char* string) {
+ return v8::String::NewFromUtf8(this->v8_isolate(), string).ToLocalChecked();
+ }
+
private:
+ Local<Value> RunJS(Local<String> source) {
+ auto context = this->v8_isolate()->GetCurrentContext();
+ Local<Script> script =
+ v8::Script::Compile(context, source).ToLocalChecked();
+ return script->Run(context).ToLocalChecked();
+ }
+
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
};
@@ -90,53 +158,38 @@ class WithContextMixin : public TMixin {
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
- Local<Value> RunJS(const char* source) {
- return RunJS(
- v8::String::NewFromUtf8(this->v8_isolate(), source).ToLocalChecked());
- }
-
- Local<Value> RunJS(v8::String::ExternalOneByteStringResource* source) {
- return RunJS(v8::String::NewExternalOneByte(this->v8_isolate(), source)
- .ToLocalChecked());
- }
-
- v8::Local<v8::String> NewString(const char* string) {
- return v8::String::NewFromUtf8(this->v8_isolate(), string).ToLocalChecked();
- }
-
void SetGlobalProperty(const char* name, v8::Local<v8::Value> value) {
CHECK(v8_context()
->Global()
- ->Set(v8_context(), NewString(name), value)
+ ->Set(v8_context(), TMixin::NewString(name), value)
.FromJust());
}
private:
- Local<Value> RunJS(Local<String> source) {
- auto context = this->v8_isolate()->GetCurrentContext();
- Local<Script> script =
- v8::Script::Compile(context, source).ToLocalChecked();
- return script->Run(context).ToLocalChecked();
- }
-
v8::Local<v8::Context> context_;
v8::Context::Scope context_scope_;
};
+using TestWithPlatform = //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>;
+
// Use v8::internal::TestWithIsolate if you are testing internals,
// aka. directly work with Handles.
-using TestWithIsolate = //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
- ::testing::Test>>;
+using TestWithIsolate = //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>;
// Use v8::internal::TestWithNativeContext if you are testing internals,
// aka. directly work with Handles.
-using TestWithContext = //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
- ::testing::Test>>>;
+using TestWithContext = //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>>;
namespace internal {
@@ -196,43 +249,50 @@ class WithZoneMixin : public TMixin {
Zone zone_;
};
-using TestWithIsolate = //
- WithInternalIsolateMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
- ::testing::Test>>>;
-
-using TestWithZone = WithZoneMixin<::testing::Test>;
-
-using TestWithIsolateAndZone = //
- WithZoneMixin< //
- WithInternalIsolateMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
+using TestWithIsolate = //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
::testing::Test>>>>;
-using TestWithNativeContext = //
- WithInternalIsolateMixin< //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
- ::testing::Test>>>>;
+using TestWithZone = WithZoneMixin<WithDefaultPlatformMixin< //
+ ::testing::Test>>;
-using TestWithNativeContextAndCounters = //
- WithInternalIsolateMixin< //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
- ::testing::Test, kEnableCounters>>>>;
-
-using TestWithNativeContextAndZone = //
- WithZoneMixin< //
- WithInternalIsolateMixin< //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithIsolateMixin< //
+using TestWithIsolateAndZone = //
+ WithZoneMixin< //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
::testing::Test>>>>>;
+using TestWithNativeContext = //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>>>;
+
+using TestWithNativeContextAndCounters = //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>,
+ kEnableCounters>>>>;
+
+using TestWithNativeContextAndZone = //
+ WithZoneMixin< //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithDefaultPlatformMixin< //
+ ::testing::Test>>>>>>;
+
class V8_NODISCARD SaveFlags {
public:
SaveFlags();
@@ -254,6 +314,16 @@ inline void PrintTo(Smi o, ::std::ostream* os) {
*os << reinterpret_cast<void*>(o.ptr());
}
+// ManualGCScope allows for disabling GC heuristics. This is useful for tests
+// that want to check specific corner cases around GC.
+//
+// The scope will finalize any ongoing GC on the provided Isolate.
+class V8_NODISCARD ManualGCScope final : private SaveFlags {
+ public:
+ explicit ManualGCScope(i::Isolate* isolate);
+ ~ManualGCScope() = default;
+};
+
} // namespace internal
} // namespace v8
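
The net effect of the header changes: every fixture now owns its v8::Platform through WithDefaultPlatformMixin at the bottom of the mixin stack, instead of relying on a platform installed once by the test runner. A hedged sketch of composing a custom fixture the same way (the suite name is hypothetical):

// Hypothetical fixture built from the mixins declared above. Bases are
// constructed innermost-first, so per test the order is: platform, isolate,
// isolate scope; destruction runs in reverse.
class MyEmbedderStyleTest                //
    : public WithIsolateScopeMixin<      //
          WithIsolateMixin<              //
              WithDefaultPlatformMixin<  //
                  ::testing::Test>>> {};

TEST_F(MyEmbedderStyleTest, RunsSomeScript) {
  // RunJS() now lives in WithIsolateScopeMixin, so it is available even to
  // fixtures that do not pull in WithContextMixin, provided a context is
  // entered first.
  v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
  v8::Context::Scope context_scope(context);
  EXPECT_TRUE(RunJS("6 * 7")->IsNumber());
}
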
diff --git a/deps/v8/test/unittests/testcfg.py b/deps/v8/test/unittests/testcfg.py
index d1e0968724..c3aadd8402 100644
--- a/deps/v8/test/unittests/testcfg.py
+++ b/deps/v8/test/unittests/testcfg.py
@@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import os
from testrunner.local import command
diff --git a/deps/v8/test/unittests/utils/allocation-unittest.cc b/deps/v8/test/unittests/utils/allocation-unittest.cc
index b6f8d8699f..af0eb5a612 100644
--- a/deps/v8/test/unittests/utils/allocation-unittest.cc
+++ b/deps/v8/test/unittests/utils/allocation-unittest.cc
@@ -4,6 +4,8 @@
#include "src/utils/allocation.h"
+#include "test/unittests/test-utils.h"
+
#if V8_OS_POSIX
#include <setjmp.h>
#include <signal.h>
@@ -29,7 +31,7 @@ namespace {
// We don't test the execution permission because to do so we'd have to
// dynamically generate code and test if we can execute it.
-class MemoryAllocationPermissionsTest : public ::testing::Test {
+class MemoryAllocationPermissionsTest : public TestWithPlatform {
static void SignalHandler(int signal, siginfo_t* info, void*) {
siglongjmp(continuation_, 1);
}
@@ -127,9 +129,9 @@ TEST_F(MemoryAllocationPermissionsTest, DoTest) {
// Basic tests of allocation.
-class AllocationTest : public ::testing::Test {};
+class AllocationTest : public TestWithPlatform {};
-TEST(AllocationTest, AllocateAndFree) {
+TEST_F(AllocationTest, AllocateAndFree) {
size_t page_size = v8::internal::AllocatePageSize();
CHECK_NE(0, page_size);
@@ -154,7 +156,7 @@ TEST(AllocationTest, AllocateAndFree) {
v8::internal::FreePages(page_allocator, aligned_mem_addr, kAllocationSize);
}
-TEST(AllocationTest, ReserveMemory) {
+TEST_F(AllocationTest, ReserveMemory) {
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
size_t page_size = v8::internal::AllocatePageSize();
const size_t kAllocationSize = 1 * v8::internal::MB;
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index b2afc18bc5..02f741f6e7 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -8,6 +8,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/canonical-types.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
@@ -89,6 +90,7 @@ class TestModuleBuilder {
byte AddSignature(const FunctionSig* sig, uint32_t supertype = kNoSuperType) {
mod.add_signature(sig, supertype);
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
+ GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
byte AddFunction(const FunctionSig* sig, bool declared = true) {
@@ -131,12 +133,14 @@ class TestModuleBuilder {
type_builder.AddField(field.first, field.second);
}
mod.add_struct_type(type_builder.Build(), supertype);
+ GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
byte AddArray(ValueType type, bool mutability) {
ArrayType* array = mod.signature_zone->New<ArrayType>(type, mutability);
mod.add_array_type(array, kNoSuperType);
+ GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
@@ -340,7 +344,7 @@ class FunctionBodyDecoderTestBase : public WithZoneMixin<BaseTest> {
}
};
-using FunctionBodyDecoderTest = FunctionBodyDecoderTestBase<::testing::Test>;
+using FunctionBodyDecoderTest = FunctionBodyDecoderTestBase<TestWithPlatform>;
TEST_F(FunctionBodyDecoderTest, Int32Const1) {
byte code[] = {kExprI32Const, 0};
@@ -3639,32 +3643,6 @@ TEST_F(FunctionBodyDecoderTest, StructNewDefaultWithRtt) {
}
}
-TEST_F(FunctionBodyDecoderTest, NominalStructSubtyping) {
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- byte structural_type = builder.AddStruct({F(kWasmI32, true)});
- byte nominal_type = builder.AddStruct({F(kWasmI32, true)});
- AddLocals(optref(structural_type), 1);
- AddLocals(optref(nominal_type), 1);
- // Try to assign a nominally-typed value to a structurally-typed local.
- ExpectFailure(sigs.v_v(),
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT(nominal_type))},
- kAppendEnd, "expected type (ref null 0)");
- // Try to assign a structurally-typed value to a nominally-typed local.
- ExpectFailure(sigs.v_v(),
- {WASM_LOCAL_SET(
- 1, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- structural_type, WASM_RTT_CANON(structural_type)))},
- kAppendEnd, "expected type (ref null 1)");
- // But assigning to the correctly typed local works.
- ExpectValidates(sigs.v_v(),
- {WASM_LOCAL_SET(1, WASM_STRUCT_NEW_DEFAULT(nominal_type))});
- ExpectValidates(sigs.v_v(),
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
- structural_type,
- WASM_RTT_CANON(structural_type)))});
-}
-
TEST_F(FunctionBodyDecoderTest, DefaultableLocal) {
WASM_FEATURE_SCOPE(typed_funcref);
AddLocals(kWasmAnyRef, 1);
@@ -5164,7 +5142,8 @@ TEST_F(BytecodeIteratorTest, WithLocalDecls) {
******************************************************************************/
class FunctionBodyDecoderTestOnBothMemoryTypes
- : public FunctionBodyDecoderTestBase<::testing::TestWithParam<MemoryType>> {
+ : public FunctionBodyDecoderTestBase<
+ WithDefaultPlatformMixin<::testing::TestWithParam<MemoryType>>> {
public:
bool is_memory64() const { return GetParam() == kMemory64; }
};
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index f33a8d8c1d..ef6cc8bcd8 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -166,10 +166,12 @@ namespace module_decoder_unittest {
} \
} while (false)
-#define EXPECT_NOT_OK(result, msg) \
- do { \
- EXPECT_FALSE(result.ok()); \
- EXPECT_THAT(result.error().message(), HasSubstr(msg)); \
+#define EXPECT_NOT_OK(result, msg) \
+ do { \
+ EXPECT_FALSE(result.ok()); \
+ if (!result.ok()) { \
+ EXPECT_THAT(result.error().message(), HasSubstr(msg)); \
+ } \
} while (false)
static size_t SizeOfVarInt(size_t value) {
@@ -803,7 +805,7 @@ TEST_F(WasmModuleVerifyTest, RttCanonGlobalTypeError) {
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI64Code, true))),
SECTION(Global, ENTRY_COUNT(1), WASM_RTT(0), 1, WASM_RTT_CANON(1),
kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
@@ -1189,6 +1191,46 @@ TEST_F(WasmModuleVerifyTest, InvalidArrayTypeDef) {
EXPECT_VERIFIES(immutable);
}
+TEST_F(WasmModuleVerifyTest, TypeCanonicalization) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(wasm_type_canonicalization);
+ static const byte identical_group[] = {
+ SECTION(Type, // --
+ ENTRY_COUNT(2), // two identical rec. groups
+ kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1), // --
+ kWasmArrayTypeCode, kI32Code, 0, // --
+ kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1), // --
+ kWasmArrayTypeCode, kI32Code, 0),
+ SECTION(Global, // --
+ ENTRY_COUNT(1), kRefCode, 0, 0, // Type, mutability
+ WASM_ARRAY_INIT_STATIC(1, 1, WASM_I32V(10)),
+ kExprEnd) // Init. expression
+ };
+
+ // The global initializer should verify, since its type canonicalizes to the identical type in the other group.
+ EXPECT_VERIFIES(identical_group);
+
+ static const byte non_identical_group[] = {
+ SECTION(Type, // --
+ ENTRY_COUNT(2), // two distinct rec. groups
+ kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1), // --
+ kWasmArrayTypeCode, kI32Code, 0, // --
+ kWasmRecursiveTypeGroupCode, ENTRY_COUNT(2), // --
+ kWasmArrayTypeCode, kI32Code, 0, // --
+ kWasmStructTypeCode, ENTRY_COUNT(0)),
+ SECTION(Global, // --
+ ENTRY_COUNT(1), kRefCode, 0, 0, // Type, mutability
+ WASM_ARRAY_INIT_STATIC(1, 1, WASM_I32V(10)),
+ kExprEnd) // Init. expression
+ };
+
+ // The global initializer should not verify, since its type is in a distinct rec. group.
+ EXPECT_FAILURE_WITH_MSG(
+ non_identical_group,
+ "type error in init. expression[0] (expected (ref 0), got (ref 1))");
+}
+
TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -3389,13 +3431,13 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
EXPECT_NOT_OK(result, "data segments count 0 mismatch (1 expected)");
}
-/* TODO(7748): Add support for rec. groups.
TEST_F(WasmModuleVerifyTest, GcStructIdsPass) {
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(
- Type, ENTRY_COUNT(3),
+ Type, ENTRY_COUNT(1), // One recursive group...
+ kWasmRecursiveTypeGroupCode, ENTRY_COUNT(3), // with three entries.
WASM_STRUCT_DEF(FIELD_COUNT(3), STRUCT_FIELD(kI32Code, true),
STRUCT_FIELD(WASM_OPT_REF(0), true),
STRUCT_FIELD(WASM_OPT_REF(1), true)),
@@ -3404,7 +3446,7 @@ TEST_F(WasmModuleVerifyTest, GcStructIdsPass) {
WASM_ARRAY_DEF(WASM_OPT_REF(0), true))};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
-}*/
+}
TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInGlobal) {
WASM_FEATURE_SCOPE(typed_funcref);
@@ -3424,7 +3466,6 @@ TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInType) {
EXPECT_NOT_OK(result, "Type index 1 is out of bounds");
}
-// TODO(7748): Add support for rec. groups.
TEST_F(WasmModuleVerifyTest, ForwardSupertype) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
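The hardened EXPECT_NOT_OK macro near the top of this file re-checks result.ok() before touching result.error(): EXPECT_FALSE is non-fatal in gtest, so a decode that unexpectedly succeeds would otherwise fall through and read an error value that does not exist. A minimal self-contained sketch of the same pattern, using a hypothetical FakeResult type instead of the decoder's ModuleResult:

#include <optional>
#include <string>

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical stand-in for a result type with ok() and error().
struct FakeResult {
  std::optional<std::string> error_message;
  bool ok() const { return !error_message.has_value(); }
  const std::string& error() const { return *error_message; }
};

// Guarded form: the message check only runs when an error is really present,
// so an unexpected success yields one clean EXPECT_FALSE failure instead of
// dereferencing a missing error.
#define EXPECT_NOT_OK_SKETCH(result, msg)                     \
  do {                                                        \
    EXPECT_FALSE((result).ok());                              \
    if (!(result).ok()) {                                     \
      EXPECT_THAT((result).error(), testing::HasSubstr(msg)); \
    }                                                         \
  } while (false)

TEST(ExpectNotOkSketch, ChecksMessageOnlyWhenPresent) {
  FakeResult failed{"type error in init. expression"};
  EXPECT_NOT_OK_SKETCH(failed, "type error");
  // For FakeResult{} (an ok result), the macro would report a single
  // EXPECT_FALSE failure and skip the message check entirely.
}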
diff --git a/deps/v8/test/unittests/wasm/subtyping-unittest.cc b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
index 2dea26453a..9ad4731415 100644
--- a/deps/v8/test/unittests/wasm/subtyping-unittest.cc
+++ b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/wasm/canonical-types.h"
#include "src/wasm/wasm-subtyping.h"
+#include "test/common/flag-utils.h"
#include "test/common/wasm/flag-utils.h"
#include "test/unittests/test-utils.h"
@@ -11,7 +13,7 @@ namespace internal {
namespace wasm {
namespace subtyping_unittest {
-class WasmSubtypingTest : public ::testing::Test {};
+class WasmSubtypingTest : public TestWithPlatform {};
using FieldInit = std::pair<ValueType, bool>;
constexpr ValueType ref(uint32_t index) {
@@ -25,29 +27,41 @@ FieldInit mut(ValueType type) { return FieldInit(type, true); }
FieldInit immut(ValueType type) { return FieldInit(type, false); }
void DefineStruct(WasmModule* module, std::initializer_list<FieldInit> fields,
- uint32_t supertype = kNoSuperType) {
+ uint32_t supertype = kNoSuperType,
+ bool in_singleton_rec_group = true) {
StructType::Builder builder(module->signature_zone.get(),
static_cast<uint32_t>(fields.size()));
for (FieldInit field : fields) {
builder.AddField(field.first, field.second);
}
- return module->add_struct_type(builder.Build(), supertype);
+ module->add_struct_type(builder.Build(), supertype);
+ if (in_singleton_rec_group) {
+ GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
+ }
}
void DefineArray(WasmModule* module, FieldInit element_type,
- uint32_t supertype = kNoSuperType) {
+ uint32_t supertype = kNoSuperType,
+ bool in_singleton_rec_group = true) {
module->add_array_type(module->signature_zone->New<ArrayType>(
element_type.first, element_type.second),
supertype);
+ if (in_singleton_rec_group) {
+ GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
+ }
}
void DefineSignature(WasmModule* module,
std::initializer_list<ValueType> params,
std::initializer_list<ValueType> returns,
- uint32_t supertype = kNoSuperType) {
+ uint32_t supertype = kNoSuperType,
+ bool in_singleton_rec_group = true) {
module->add_signature(
FunctionSig::Build(module->signature_zone.get(), returns, params),
supertype);
+ if (in_singleton_rec_group) {
+ GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
+ }
}
TEST_F(WasmSubtypingTest, Subtyping) {
@@ -79,6 +93,37 @@ TEST_F(WasmSubtypingTest, Subtyping) {
/* 14 */ DefineSignature(module, {ref(0)}, {kWasmI32}, 13);
/* 15 */ DefineSignature(module, {ref(0)}, {ref(4)}, 16);
/* 16 */ DefineSignature(module, {ref(0)}, {ref(0)});
+ /* 17 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(17))});
+
+ // Rec. group.
+ /* 18 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(17))}, 17,
+ false);
+ /* 19 */ DefineArray(module, {mut(optRef(21))}, kNoSuperType, false);
+ /* 20 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
+ false);
+ /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false);
+ GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
+
+ // Identical rec. group.
+ /* 22 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(17))}, 17,
+ false);
+ /* 23 */ DefineArray(module, {mut(optRef(25))}, kNoSuperType, false);
+ /* 24 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
+ false);
+ /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false);
+ GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
+
+ // Nonidentical rec. group: the last function extends a type outside the
+ // recursive group.
+ /* 26 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(17))}, 17,
+ false);
+ /* 27 */ DefineArray(module, {mut(optRef(29))}, kNoSuperType, false);
+ /* 28 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
+ false);
+ /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false);
+ GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
+
+ /* 30 */ DefineStruct(module, {mut(kWasmI32), immut(optRef(18))}, 18);
}
constexpr ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
@@ -88,6 +133,7 @@ TEST_F(WasmSubtypingTest, Subtyping) {
optRef(0), ref(0), optRef(2),
ref(2), optRef(11), ref(11)};
+// Some macros to help manage types and modules.
#define SUBTYPE(type1, type2) \
EXPECT_TRUE(IsSubtypeOf(type1, type2, module1, module))
#define SUBTYPE_IFF(type1, type2, condition) \
@@ -102,10 +148,20 @@ TEST_F(WasmSubtypingTest, Subtyping) {
#define NOT_VALID_SUBTYPE(type1, type2) \
EXPECT_FALSE(ValidSubtypeDefinition(type1.ref_index(), type2.ref_index(), \
module1, module));
+#define IDENTICAL(index1, index2) \
+ EXPECT_TRUE(EquivalentTypes(ValueType::Ref(index1, kNullable), \
+ ValueType::Ref(index2, kNullable), module1, \
+ module));
+#define DISTINCT(index1, index2) \
+ EXPECT_FALSE(EquivalentTypes(ValueType::Ref(index1, kNullable), \
+ ValueType::Ref(index2, kNullable), module1, \
+ module));
+
+ for (WasmModule* module : {module1, module2}) {
+ // For cross-module subtyping, we need to enable type canonicalization.
+ // Type judgements across modules should work the same as within one module.
+ FLAG_VALUE_SCOPE(wasm_type_canonicalization, module == module2);
- // Type judgements across modules should work the same as within one module.
- // TODO(7748): add module2 once we have a cross-module story.
- for (WasmModule* module : {module1 /* , module2 */}) {
// Value types are unrelated, except if they are equal.
for (ValueType subtype : numeric_types) {
for (ValueType supertype : numeric_types) {
@@ -183,9 +239,6 @@ TEST_F(WasmSubtypingTest, Subtyping) {
SUBTYPE(ValueType::Rtt(5), ValueType::Rtt(5));
// Rtts of unrelated types are unrelated.
NOT_SUBTYPE(ValueType::Rtt(1), ValueType::Rtt(2));
- // Rtts of identical types are subtype-related.
- // TODO(7748): Implement type canonicalization.
- // SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(9));
// Rtts of subtypes are not related.
NOT_SUBTYPE(ValueType::Rtt(1), ValueType::Rtt(0));
@@ -201,10 +254,42 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Identical types are subtype-related.
VALID_SUBTYPE(ref(10), ref(10));
VALID_SUBTYPE(ref(11), ref(11));
+
+ {
+ // Canonicalization tests.
+ FLAG_SCOPE(wasm_type_canonicalization);
+
+ // Groups should only be canonicalized to identical groups.
+ IDENTICAL(18, 22);
+ IDENTICAL(19, 23);
+ IDENTICAL(20, 24);
+ IDENTICAL(21, 25);
+
+ DISTINCT(18, 26);
+ DISTINCT(19, 27);
+ DISTINCT(20, 28);
+ DISTINCT(21, 29);
+
+ // A type should not be canonicalized to an identical one with a different
+ // group structure.
+ DISTINCT(18, 17);
+
+ // A subtype should also be subtype of an equivalent type.
+ VALID_SUBTYPE(ref(30), ref(18));
+ VALID_SUBTYPE(ref(30), ref(22));
+ NOT_SUBTYPE(ref(30), ref(26));
+
+ // Rtts of identical types are subtype-related.
+ SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17));
+ }
}
#undef SUBTYPE
#undef NOT_SUBTYPE
#undef SUBTYPE_IFF
+#undef VALID_SUBTYPE
+#undef NOT_VALID_SUBTYPE
+#undef IDENTICAL
+#undef DISTINCT
}
} // namespace subtyping_unittest
diff --git a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
index b90fafeee9..53522f3a90 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
@@ -9,6 +9,7 @@
#include "src/base/page-allocator.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/allocation.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -23,7 +24,7 @@ i::Address g_start_address;
// on if V8 doesn't handle the exception. This allows tools like ASan to
// register a handler early on during the process startup and still generate
// stack traces on failures.
-class ExceptionHandlerFallbackTest : public ::testing::Test {
+class ExceptionHandlerFallbackTest : public v8::TestWithPlatform {
protected:
void SetUp() override {
// Register this handler as the last handler.
diff --git a/deps/v8/test/unittests/zone/zone-allocator-unittest.cc b/deps/v8/test/unittests/zone/zone-allocator-unittest.cc
index c0c18843ac..6f43d34983 100644
--- a/deps/v8/test/unittests/zone/zone-allocator-unittest.cc
+++ b/deps/v8/test/unittests/zone/zone-allocator-unittest.cc
@@ -3,12 +3,16 @@
// found in the LICENSE file.
#include "src/zone/zone-allocator.h"
+
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
-TEST(RecyclingZoneAllocator, ReuseSameSize) {
+class RecyclingZoneAllocatorTest : public TestWithPlatform {};
+
+TEST_F(RecyclingZoneAllocatorTest, ReuseSameSize) {
AccountingAllocator accounting_allocator;
Zone zone(&accounting_allocator, ZONE_NAME);
RecyclingZoneAllocator<int> zone_allocator(&zone);
@@ -18,7 +22,7 @@ TEST(RecyclingZoneAllocator, ReuseSameSize) {
CHECK_EQ(zone_allocator.allocate(10), allocated);
}
-TEST(RecyclingZoneAllocator, ReuseSmallerSize) {
+TEST_F(RecyclingZoneAllocatorTest, ReuseSmallerSize) {
AccountingAllocator accounting_allocator;
Zone zone(&accounting_allocator, ZONE_NAME);
RecyclingZoneAllocator<int> zone_allocator(&zone);
@@ -28,7 +32,7 @@ TEST(RecyclingZoneAllocator, ReuseSmallerSize) {
CHECK_EQ(zone_allocator.allocate(10), allocated);
}
-TEST(RecyclingZoneAllocator, DontReuseTooSmallSize) {
+TEST_F(RecyclingZoneAllocatorTest, DontReuseTooSmallSize) {
AccountingAllocator accounting_allocator;
Zone zone(&accounting_allocator, ZONE_NAME);
RecyclingZoneAllocator<int> zone_allocator(&zone);
@@ -40,7 +44,7 @@ TEST(RecyclingZoneAllocator, DontReuseTooSmallSize) {
CHECK_NE(zone_allocator.allocate(1), allocated);
}
-TEST(RecyclingZoneAllocator, ReuseMultipleSize) {
+TEST_F(RecyclingZoneAllocatorTest, ReuseMultipleSize) {
AccountingAllocator accounting_allocator;
Zone zone(&accounting_allocator, ZONE_NAME);
RecyclingZoneAllocator<int> zone_allocator(&zone);
@@ -56,7 +60,7 @@ TEST(RecyclingZoneAllocator, ReuseMultipleSize) {
CHECK_EQ(zone_allocator.allocate(10), allocated1);
}
-TEST(RecyclingZoneAllocator, DontChainSmallerSizes) {
+TEST_F(RecyclingZoneAllocatorTest, DontChainSmallerSizes) {
AccountingAllocator accounting_allocator;
Zone zone(&accounting_allocator, ZONE_NAME);
RecyclingZoneAllocator<int> zone_allocator(&zone);
diff --git a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
index d982c1e0ec..697d29c28f 100644
--- a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
+++ b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
@@ -6,6 +6,7 @@
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -13,7 +14,9 @@ namespace internal {
const size_t kItemCount = size_t(1) << 10;
-TEST(ZoneChunkList, ForwardIterationTest) {
+class ZoneChunkListTest : public TestWithPlatform {};
+
+TEST_F(ZoneChunkListTest, ForwardIterationTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -33,7 +36,7 @@ TEST(ZoneChunkList, ForwardIterationTest) {
EXPECT_EQ(count, kItemCount);
}
-TEST(ZoneChunkList, ReverseIterationTest) {
+TEST_F(ZoneChunkListTest, ReverseIterationTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -53,7 +56,7 @@ TEST(ZoneChunkList, ReverseIterationTest) {
EXPECT_EQ(count, kItemCount);
}
-TEST(ZoneChunkList, PushFrontTest) {
+TEST_F(ZoneChunkListTest, PushFrontTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -73,7 +76,7 @@ TEST(ZoneChunkList, PushFrontTest) {
EXPECT_EQ(count, kItemCount);
}
-TEST(ZoneChunkList, RewindTest) {
+TEST_F(ZoneChunkListTest, RewindTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -120,7 +123,7 @@ TEST(ZoneChunkList, RewindTest) {
EXPECT_EQ(count, zone_chunk_list.size());
}
-TEST(ZoneChunkList, FindTest) {
+TEST_F(ZoneChunkListTest, FindTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -139,7 +142,7 @@ TEST(ZoneChunkList, FindTest) {
EXPECT_EQ(*zone_chunk_list.Find(index), 42u);
}
-TEST(ZoneChunkList, CopyToTest) {
+TEST_F(ZoneChunkListTest, CopyToTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -158,7 +161,7 @@ TEST(ZoneChunkList, CopyToTest) {
}
}
-TEST(ZoneChunkList, SmallCopyToTest) {
+TEST_F(ZoneChunkListTest, SmallCopyToTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -182,7 +185,7 @@ struct Fubar {
size_t b_;
};
-TEST(ZoneChunkList, BigCopyToTest) {
+TEST_F(ZoneChunkListTest, BigCopyToTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -214,7 +217,7 @@ void TestForwardIterationOfConstList(
EXPECT_EQ(count, kItemCount);
}
-TEST(ZoneChunkList, ConstForwardIterationTest) {
+TEST_F(ZoneChunkListTest, ConstForwardIterationTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -227,7 +230,7 @@ TEST(ZoneChunkList, ConstForwardIterationTest) {
TestForwardIterationOfConstList(zone_chunk_list);
}
-TEST(ZoneChunkList, RewindAndIterate) {
+TEST_F(ZoneChunkListTest, RewindAndIterate) {
// Regression test for https://bugs.chromium.org/p/v8/issues/detail?id=7478
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -267,7 +270,7 @@ TEST(ZoneChunkList, RewindAndIterate) {
}
}
-TEST(ZoneChunkList, PushBackPopBackSize) {
+TEST_F(ZoneChunkListTest, PushBackPopBackSize) {
// Regression test for https://bugs.chromium.org/p/v8/issues/detail?id=7489
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -280,7 +283,7 @@ TEST(ZoneChunkList, PushBackPopBackSize) {
CHECK_EQ(size_t(0), zone_chunk_list.size());
}
-TEST(ZoneChunkList, AdvanceZeroTest) {
+TEST_F(ZoneChunkListTest, AdvanceZeroTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -297,7 +300,7 @@ TEST(ZoneChunkList, AdvanceZeroTest) {
CHECK_EQ(iterator_advance, zone_chunk_list.begin());
}
-TEST(ZoneChunkList, AdvancePartwayTest) {
+TEST_F(ZoneChunkListTest, AdvancePartwayTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -318,7 +321,7 @@ TEST(ZoneChunkList, AdvancePartwayTest) {
CHECK_EQ(iterator_advance, iterator_one_by_one);
}
-TEST(ZoneChunkList, AdvanceEndTest) {
+TEST_F(ZoneChunkListTest, AdvanceEndTest) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
@@ -335,7 +338,7 @@ TEST(ZoneChunkList, AdvanceEndTest) {
CHECK_EQ(iterator_advance, zone_chunk_list.end());
}
-TEST(ZoneChunkList, FindOverChunkBoundary) {
+TEST_F(ZoneChunkListTest, FindOverChunkBoundary) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
diff --git a/deps/v8/test/unittests/zone/zone-unittest.cc b/deps/v8/test/unittests/zone/zone-unittest.cc
index b063848990..b10c240c14 100644
--- a/deps/v8/test/unittests/zone/zone-unittest.cc
+++ b/deps/v8/test/unittests/zone/zone-unittest.cc
@@ -5,20 +5,23 @@
#include "src/zone/zone.h"
#include "src/zone/accounting-allocator.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
+class ZoneTest : public TestWithPlatform {};
+
// This struct is just a type tag for Zone::Allocate<T>(size_t) call.
-struct ZoneTest {};
+struct ZoneTestTag {};
-TEST(Zone, 8ByteAlignment) {
+TEST_F(ZoneTest, 8ByteAlignment) {
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
for (size_t i = 0; i < 16; ++i) {
- ASSERT_EQ(reinterpret_cast<intptr_t>(zone.Allocate<ZoneTest>(i)) % 8, 0);
+ ASSERT_EQ(reinterpret_cast<intptr_t>(zone.Allocate<ZoneTestTag>(i)) % 8, 0);
}
}
diff --git a/deps/v8/test/wasm-api-tests/testcfg.py b/deps/v8/test/wasm-api-tests/testcfg.py
index f2a48ae56a..5d2a96a7c6 100644
--- a/deps/v8/test/wasm-api-tests/testcfg.py
+++ b/deps/v8/test/wasm-api-tests/testcfg.py
@@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import os
from testrunner.local import command
diff --git a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
index b22030a8a8..1f37dc31c3 100644
--- a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+++ b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -54,7 +54,7 @@
// Note: The test class must be in the same namespace as the class being tested.
// For example, putting MyClassTest in an anonymous namespace will not work.
-#define FRIEND_TEST(test_case_name, test_name)\
-friend class test_case_name##_##test_name##_Test
+#define FRIEND_TEST(test_case_name, test_name) \
+ friend class test_case_name##_##test_name##_Test
#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
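For context on the reformatted FRIEND_TEST macro above: it befriends the class that TEST()/TEST_F() generates for the named test, which is why the test must live in the same namespace as the class under test. A minimal sketch with a hypothetical Counter class (includes use plain gtest paths rather than this repo's vendored ones):

#include "gtest/gtest.h"
#include "gtest/gtest_prod.h"

// Hypothetical class under test, in the same (global) namespace as its test.
class Counter {
 public:
  void Increment() { ++count_; }

 private:
  FRIEND_TEST(CounterTest, StartsAtZero);  // grants only this test access
  int count_ = 0;
};

TEST(CounterTest, StartsAtZero) {
  Counter c;
  // Reading the private member compiles because Counter befriends the
  // generated CounterTest_StartsAtZero_Test class.
  EXPECT_EQ(c.count_, 0);
}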
diff --git a/deps/v8/third_party/inspector_protocol/roll.py b/deps/v8/third_party/inspector_protocol/roll.py
index b807257106..1b7bc54e36 100755
--- a/deps/v8/third_party/inspector_protocol/roll.py
+++ b/deps/v8/third_party/inspector_protocol/roll.py
@@ -1,9 +1,8 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-from __future__ import print_function
import argparse
import sys
import os
@@ -67,7 +66,7 @@ def RunCmd(cmd):
(stdoutdata, stderrdata) = p.communicate()
if p.returncode != 0:
raise Exception('%s: exit status %d', str(cmd), p.returncode)
- return stdoutdata
+ return stdoutdata.decode('utf-8')
def CheckRepoIsClean(path, suffix):
diff --git a/deps/v8/third_party/zlib/README.chromium b/deps/v8/third_party/zlib/README.chromium
index c3c1ef69ad..db159bef60 100644
--- a/deps/v8/third_party/zlib/README.chromium
+++ b/deps/v8/third_party/zlib/README.chromium
@@ -1,7 +1,7 @@
Name: zlib
Short Name: zlib
URL: http://zlib.net/
-Version: 1.2.11
+Version: 1.2.12
CPEPrefix: cpe:/a:zlib:zlib:1.2.11
Security Critical: yes
License: Custom license
@@ -27,3 +27,4 @@ Local Modifications:
- Plus the changes in 'patches' folder.
- Code in contrib/ other than contrib/minizip was added to match zlib's
contributor layout.
+ - Backported patches from 1.2.12 release (Work In Progress).
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c b/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c
index 4bacbc46f6..8d62920a21 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c
+++ b/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c
@@ -95,7 +95,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
code const FAR *dcode; /* local strm->distcode */
unsigned lmask; /* mask for first level of length codes */
unsigned dmask; /* mask for first level of distance codes */
- code here; /* retrieved table entry */
+ code const *here; /* retrieved table entry */
unsigned op; /* code bits, operation, extra bits, or */
/* window position, window bytes to copy */
unsigned len; /* match length, unused bytes */
@@ -139,20 +139,20 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
bits += 8;
#endif
}
- here = lcode[hold & lmask];
+ here = lcode + (hold & lmask);
dolen:
- op = (unsigned)(here.bits);
+ op = (unsigned)(here->bits);
hold >>= op;
bits -= op;
- op = (unsigned)(here.op);
+ op = (unsigned)(here->op);
if (op == 0) { /* literal */
- Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ?
"inflate: literal '%c'\n" :
- "inflate: literal 0x%02x\n", here.val));
- *out++ = (unsigned char)(here.val);
+ "inflate: literal 0x%02x\n", here->val));
+ *out++ = (unsigned char)(here->val);
}
else if (op & 16) { /* length base */
- len = (unsigned)(here.val);
+ len = (unsigned)(here->val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
@@ -182,14 +182,14 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
bits += 8;
#endif
}
- here = dcode[hold & dmask];
+ here = dcode + (hold & dmask);
dodist:
- op = (unsigned)(here.bits);
+ op = (unsigned)(here->bits);
hold >>= op;
bits -= op;
- op = (unsigned)(here.op);
+ op = (unsigned)(here->op);
if (op & 16) { /* distance base */
- dist = (unsigned)(here.val);
+ dist = (unsigned)(here->val);
op &= 15; /* number of extra bits */
if (bits < op) {
#ifdef INFLATE_CHUNK_READ_64LE
@@ -295,7 +295,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
- here = dcode[here.val + (hold & ((1U << op) - 1))];
+ here = dcode + here->val + (hold & ((1U << op) - 1));
goto dodist;
}
else {
@@ -305,7 +305,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
}
}
else if ((op & 64) == 0) { /* 2nd level length code */
- here = lcode[here.val + (hold & ((1U << op) - 1))];
+ here = lcode + here->val + (hold & ((1U << op) - 1));
goto dolen;
}
else if (op & 32) { /* end-of-block */
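The inffast_chunk.c hunks above change `here` from a by-value copy of the decode-table entry (`code here`) to a pointer into the table (`code const *here`): `lcode + (hold & lmask)` is simply the pointer form of `&lcode[hold & lmask]`, and the `.` field accesses become `->`. A small stand-alone sketch of that equivalence, using a stand-in struct rather than zlib's real `code` type:

#include <cassert>
#include <cstdio>

// Stand-in for a decode-table entry (op/bits/val); for illustration only.
struct Entry {
  unsigned char op;
  unsigned char bits;
  unsigned short val;
};

int main() {
  static const Entry table[4] = {
      {0, 7, 'a'}, {16, 8, 3}, {0, 7, 'b'}, {32, 6, 0}};
  const unsigned hold = 0x2;  // pretend bit buffer
  const unsigned mask = 0x3;  // pretend first-level mask

  // Old style: copy the entry out of the table.
  const Entry by_value = table[hold & mask];

  // New style: keep a pointer into the table instead of copying.
  const Entry* by_ptr = table + (hold & mask);  // same as &table[hold & mask]
  assert(by_ptr == &table[hold & mask]);

  // Field access changes from `.` to `->`, but reads the same data.
  assert(by_value.val == by_ptr->val && by_value.op == by_ptr->op);
  std::printf("val=%u op=%u\n", static_cast<unsigned>(by_ptr->val),
              static_cast<unsigned>(by_ptr->op));
  return 0;
}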
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/inflate.c b/deps/v8/third_party/zlib/contrib/optimizations/inflate.c
index 81d558bd6e..4841cd964c 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/inflate.c
+++ b/deps/v8/third_party/zlib/contrib/optimizations/inflate.c
@@ -131,6 +131,7 @@ z_streamp strm;
state->mode = HEAD;
state->last = 0;
state->havedict = 0;
+ state->flags = -1;
state->dmax = 32768U;
state->head = Z_NULL;
state->hold = 0;
@@ -682,7 +683,6 @@ int flush;
state->mode = FLAGS;
break;
}
- state->flags = 0; /* expect zlib header */
if (state->head != Z_NULL)
state->head->done = -1;
if (!(state->wrap & 1) || /* check if zlib header allowed */
@@ -709,6 +709,7 @@ int flush;
break;
}
state->dmax = 1U << len;
+ state->flags = 0; /* indicate zlib header */
Tracev((stderr, "inflate: zlib header ok\n"));
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
@@ -1233,7 +1234,7 @@ int flush;
case LENGTH:
if (state->wrap && state->flags) {
NEEDBITS(32);
- if (hold != (state->total & 0xffffffffUL)) {
+ if ((state->wrap & 4) && hold != (state->total & 0xffffffff)) {
strm->msg = (char *)"incorrect length check";
state->mode = BAD;
break;
@@ -1423,6 +1424,7 @@ int ZEXPORT inflateSync(strm)
z_streamp strm;
{
unsigned len; /* number of bytes to look at or looked at */
+ int flags; /* temporary to save header status */
unsigned long in, out; /* temporary to save total_in and total_out */
unsigned char buf[4]; /* to restore bit buffer to byte string */
struct inflate_state FAR *state;
@@ -1455,9 +1457,15 @@ z_streamp strm;
/* return no joy or set up to restart inflate() on a new block */
if (state->have != 4) return Z_DATA_ERROR;
+ if (state->flags == -1)
+ state->wrap = 0; /* if no header yet, treat as raw */
+ else
+ state->wrap &= ~4; /* no point in computing a check value now */
+ flags = state->flags;
in = strm->total_in; out = strm->total_out;
inflateReset(strm);
strm->total_in = in; strm->total_out = out;
+ state->flags = flags;
state->mode = TYPE;
return Z_OK;
}
@@ -1553,7 +1561,7 @@ int check;
if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
- if (check)
+ if (check && state->wrap)
state->wrap |= 4;
else
state->wrap &= ~4;
diff --git a/deps/v8/third_party/zlib/crc32.c b/deps/v8/third_party/zlib/crc32.c
index d4c3248d98..5ee3bd425e 100644
--- a/deps/v8/third_party/zlib/crc32.c
+++ b/deps/v8/third_party/zlib/crc32.c
@@ -250,7 +250,7 @@ unsigned long ZEXPORT crc32_z(crc, buf, len)
#endif /* DYNAMIC_CRC_TABLE */
#ifdef BYFOUR
- if (sizeof(void *) == sizeof(ptrdiff_t)) {
+ if (sizeof(void *) == sizeof(z_size_t)) {
z_crc_t endian;
endian = 1;
@@ -327,7 +327,7 @@ local unsigned long crc32_little(crc, buf, len)
c = (z_crc_t)crc;
c = ~c;
- while (len && ((ptrdiff_t)buf & 3)) {
+ while (len && ((z_size_t)buf & 3)) {
c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
len--;
}
@@ -367,7 +367,7 @@ local unsigned long crc32_big(crc, buf, len)
c = ZSWAP32((z_crc_t)crc);
c = ~c;
- while (len && ((ptrdiff_t)buf & 3)) {
+ while (len && ((z_size_t)buf & 3)) {
c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
len--;
}
diff --git a/deps/v8/third_party/zlib/deflate.c b/deps/v8/third_party/zlib/deflate.c
index fc7ae45905..5c7c718d29 100644
--- a/deps/v8/third_party/zlib/deflate.c
+++ b/deps/v8/third_party/zlib/deflate.c
@@ -176,10 +176,15 @@ local const config configuration_table[10] = {
/* ===========================================================================
* Initialize the hash table (avoiding 64K overflow for 16 bit systems).
* prev[] will be initialized on the fly.
+ * TODO(cavalcantii): optimization opportunity, check comments on:
+ * https://chromium-review.googlesource.com/c/chromium/src/+/3561506/
*/
#define CLEAR_HASH(s) \
- s->head[s->hash_size-1] = NIL; \
- zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+ do { \
+ s->head[s->hash_size-1] = NIL; \
+ zmemzero((Bytef *)s->head, \
+ (unsigned)(s->hash_size-1)*sizeof(*s->head)); \
+ } while (0)
/* ===========================================================================
* Slide the hash table when sliding the window down (could be avoided with 32
@@ -534,13 +539,13 @@ int ZEXPORT deflateResetKeep (strm)
#ifdef GZIP
s->wrap == 2 ? GZIP_STATE :
#endif
- s->wrap ? INIT_STATE : BUSY_STATE;
+ INIT_STATE;
strm->adler =
#ifdef GZIP
s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
#endif
adler32(0L, Z_NULL, 0);
- s->last_flush = Z_NO_FLUSH;
+ s->last_flush = -2;
_tr_init(s);
@@ -595,7 +600,8 @@ int ZEXPORT deflatePrime (strm, bits, value)
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
s = strm->state;
- if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
+ if (bits < 0 || bits > 16 ||
+ s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
return Z_BUF_ERROR;
do {
put = Buf_size - s->bi_valid;
@@ -633,12 +639,12 @@ int ZEXPORT deflateParams(strm, level, strategy)
func = configuration_table[s->level].func;
if ((strategy != s->strategy || func != configuration_table[level].func) &&
- s->high_water) {
+ s->last_flush != -2) {
/* Flush the last buffer: */
int err = deflate(strm, Z_BLOCK);
if (err == Z_STREAM_ERROR)
return err;
- if (strm->avail_out == 0)
+ if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
return Z_BUF_ERROR;
}
if (s->level != level) {
@@ -857,6 +863,8 @@ int ZEXPORT deflate (strm, flush)
}
/* Write the header */
+ if (s->status == INIT_STATE && s->wrap == 0)
+ s->status = BUSY_STATE;
if (s->status == INIT_STATE) {
/* zlib header */
uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
@@ -1589,6 +1597,8 @@ local void fill_window_c(s)
s->match_start -= wsize;
s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
s->block_start -= (long) wsize;
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
slide_hash(s);
more += wsize;
}
@@ -1818,6 +1828,7 @@ local block_state deflate_stored(s, flush)
s->matches = 2; /* clear hash */
zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
s->strstart = s->w_size;
+ s->insert = s->strstart;
}
else {
if (s->window_size - s->strstart <= used) {
@@ -1826,12 +1837,14 @@ local block_state deflate_stored(s, flush)
zmemcpy(s->window, s->window + s->w_size, s->strstart);
if (s->matches < 2)
s->matches++; /* add a pending slide_hash() */
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
}
zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
s->strstart += used;
+ s->insert += MIN(used, s->w_size - s->insert);
}
s->block_start = s->strstart;
- s->insert += MIN(used, s->w_size - s->insert);
}
if (s->high_water < s->strstart)
s->high_water = s->strstart;
@@ -1846,7 +1859,7 @@ local block_state deflate_stored(s, flush)
return block_done;
/* Fill the window with any remaining input. */
- have = s->window_size - s->strstart - 1;
+ have = s->window_size - s->strstart;
if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
/* Slide the window down. */
s->block_start -= s->w_size;
@@ -1855,12 +1868,15 @@ local block_state deflate_stored(s, flush)
if (s->matches < 2)
s->matches++; /* add a pending slide_hash() */
have += s->w_size; /* more space now */
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
}
if (have > s->strm->avail_in)
have = s->strm->avail_in;
if (have) {
deflate_read_buf(s->strm, s->window + s->strstart, have);
s->strstart += have;
+ s->insert += MIN(have, s->w_size - s->insert);
}
if (s->high_water < s->strstart)
s->high_water = s->strstart;
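The CLEAR_HASH hunk above wraps a two-statement macro body in `do { ... } while (0)`. Without the wrapper, only the first statement is governed by an unbraced `if`, and a following `else` no longer compiles at all. A minimal sketch of the failure mode, using a hypothetical CLEAR_TWO macro rather than zlib's actual code:

#include <cstdio>
#include <cstring>

// Hypothetical two-statement macro written the unsafe way.
#define CLEAR_TWO_UNSAFE(buf, len) \
  (buf)[(len)-1] = 0;              \
  std::memset((buf), 0, (len)-1)

// Same macro wrapped in do/while(0), as the patch does for CLEAR_HASH.
#define CLEAR_TWO_SAFE(buf, len)    \
  do {                              \
    (buf)[(len)-1] = 0;             \
    std::memset((buf), 0, (len)-1); \
  } while (0)

int main() {
  const bool do_clear = false;

  char a[4] = "abc";
  // Only the first statement is guarded; the memset always runs.
  if (do_clear)
    CLEAR_TWO_UNSAFE(a, sizeof a);
  std::printf("unsafe: \"%s\"\n", a);  // prints "" - cleared anyway

  char b[4] = "abc";
  // Nothing runs here, as the caller intended.
  if (do_clear)
    CLEAR_TWO_SAFE(b, sizeof b);
  std::printf("safe:   \"%s\"\n", b);  // prints "abc"
  return 0;
}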
diff --git a/deps/v8/third_party/zlib/google/zip.cc b/deps/v8/third_party/zlib/google/zip.cc
index a52f40690b..1a43196e99 100644
--- a/deps/v8/third_party/zlib/google/zip.cc
+++ b/deps/v8/third_party/zlib/google/zip.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/files/file.h"
#include "base/files/file_enumerator.h"
+#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
@@ -29,7 +30,10 @@ bool IsHiddenFile(const base::FilePath& file_path) {
// Creates a directory at |extract_dir|/|entry_path|, including any parents.
bool CreateDirectory(const base::FilePath& extract_dir,
const base::FilePath& entry_path) {
- return base::CreateDirectory(extract_dir.Append(entry_path));
+ const base::FilePath dir = extract_dir.Append(entry_path);
+ const bool ok = base::CreateDirectory(dir);
+ PLOG_IF(ERROR, !ok) << "Cannot create directory " << Redact(dir);
+ return ok;
}
// Creates a WriterDelegate that can write a file at |extract_dir|/|entry_path|.
@@ -172,11 +176,14 @@ bool Unzip(const base::FilePath& src_file,
UnzipOptions options) {
base::File file(src_file, base::File::FLAG_OPEN | base::File::FLAG_READ);
if (!file.IsValid()) {
- LOG(ERROR) << "Cannot open " << Redact(src_file) << ": "
- << base::File::ErrorToString(file.error_details());
+ PLOG(ERROR) << "Cannot open " << Redact(src_file) << ": "
+ << base::File::ErrorToString(file.error_details());
return false;
}
+ DLOG_IF(WARNING, !base::IsDirectoryEmpty(dest_dir))
+ << "ZIP extraction directory is not empty: " << dest_dir;
+
return Unzip(file.GetPlatformFile(),
base::BindRepeating(&CreateFilePathWriterDelegate, dest_dir),
base::BindRepeating(&CreateDirectory, dest_dir),
@@ -199,7 +206,9 @@ bool Unzip(const base::PlatformFile& src_file,
while (const ZipReader::Entry* const entry = reader.Next()) {
if (entry->is_unsafe) {
LOG(ERROR) << "Found unsafe entry " << Redact(entry->path) << " in ZIP";
- return false;
+ if (!options.continue_on_error)
+ return false;
+ continue;
}
if (options.filter && !options.filter.Run(entry->path)) {
@@ -209,8 +218,11 @@ bool Unzip(const base::PlatformFile& src_file,
if (entry->is_directory) {
// It's a directory.
- if (!directory_creator.Run(entry->path))
- return false;
+ if (!directory_creator.Run(entry->path)) {
+ LOG(ERROR) << "Cannot create directory " << Redact(entry->path);
+ if (!options.continue_on_error)
+ return false;
+ }
continue;
}
@@ -220,7 +232,8 @@ bool Unzip(const base::PlatformFile& src_file,
if (!writer || !reader.ExtractCurrentEntry(writer.get())) {
LOG(ERROR) << "Cannot extract file " << Redact(entry->path)
<< " from ZIP";
- return false;
+ if (!options.continue_on_error)
+ return false;
}
}
diff --git a/deps/v8/third_party/zlib/google/zip.h b/deps/v8/third_party/zlib/google/zip.h
index 0928bbd12a..25ec655caf 100644
--- a/deps/v8/third_party/zlib/google/zip.h
+++ b/deps/v8/third_party/zlib/google/zip.h
@@ -182,6 +182,9 @@ struct UnzipOptions {
// Password to decrypt the encrypted files.
std::string password;
+
+ // Should errors be ignored while extracting files?
+ bool continue_on_error = false;
};
typedef base::RepeatingCallback<std::unique_ptr<WriterDelegate>(
@@ -198,6 +201,9 @@ bool Unzip(const base::PlatformFile& zip_file,
UnzipOptions options = {});
// Unzips the contents of |zip_file| into |dest_dir|.
+// This function does not overwrite any existing file.
+// A filename collision will result in an error.
+// Therefore, |dest_dir| should initially be an empty directory.
bool Unzip(const base::FilePath& zip_file,
const base::FilePath& dest_dir,
UnzipOptions options = {});
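Given the new UnzipOptions::continue_on_error field declared above, a caller can opt into best-effort extraction. A minimal usage sketch (the wrapper name is illustrative only); note that Unzip can still return true even though some entries were skipped:

#include "base/files/file_path.h"
#include "third_party/zlib/google/zip.h"

// Extracts what it can from |archive| into |dest|, skipping entries that
// cannot be extracted (for example unsafe or undecryptable ones) instead of
// aborting the whole operation.
bool BestEffortUnzip(const base::FilePath& archive,
                     const base::FilePath& dest) {
  return zip::Unzip(archive, dest, {.continue_on_error = true});
}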
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index 33bf788374..2cc101c75c 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/check.h"
#include "base/files/file.h"
+#include "base/files/file_util.h"
#include "base/i18n/icu_string_conversions.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
@@ -58,6 +59,26 @@ std::ostream& operator<<(std::ostream& out, UnzipError error) {
#undef SWITCH_ERR
}
+bool IsValidFileNameCharacterOnWindows(char16_t c) {
+ if (c < 32)
+ return false;
+
+ switch (c) {
+ case '<': // Less than
+ case '>': // Greater than
+ case ':': // Colon
+ case '"': // Double quote
+ case '|': // Vertical bar or pipe
+ case '?': // Question mark
+ case '*': // Asterisk
+ case '/': // Forward slash
+ case '\\': // Backslash
+ return false;
+ }
+
+ return true;
+}
+
// A writer delegate that writes to a given string.
class StringWriterDelegate : public WriterDelegate {
public:
@@ -145,8 +166,8 @@ bool ZipReader::OpenFromString(const std::string& data) {
void ZipReader::Close() {
if (zip_file_) {
- if (const int err = unzClose(zip_file_); err != UNZ_OK) {
- LOG(ERROR) << "Error while closing ZIP archive: " << UnzipError(err);
+ if (const UnzipError err{unzClose(zip_file_)}; err != UNZ_OK) {
+ LOG(ERROR) << "Error while closing ZIP archive: " << err;
}
}
Reset();
@@ -162,10 +183,10 @@ const ZipReader::Entry* ZipReader::Next() {
// Move to the next entry if we're not trying to open the first entry.
if (next_index_ > 0) {
- if (const int err = unzGoToNextFile(zip_file_); err != UNZ_OK) {
+ if (const UnzipError err{unzGoToNextFile(zip_file_)}; err != UNZ_OK) {
reached_end_ = true;
if (err != UNZ_END_OF_LIST_OF_FILE) {
- LOG(ERROR) << "Cannot go to next entry in ZIP: " << UnzipError(err);
+ LOG(ERROR) << "Cannot go to next entry in ZIP: " << err;
ok_ = false;
}
return nullptr;
@@ -189,11 +210,11 @@ bool ZipReader::OpenEntry() {
// Get entry info.
unz_file_info64 info = {};
char path_in_zip[internal::kZipMaxPath] = {};
- if (const int err = unzGetCurrentFileInfo64(zip_file_, &info, path_in_zip,
- sizeof(path_in_zip) - 1, nullptr,
- 0, nullptr, 0);
+ if (const UnzipError err{unzGetCurrentFileInfo64(
+ zip_file_, &info, path_in_zip, sizeof(path_in_zip) - 1, nullptr, 0,
+ nullptr, 0)};
err != UNZ_OK) {
- LOG(ERROR) << "Cannot get entry from ZIP: " << UnzipError(err);
+ LOG(ERROR) << "Cannot get entry from ZIP: " << err;
return false;
}
@@ -209,18 +230,10 @@ bool ZipReader::OpenEntry() {
return false;
}
- entry_.path = base::FilePath::FromUTF16Unsafe(path_in_utf16);
- entry_.original_size = info.uncompressed_size;
-
- // Directory entries in ZIP have a path ending with "/".
- entry_.is_directory = base::EndsWith(path_in_utf16, u"/");
+ // Normalize path.
+ Normalize(path_in_utf16);
- // Check the entry path for directory traversal issues. We consider entry
- // paths unsafe if they are absolute or if they contain "..". On Windows,
- // IsAbsolute() returns false for paths starting with "/".
- entry_.is_unsafe = entry_.path.ReferencesParent() ||
- entry_.path.IsAbsolute() ||
- base::StartsWith(path_in_utf16, u"/");
+ entry_.original_size = info.uncompressed_size;
// The file content of this entry is encrypted if flag bit 0 is set.
entry_.is_encrypted = info.flag & 1;
@@ -248,6 +261,73 @@ bool ZipReader::OpenEntry() {
return true;
}
+void ZipReader::Normalize(base::StringPiece16 in) {
+ entry_.is_unsafe = true;
+
+ // Directory entries in ZIP have a path ending with "/".
+ entry_.is_directory = base::EndsWith(in, u"/");
+
+ std::u16string normalized_path;
+ if (base::StartsWith(in, u"/")) {
+ normalized_path = u"ROOT";
+ entry_.is_unsafe = false;
+ }
+
+ for (;;) {
+ // Consume initial path separators.
+ const base::StringPiece16::size_type i = in.find_first_not_of(u'/');
+ if (i == base::StringPiece16::npos)
+ break;
+
+ in.remove_prefix(i);
+ DCHECK(!in.empty());
+
+ // Isolate next path component.
+ const base::StringPiece16 part = in.substr(0, in.find_first_of(u'/'));
+ DCHECK(!part.empty());
+
+ in.remove_prefix(part.size());
+
+ if (!normalized_path.empty())
+ normalized_path += u'/';
+
+ if (part == u".") {
+ normalized_path += u"DOT";
+ entry_.is_unsafe = true;
+ continue;
+ }
+
+ if (part == u"..") {
+ normalized_path += u"UP";
+ entry_.is_unsafe = true;
+ continue;
+ }
+
+ // Windows has more restrictions than other systems when it comes to valid
+ // file paths. Replace Windows-invalid characters on all systems for
+ // consistency. In particular, this prevents a path component containing
+ // colon and backslash from being misinterpreted as an absolute path on
+ // Windows.
+ for (const char16_t c : part) {
+ normalized_path += IsValidFileNameCharacterOnWindows(c) ? c : 0xFFFD;
+ }
+
+ entry_.is_unsafe = false;
+ }
+
+ // If the entry is a directory, add the final path separator to the entry
+ // path.
+ if (entry_.is_directory && !normalized_path.empty()) {
+ normalized_path += u'/';
+ entry_.is_unsafe = false;
+ }
+
+ entry_.path = base::FilePath::FromUTF16Unsafe(normalized_path);
+
+ // By construction, we should always get a relative path.
+ DCHECK(!entry_.path.IsAbsolute()) << entry_.path;
+}
+
bool ZipReader::ExtractCurrentEntry(WriterDelegate* delegate,
uint64_t num_bytes_to_extract) const {
DCHECK(zip_file_);
@@ -259,10 +339,10 @@ bool ZipReader::ExtractCurrentEntry(WriterDelegate* delegate,
// is needed, and must be nullptr.
const char* const password =
entry_.is_encrypted ? password_.c_str() : nullptr;
- if (const int err = unzOpenCurrentFilePassword(zip_file_, password);
+ if (const UnzipError err{unzOpenCurrentFilePassword(zip_file_, password)};
err != UNZ_OK) {
LOG(ERROR) << "Cannot open file " << Redact(entry_.path)
- << " from ZIP: " << UnzipError(err);
+ << " from ZIP: " << err;
return false;
}
@@ -309,9 +389,9 @@ bool ZipReader::ExtractCurrentEntry(WriterDelegate* delegate,
remaining_capacity -= num_bytes_to_write;
}
- if (const int err = unzCloseCurrentFile(zip_file_); err != UNZ_OK) {
+ if (const UnzipError err{unzCloseCurrentFile(zip_file_)}; err != UNZ_OK) {
LOG(ERROR) << "Cannot extract file " << Redact(entry_.path)
- << " from ZIP: " << UnzipError(err);
+ << " from ZIP: " << err;
entire_file_extracted = false;
}
@@ -354,10 +434,10 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
// is needed, and must be nullptr.
const char* const password =
entry_.is_encrypted ? password_.c_str() : nullptr;
- if (const int err = unzOpenCurrentFilePassword(zip_file_, password);
+ if (const UnzipError err{unzOpenCurrentFilePassword(zip_file_, password)};
err != UNZ_OK) {
LOG(ERROR) << "Cannot open file " << Redact(entry_.path)
- << " from ZIP: " << UnzipError(err);
+ << " from ZIP: " << err;
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
@@ -420,8 +500,9 @@ bool ZipReader::OpenInternal() {
DCHECK(zip_file_);
unz_global_info zip_info = {}; // Zero-clear.
- if (const int err = unzGetGlobalInfo(zip_file_, &zip_info); err != UNZ_OK) {
- LOG(ERROR) << "Cannot get ZIP info: " << UnzipError(err);
+ if (const UnzipError err{unzGetGlobalInfo(zip_file_, &zip_info)};
+ err != UNZ_OK) {
+ LOG(ERROR) << "Cannot get ZIP info: " << err;
return false;
}
@@ -451,9 +532,9 @@ void ZipReader::ExtractChunk(base::File output_file,
unzReadCurrentFile(zip_file_, buffer, internal::kZipBufSize);
if (num_bytes_read == 0) {
- if (const int err = unzCloseCurrentFile(zip_file_); err != UNZ_OK) {
+ if (const UnzipError err{unzCloseCurrentFile(zip_file_)}; err != UNZ_OK) {
LOG(ERROR) << "Cannot extract file " << Redact(entry_.path)
- << " from ZIP: " << UnzipError(err);
+ << " from ZIP: " << err;
std::move(failure_callback).Run();
return;
}
@@ -502,12 +583,26 @@ FileWriterDelegate::~FileWriterDelegate() {}
bool FileWriterDelegate::PrepareOutput() {
DCHECK(file_);
- const bool ok = file_->IsValid();
- if (ok) {
- DCHECK_EQ(file_->GetLength(), 0)
- << " The output file should be initially empty";
+
+ if (!file_->IsValid()) {
+ LOG(ERROR) << "File is not valid";
+ return false;
}
- return ok;
+
+ const int64_t length = file_->GetLength();
+ if (length < 0) {
+ PLOG(ERROR) << "Cannot get length of file handle "
+ << file_->GetPlatformFile();
+ return false;
+ }
+
+ // Just log a warning if the file is not empty.
+ // See crbug.com/1309879 and crbug.com/774762.
+ LOG_IF(WARNING, length > 0)
+ << "File handle " << file_->GetPlatformFile()
+ << " is not empty: Its length is " << length << " bytes";
+
+ return true;
}
bool FileWriterDelegate::WriteBytes(const char* data, int num_bytes) {
@@ -543,12 +638,33 @@ FilePathWriterDelegate::~FilePathWriterDelegate() {}
bool FilePathWriterDelegate::PrepareOutput() {
// We can't rely on parent directory entries being specified in the
// zip, so we make sure they are created.
- if (!base::CreateDirectory(output_file_path_.DirName()))
+ if (const base::FilePath dir = output_file_path_.DirName();
+ !base::CreateDirectory(dir)) {
+ PLOG(ERROR) << "Cannot create directory " << Redact(dir);
return false;
+ }
- owned_file_.Initialize(output_file_path_, base::File::FLAG_CREATE_ALWAYS |
- base::File::FLAG_WRITE);
- return FileWriterDelegate::PrepareOutput();
+ owned_file_.Initialize(output_file_path_,
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ if (!owned_file_.IsValid()) {
+ PLOG(ERROR) << "Cannot create file " << Redact(output_file_path_) << ": "
+ << base::File::ErrorToString(owned_file_.error_details());
+ return false;
+ }
+
+ const int64_t length = owned_file_.GetLength();
+ if (length < 0) {
+ PLOG(ERROR) << "Cannot get length of file " << Redact(output_file_path_);
+ return false;
+ }
+
+ if (length > 0) {
+ LOG(ERROR) << "File " << Redact(output_file_path_)
+ << " is not empty: Its length is " << length << " bytes";
+ return false;
+ }
+
+ return true;
}
void FilePathWriterDelegate::OnError() {
diff --git a/deps/v8/third_party/zlib/google/zip_reader.h b/deps/v8/third_party/zlib/google/zip_reader.h
index 6ca9cd9788..703b74be03 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.h
+++ b/deps/v8/third_party/zlib/google/zip_reader.h
@@ -14,7 +14,6 @@
#include "base/callback.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
-#include "base/files/file_util.h"
#include "base/memory/weak_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/time/time.h"
@@ -95,9 +94,14 @@ class ZipReader {
// if it wants to interpret this path correctly.
std::string path_in_original_encoding;
- // Path of the entry, converted to Unicode. This path is usually relative
- // (eg "foo/bar.txt"), but it can also be absolute (eg "/foo/bar.txt") or
- // parent-relative (eg "../foo/bar.txt"). See also |is_unsafe|.
+ // Path of the entry, converted to Unicode. This path is relative (eg
+ // "foo/bar.txt"). Absolute paths (eg "/foo/bar.txt") or paths containing
+ // ".." or "." components (eg "../foo/bar.txt") are converted to safe
+ // relative paths. Eg:
+ // (In ZIP) -> (Entry.path)
+ // /foo/bar -> ROOT/foo/bar
+ // ../a -> UP/a
+ // ./a -> DOT/a
base::FilePath path;
// Size of the original uncompressed file, or 0 if the entry is a directory.
@@ -123,8 +127,8 @@ class ZipReader {
// False if the entry is a file.
bool is_directory;
- // True if the entry path is considered unsafe, ie if it is absolute or if
- // it contains "..".
+ // True if the entry path cannot be converted to a safe relative path. This
+ // happens if a file entry (not a directory) has a filename "." or "..".
bool is_unsafe;
// True if the file content is encrypted.
@@ -258,6 +262,10 @@ class ZipReader {
// reset automatically as needed.
bool OpenEntry();
+ // Normalizes the given path passed as UTF-16 string piece. Sets entry_.path,
+ // entry_.is_directory and entry_.is_unsafe.
+ void Normalize(base::StringPiece16 in);
+
// Extracts a chunk of the file to the target. Will post a task for the next
// chunk and success/failure/progress callbacks as necessary.
void ExtractChunk(base::File target_file,
@@ -278,8 +286,8 @@ class ZipReader {
base::WeakPtrFactory<ZipReader> weak_ptr_factory_{this};
};
-// A writer delegate that writes to a given File. This file is expected to be
-// initially empty.
+// A writer delegate that writes to a given File. It is recommended that this
+// file be initially empty.
class FileWriterDelegate : public WriterDelegate {
public:
// Constructs a FileWriterDelegate that manipulates |file|. The delegate will
@@ -326,7 +334,8 @@ class FileWriterDelegate : public WriterDelegate {
int64_t file_length_ = 0;
};
-// A writer delegate that writes a file at a given path.
+// A writer delegate that creates and writes a file at a given path. This does
+// not overwrite any existing file.
class FilePathWriterDelegate : public FileWriterDelegate {
public:
explicit FilePathWriterDelegate(base::FilePath output_file_path);
@@ -336,10 +345,12 @@ class FilePathWriterDelegate : public FileWriterDelegate {
~FilePathWriterDelegate() override;
- // Creates the output file and any necessary intermediate directories.
+ // Creates the output file and any necessary intermediate directories. Does
+ // not overwrite any existing file, and returns false if the output file
+ // cannot be created because another file conflicts with it.
bool PrepareOutput() override;
- // Deletes the file.
+ // Deletes the output file.
void OnError() override;
private:
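With the Normalize() declaration above, entry paths that used to be reported as absolute or parent-relative are returned as safe relative paths under ROOT/, UP/ and DOT/ instead of being flagged unsafe. A short illustrative sketch of walking an archive and observing the normalized paths (the helper name and logging are illustrative only):

#include "base/files/file_path.h"
#include "base/logging.h"
#include "third_party/zlib/google/zip_reader.h"

// Logs the normalized entry paths of a ZIP archive. An entry stored as
// "/evil.txt" is reported as "ROOT/evil.txt" and "../evil.txt" as
// "UP/evil.txt"; is_unsafe is now only set for entries that cannot be turned
// into a safe relative path at all.
void LogEntries(const base::FilePath& zip_path) {
  zip::ZipReader reader;
  if (!reader.Open(zip_path))
    return;

  while (const zip::ZipReader::Entry* const entry = reader.Next()) {
    LOG(INFO) << (entry->is_directory ? "dir  " : "file ")
              << entry->path.AsUTF8Unsafe()
              << (entry->is_unsafe ? " (unsafe)" : "");
  }
}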
diff --git a/deps/v8/third_party/zlib/google/zip_reader_unittest.cc b/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
index fc80637936..31dceaccad 100644
--- a/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
@@ -308,19 +308,18 @@ TEST_F(ZipReaderTest, DotDotFile) {
ZipReader reader;
ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("evil.zip")));
base::FilePath target_path(FILE_PATH_LITERAL(
- "../levilevilevilevilevilevilevilevilevilevilevilevil"));
+ "UP/levilevilevilevilevilevilevilevilevilevilevilevil"));
const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
ASSERT_TRUE(entry);
EXPECT_EQ(target_path, entry->path);
- // This file is unsafe because of ".." in the file name.
- EXPECT_TRUE(entry->is_unsafe);
+ EXPECT_FALSE(entry->is_unsafe);
EXPECT_FALSE(entry->is_directory);
}
TEST_F(ZipReaderTest, InvalidUTF8File) {
ZipReader reader;
ASSERT_TRUE(reader.Open(data_dir_.AppendASCII("evil_via_invalid_utf8.zip")));
- base::FilePath target_path = base::FilePath::FromUTF8Unsafe(".�.\\evil.txt");
+ base::FilePath target_path = base::FilePath::FromUTF8Unsafe(".�.�evil.txt");
const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
ASSERT_TRUE(entry);
EXPECT_EQ(target_path, entry->path);
@@ -337,7 +336,7 @@ TEST_F(ZipReaderTest, EncodingSjisAsUtf8) {
EXPECT_THAT(
GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip")),
ElementsAre(
- base::FilePath::FromUTF8Unsafe("�V�����t�H���_/SJIS_835C_�\\.txt"),
+ base::FilePath::FromUTF8Unsafe("�V�����t�H���_/SJIS_835C_��.txt"),
base::FilePath::FromUTF8Unsafe(
"�V�����t�H���_/�V�����e�L�X�g �h�L�������g.txt")));
}
@@ -349,7 +348,7 @@ TEST_F(ZipReaderTest, EncodingSjisAs1252) {
EXPECT_THAT(
GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip"), "windows-1252"),
ElementsAre(base::FilePath::FromUTF8Unsafe(
- "\u0090V‚µ‚¢ƒtƒHƒ‹ƒ_/SJIS_835C_ƒ\\.txt"),
+ "\u0090V‚µ‚¢ƒtƒHƒ‹ƒ_/SJIS_835C_ƒ�.txt"),
base::FilePath::FromUTF8Unsafe(
"\u0090V‚µ‚¢ƒtƒHƒ‹ƒ_/\u0090V‚µ‚¢ƒeƒLƒXƒg "
"ƒhƒLƒ…ƒ\u0081ƒ“ƒg.txt")));
@@ -361,7 +360,7 @@ TEST_F(ZipReaderTest, EncodingSjisAsIbm866) {
EXPECT_THAT(
GetPaths(data_dir_.AppendASCII("SJIS Bug 846195.zip"), "IBM866"),
ElementsAre(
- base::FilePath::FromUTF8Unsafe("РVВ╡ВвГtГHГЛГ_/SJIS_835C_Г\\.txt"),
+ base::FilePath::FromUTF8Unsafe("РVВ╡ВвГtГHГЛГ_/SJIS_835C_Г�.txt"),
base::FilePath::FromUTF8Unsafe(
"РVВ╡ВвГtГHГЛГ_/РVВ╡ВвГeГLГXГg ГhГLГЕГБГУГg.txt")));
}
@@ -380,12 +379,11 @@ TEST_F(ZipReaderTest, AbsoluteFile) {
ZipReader reader;
ASSERT_TRUE(
reader.Open(data_dir_.AppendASCII("evil_via_absolute_file_name.zip")));
- base::FilePath target_path(FILE_PATH_LITERAL("/evil.txt"));
+ base::FilePath target_path(FILE_PATH_LITERAL("ROOT/evil.txt"));
const ZipReader::Entry* entry = LocateAndOpenEntry(&reader, target_path);
ASSERT_TRUE(entry);
EXPECT_EQ(target_path, entry->path);
- // This file is unsafe because of the absolute file name.
- EXPECT_TRUE(entry->is_unsafe);
+ EXPECT_FALSE(entry->is_unsafe);
EXPECT_FALSE(entry->is_directory);
}
diff --git a/deps/v8/third_party/zlib/google/zip_unittest.cc b/deps/v8/third_party/zlib/google/zip_unittest.cc
index ab86e88343..435d7b02ee 100644
--- a/deps/v8/third_party/zlib/google/zip_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_unittest.cc
@@ -7,9 +7,9 @@
#include <iomanip>
#include <limits>
-#include <map>
-#include <set>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "base/bind.h"
@@ -20,11 +20,13 @@
#include "base/files/scoped_temp_dir.h"
#include "base/logging.h"
#include "base/path_service.h"
+#include "base/strings/strcat.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/test/bind.h"
#include "base/time/time.h"
#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
#include "third_party/zlib/google/zip.h"
@@ -36,6 +38,23 @@
namespace {
+using testing::UnorderedElementsAre;
+using testing::UnorderedElementsAreArray;
+
+std::vector<std::string> GetRelativePaths(const base::FilePath& dir,
+ base::FileEnumerator::FileType type) {
+ std::vector<std::string> got_paths;
+ base::FileEnumerator files(dir, true, type);
+ for (base::FilePath path = files.Next(); !path.empty(); path = files.Next()) {
+ base::FilePath relative;
+ EXPECT_TRUE(dir.AppendRelativePath(path, &relative));
+ got_paths.push_back(relative.NormalizePathSeparatorsTo('/').AsUTF8Unsafe());
+ }
+
+ EXPECT_EQ(base::File::FILE_OK, files.GetError());
+ return got_paths;
+}
+
bool CreateFile(const std::string& content,
base::FilePath* file_path,
base::File* file) {
@@ -196,8 +215,8 @@ class VirtualFileSystem : public zip::FileAccessor {
std::vector<base::FilePath> files, subdirs;
};
- std::map<base::FilePath, DirContents> file_tree_;
- std::map<base::FilePath, base::File> files_;
+ std::unordered_map<base::FilePath, DirContents> file_tree_;
+ std::unordered_map<base::FilePath, base::File> files_;
};
// static
@@ -261,9 +280,10 @@ class ZipTest : public PlatformTest {
base::FileEnumerator files(
test_dir_, true,
base::FileEnumerator::FILES | base::FileEnumerator::DIRECTORIES);
- base::FilePath unzipped_entry_path = files.Next();
+
size_t count = 0;
- while (!unzipped_entry_path.empty()) {
+ for (base::FilePath unzipped_entry_path = files.Next();
+ !unzipped_entry_path.empty(); unzipped_entry_path = files.Next()) {
EXPECT_EQ(zip_contents_.count(unzipped_entry_path), 1U)
<< "Couldn't find " << unzipped_entry_path;
count++;
@@ -281,13 +301,12 @@ class ZipTest : public PlatformTest {
<< "Original file '" << original_path << "' and unzipped file '"
<< unzipped_entry_path << "' have different contents";
}
- unzipped_entry_path = files.Next();
}
+ EXPECT_EQ(base::File::FILE_OK, files.GetError());
size_t expected_count = 0;
- for (std::set<base::FilePath>::iterator iter = zip_contents_.begin();
- iter != zip_contents_.end(); ++iter) {
- if (expect_hidden_files || iter->BaseName().value()[0] != '.')
+ for (const base::FilePath& path : zip_contents_) {
+ if (expect_hidden_files || path.BaseName().value()[0] != '.')
++expected_count;
}
@@ -353,12 +372,23 @@ class ZipTest : public PlatformTest {
base::ScopedTempDir temp_dir_;
// Hard-coded contents of a known zip file.
- std::set<base::FilePath> zip_contents_;
+ std::unordered_set<base::FilePath> zip_contents_;
// Hard-coded list of relative paths for a zip file created with ZipFiles.
std::vector<base::FilePath> zip_file_list_;
};
+TEST_F(ZipTest, UnzipNoSuchFile) {
+ EXPECT_FALSE(zip::Unzip(GetDataDirectory().AppendASCII("No Such File.zip"),
+ test_dir_));
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre());
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::DIRECTORIES),
+ UnorderedElementsAre());
+}
+
TEST_F(ZipTest, Unzip) {
TestUnzipFile(FILE_PATH_LITERAL("test.zip"), true);
}
@@ -373,11 +403,9 @@ TEST_F(ZipTest, UnzipEvil) {
// won't create a persistent file outside test_dir_ in case of a
// failure.
base::FilePath output_dir = test_dir_.AppendASCII("out");
- ASSERT_FALSE(zip::Unzip(path, output_dir));
- base::FilePath evil_file = output_dir;
- evil_file = evil_file.AppendASCII(
- "../levilevilevilevilevilevilevilevilevilevilevilevil");
- ASSERT_FALSE(base::PathExists(evil_file));
+ EXPECT_TRUE(zip::Unzip(path, output_dir));
+ EXPECT_TRUE(base::PathExists(output_dir.AppendASCII(
+ "UP/levilevilevilevilevilevilevilevilevilevilevilevil")));
}
TEST_F(ZipTest, UnzipEvil2) {
@@ -388,7 +416,7 @@ TEST_F(ZipTest, UnzipEvil2) {
base::FilePath output_dir = test_dir_.AppendASCII("out");
ASSERT_TRUE(zip::Unzip(path, output_dir));
ASSERT_TRUE(base::PathExists(
- output_dir.Append(base::FilePath::FromUTF8Unsafe(".�.\\evil.txt"))));
+ output_dir.Append(base::FilePath::FromUTF8Unsafe(".�.�evil.txt"))));
ASSERT_FALSE(base::PathExists(output_dir.AppendASCII("../evil.txt")));
}
@@ -398,32 +426,13 @@ TEST_F(ZipTest, UnzipWithFilter) {
});
ASSERT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII("test.zip"), test_dir_,
{.filter = std::move(filter)}));
- // Only foo.txt should have been extracted. The following paths should not
- // be extracted:
- // foo/
- // foo/bar.txt
- // foo/bar/
- // foo/bar/.hidden
- // foo/bar/baz.txt
- // foo/bar/quux.txt
- ASSERT_TRUE(base::PathExists(test_dir_.AppendASCII("foo.txt")));
- base::FileEnumerator extractedFiles(
- test_dir_,
- false, // Do not enumerate recursively - the file must be in the root.
- base::FileEnumerator::FileType::FILES);
- int extracted_count = 0;
- while (!extractedFiles.Next().empty())
- ++extracted_count;
- ASSERT_EQ(1, extracted_count);
-
- base::FileEnumerator extractedDirs(
- test_dir_,
- false, // Do not enumerate recursively - we require zero directories.
- base::FileEnumerator::FileType::DIRECTORIES);
- extracted_count = 0;
- while (!extractedDirs.Next().empty())
- ++extracted_count;
- ASSERT_EQ(0, extracted_count);
+ // Only foo.txt should have been extracted.
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("foo.txt"));
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::DIRECTORIES),
+ UnorderedElementsAre());
}
TEST_F(ZipTest, UnzipEncryptedWithRightPassword) {
@@ -462,8 +471,9 @@ TEST_F(ZipTest, UnzipEncryptedWithWrongPassword) {
EXPECT_EQ("This is not encrypted.\n", contents);
// No rubbish file should be left behind.
- EXPECT_FALSE(
- base::PathExists(test_dir_.AppendASCII("Encrypted ZipCrypto.txt")));
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("ClearText.txt"));
}
TEST_F(ZipTest, UnzipEncryptedWithNoPassword) {
@@ -482,8 +492,25 @@ TEST_F(ZipTest, UnzipEncryptedWithNoPassword) {
EXPECT_EQ("This is not encrypted.\n", contents);
// No rubbish file should be left behind.
- EXPECT_FALSE(
- base::PathExists(test_dir_.AppendASCII("Encrypted ZipCrypto.txt")));
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("ClearText.txt"));
+}
+
+TEST_F(ZipTest, UnzipEncryptedContinueOnError) {
+ EXPECT_TRUE(
+ zip::Unzip(GetDataDirectory().AppendASCII("Different Encryptions.zip"),
+ test_dir_, {.continue_on_error = true}));
+
+ std::string contents;
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("ClearText.txt"),
+ &contents));
+ EXPECT_EQ("This is not encrypted.\n", contents);
+
+ // No rubbish file should be left behind.
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("ClearText.txt"));
}
TEST_F(ZipTest, UnzipWrongCrc) {
@@ -491,36 +518,437 @@ TEST_F(ZipTest, UnzipWrongCrc) {
zip::Unzip(GetDataDirectory().AppendASCII("Wrong CRC.zip"), test_dir_));
// No rubbish file should be left behind.
- EXPECT_FALSE(base::PathExists(test_dir_.AppendASCII("Corrupted.txt")));
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre());
+}
+
+TEST_F(ZipTest, UnzipRepeatedDirName) {
+ EXPECT_TRUE(zip::Unzip(
+ GetDataDirectory().AppendASCII("Repeated Dir Name.zip"), test_dir_));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre());
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::DIRECTORIES),
+ UnorderedElementsAre("repeated"));
+}
+
+TEST_F(ZipTest, UnzipRepeatedFileName) {
+ EXPECT_FALSE(zip::Unzip(
+ GetDataDirectory().AppendASCII("Repeated File Name.zip"), test_dir_));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("repeated"));
+
+ std::string contents;
+ EXPECT_TRUE(
+ base::ReadFileToString(test_dir_.AppendASCII("repeated"), &contents));
+ EXPECT_EQ("First file", contents);
+}
+
+TEST_F(ZipTest, UnzipCannotCreateEmptyDir) {
+ EXPECT_FALSE(zip::Unzip(
+ GetDataDirectory().AppendASCII("Empty Dir Same Name As File.zip"),
+ test_dir_));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("repeated"));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::DIRECTORIES),
+ UnorderedElementsAre());
+
+ std::string contents;
+ EXPECT_TRUE(
+ base::ReadFileToString(test_dir_.AppendASCII("repeated"), &contents));
+ EXPECT_EQ("First file", contents);
+}
+
+TEST_F(ZipTest, UnzipCannotCreateParentDir) {
+ EXPECT_FALSE(zip::Unzip(
+ GetDataDirectory().AppendASCII("Parent Dir Same Name As File.zip"),
+ test_dir_));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("repeated"));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::DIRECTORIES),
+ UnorderedElementsAre());
+
+ std::string contents;
+ EXPECT_TRUE(
+ base::ReadFileToString(test_dir_.AppendASCII("repeated"), &contents));
+ EXPECT_EQ("First file", contents);
+}
+
+// TODO(crbug.com/1311140) Detect and rename reserved file names on Windows.
+TEST_F(ZipTest, UnzipWindowsSpecialNames) {
+ EXPECT_TRUE(
+ zip::Unzip(GetDataDirectory().AppendASCII("Windows Special Names.zip"),
+ test_dir_, {.continue_on_error = true}));
+
+ std::unordered_set<std::string> want_paths = {
+ "First",
+ "Last",
+ "CLOCK$",
+ " NUL.txt",
+#ifndef OS_WIN
+ "NUL",
+ "NUL ",
+ "NUL.",
+ "NUL .",
+ "NUL.txt",
+ "NUL.tar.gz",
+ "NUL..txt",
+ "NUL...txt",
+ "NUL .txt",
+ "NUL .txt",
+ "NUL ..txt",
+#ifndef OS_MAC
+ "Nul.txt",
+#endif
+ "nul.very long extension",
+ "a/NUL",
+ "CON",
+ "PRN",
+ "AUX",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+#endif
+ };
+
+ const std::vector<std::string> got_paths =
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES);
+
+ for (const std::string& path : got_paths) {
+ const bool ok = want_paths.erase(path);
+
+#ifdef OS_WIN
+ if (!ok) {
+ // See crbug.com/1313991: Different versions of Windows treat these
+ // filenames differently. No hard error here if there is an unexpected
+ // file.
+ LOG(WARNING) << "Found unexpected file: " << std::quoted(path);
+ continue;
+ }
+#else
+ EXPECT_TRUE(ok) << "Found unexpected file: " << std::quoted(path);
+#endif
+
+ std::string contents;
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII(path), &contents));
+ EXPECT_EQ(base::StrCat({"This is: ", path}), contents);
+ }
+
+ for (const std::string& path : want_paths) {
+ EXPECT_TRUE(false) << "Cannot find expected file: " << std::quoted(path);
+ }
+}
+
+TEST_F(ZipTest, UnzipDifferentCases) {
+#if defined(OS_WIN) || defined(OS_MAC)
+ // Only the first file (with mixed case) is extracted.
+ EXPECT_FALSE(zip::Unzip(GetDataDirectory().AppendASCII(
+ "Repeated File Name With Different Cases.zip"),
+ test_dir_));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("Case"));
+
+ std::string contents;
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("Case"), &contents));
+ EXPECT_EQ("Mixed case 111", contents);
+#else
+ // All the files are extracted.
+ EXPECT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII(
+ "Repeated File Name With Different Cases.zip"),
+ test_dir_));
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("Case", "case", "CASE"));
+
+ std::string contents;
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("Case"), &contents));
+ EXPECT_EQ("Mixed case 111", contents);
+
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("case"), &contents));
+ EXPECT_EQ("Lower case 22", contents);
+
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("CASE"), &contents));
+ EXPECT_EQ("Upper case 3", contents);
+#endif
+}
+
+TEST_F(ZipTest, UnzipDifferentCasesContinueOnError) {
+ EXPECT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII(
+ "Repeated File Name With Different Cases.zip"),
+ test_dir_, {.continue_on_error = true}));
+
+ std::string contents;
+
+#if defined(OS_WIN) || defined(OS_MAC)
+ // Only the first file (with mixed case) has been extracted.
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("Case"));
+
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("Case"), &contents));
+ EXPECT_EQ("Mixed case 111", contents);
+#else
+ // All the files have been extracted.
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES),
+ UnorderedElementsAre("Case", "case", "CASE"));
+
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("Case"), &contents));
+ EXPECT_EQ("Mixed case 111", contents);
+
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("case"), &contents));
+ EXPECT_EQ("Lower case 22", contents);
+
+ EXPECT_TRUE(base::ReadFileToString(test_dir_.AppendASCII("CASE"), &contents));
+ EXPECT_EQ("Upper case 3", contents);
+#endif
+}
+
+TEST_F(ZipTest, UnzipMixedPaths) {
+ EXPECT_TRUE(zip::Unzip(GetDataDirectory().AppendASCII("Mixed Paths.zip"),
+ test_dir_, {.continue_on_error = true}));
+
+ std::unordered_set<std::string> want_paths = {
+#ifdef OS_WIN
+ "Dot", //
+ "Space→", //
+#else
+ " ", //
+ "AUX", // Disappears on Windows
+ "COM1", // Disappears on Windows
+ "COM2", // Disappears on Windows
+ "COM3", // Disappears on Windows
+ "COM4", // Disappears on Windows
+ "COM5", // Disappears on Windows
+ "COM6", // Disappears on Windows
+ "COM7", // Disappears on Windows
+ "COM8", // Disappears on Windows
+ "COM9", // Disappears on Windows
+ "CON", // Disappears on Windows
+ "Dot .", //
+ "LPT1", // Disappears on Windows
+ "LPT2", // Disappears on Windows
+ "LPT3", // Disappears on Windows
+ "LPT4", // Disappears on Windows
+ "LPT5", // Disappears on Windows
+ "LPT6", // Disappears on Windows
+ "LPT7", // Disappears on Windows
+ "LPT8", // Disappears on Windows
+ "LPT9", // Disappears on Windows
+ "NUL ..txt", // Disappears on Windows
+ "NUL .txt", // Disappears on Windows
+ "NUL ", // Disappears on Windows
+ "NUL .", // Disappears on Windows
+ "NUL .txt", // Disappears on Windows
+ "NUL", // Disappears on Windows
+ "NUL.", // Disappears on Windows
+ "NUL...txt", // Disappears on Windows
+ "NUL..txt", // Disappears on Windows
+ "NUL.tar.gz", // Disappears on Windows
+ "NUL.txt", // Disappears on Windows
+ "PRN", // Disappears on Windows
+ "Space→ ", //
+ "c/NUL", // Disappears on Windows
+ "nul.very long extension", // Disappears on Windows
+#ifndef OS_MAC
+ "CASE", // Conflicts with "Case"
+ "case", // Conflicts with "Case"
+#endif
+#endif
+ " NUL.txt", //
+ " ←Space", //
+ "$HOME", //
+ "%TMP", //
+ "-", //
+ "...Three", //
+ "..Two", //
+ ".One", //
+ "Ampersand &", //
+ "Angle ��", //
+ "At @", //
+ "Backslash1→�", //
+ "Backslash3→�←Backslash4", //
+ "Backspace �", //
+ "Backtick `", //
+ "Bell �", //
+ "CLOCK$", //
+ "Caret ^", //
+ "Carriage Return �", //
+ "Case", //
+ "Colon �", //
+ "Comma ,", //
+ "Curly {}", //
+ "C�", //
+ "C��", //
+ "C��Temp", //
+ "C��Temp�", //
+ "C��Temp�File", //
+ "Dash -", //
+ "Delete \x7F", //
+ "Dollar $", //
+ "Double quote �", //
+ "Equal =", //
+ "Escape �", //
+ "Euro €", //
+ "Exclamation !", //
+ "FileOrDir", //
+ "First", //
+ "Hash #", //
+ "Last", //
+ "Line Feed �", //
+ "Percent %", //
+ "Pipe �", //
+ "Plus +", //
+ "Question �", //
+ "Quote '", //
+ "ROOT/At The Top", //
+ "ROOT/UP/Over The Top", //
+ "ROOT/dev/null", //
+ "Round ()", //
+ "Semicolon ;", //
+ "Smile \U0001F642", //
+ "Square []", //
+ "Star �", //
+ "String Terminator \u009C", //
+ "Tab �", //
+ "Tilde ~", //
+ "UP/One Level Up", //
+ "UP/UP/Two Levels Up", //
+ "Underscore _", //
+ "a/DOT/b", //
+ "a/UP/b", //
+ "u/v/w/x/y/z", //
+ "~", //
+ "�←Backslash2", //
+ "��server�share�file", //
+ };
+
+ const std::vector<std::string> got_paths =
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::FILES);
+
+ for (const std::string& path : got_paths) {
+ const bool ok = want_paths.erase(path);
+#ifdef OS_WIN
+ // See crbug.com/1313991: Different versions of Windows treat reserved
+ // Windows filenames differently. No hard error here if there is an
+ // unexpected file.
+ LOG_IF(WARNING, !ok) << "Found unexpected file: " << std::quoted(path);
+#else
+ EXPECT_TRUE(ok) << "Found unexpected file: " << std::quoted(path);
+#endif
+ }
+
+ for (const std::string& path : want_paths) {
+ EXPECT_TRUE(false) << "Cannot find expected file: " << std::quoted(path);
+ }
+
+ EXPECT_THAT(
+ GetRelativePaths(test_dir_, base::FileEnumerator::FileType::DIRECTORIES),
+ UnorderedElementsAreArray({
+ "Empty",
+ "ROOT",
+ "ROOT/Empty",
+ "ROOT/UP",
+ "ROOT/dev",
+ "UP",
+ "UP/UP",
+ "a",
+ "a/DOT",
+ "a/UP",
+ "c",
+ "u",
+ "u/v",
+ "u/v/w",
+ "u/v/w/x",
+ "u/v/w/x/y",
+ }));
}
TEST_F(ZipTest, UnzipWithDelegates) {
- auto dir_creator = base::BindRepeating(
- [](const base::FilePath& extract_dir, const base::FilePath& entry_path) {
- return base::CreateDirectory(extract_dir.Append(entry_path));
- },
- test_dir_);
- auto writer = base::BindRepeating(
- [](const base::FilePath& extract_dir, const base::FilePath& entry_path)
- -> std::unique_ptr<zip::WriterDelegate> {
+ auto dir_creator =
+ base::BindLambdaForTesting([this](const base::FilePath& entry_path) {
+ return base::CreateDirectory(test_dir_.Append(entry_path));
+ });
+ auto writer =
+ base::BindLambdaForTesting([this](const base::FilePath& entry_path)
+ -> std::unique_ptr<zip::WriterDelegate> {
return std::make_unique<zip::FilePathWriterDelegate>(
- extract_dir.Append(entry_path));
- },
- test_dir_);
+ test_dir_.Append(entry_path));
+ });
+
+ base::File file(GetDataDirectory().AppendASCII("test.zip"),
+ base::File::Flags::FLAG_OPEN | base::File::Flags::FLAG_READ);
+ EXPECT_TRUE(zip::Unzip(file.GetPlatformFile(), writer, dir_creator));
+ base::FilePath dir = test_dir_;
+ base::FilePath dir_foo = dir.AppendASCII("foo");
+ base::FilePath dir_foo_bar = dir_foo.AppendASCII("bar");
+ EXPECT_TRUE(base::PathExists(dir.AppendASCII("foo.txt")));
+ EXPECT_TRUE(base::DirectoryExists(dir_foo));
+ EXPECT_TRUE(base::PathExists(dir_foo.AppendASCII("bar.txt")));
+ EXPECT_TRUE(base::DirectoryExists(dir_foo_bar));
+ EXPECT_TRUE(base::PathExists(dir_foo_bar.AppendASCII(".hidden")));
+ EXPECT_TRUE(base::PathExists(dir_foo_bar.AppendASCII("baz.txt")));
+ EXPECT_TRUE(base::PathExists(dir_foo_bar.AppendASCII("quux.txt")));
+}
+
+TEST_F(ZipTest, UnzipOnlyDirectories) {
+ auto dir_creator =
+ base::BindLambdaForTesting([this](const base::FilePath& entry_path) {
+ return base::CreateDirectory(test_dir_.Append(entry_path));
+ });
+
+ // Always return a null WriterDelegate.
+ auto writer =
+ base::BindLambdaForTesting([](const base::FilePath& entry_path) {
+ return std::unique_ptr<zip::WriterDelegate>();
+ });
base::File file(GetDataDirectory().AppendASCII("test.zip"),
base::File::Flags::FLAG_OPEN | base::File::Flags::FLAG_READ);
- ASSERT_TRUE(zip::Unzip(file.GetPlatformFile(), writer, dir_creator));
+ EXPECT_TRUE(zip::Unzip(file.GetPlatformFile(), writer, dir_creator,
+ {.continue_on_error = true}));
base::FilePath dir = test_dir_;
base::FilePath dir_foo = dir.AppendASCII("foo");
base::FilePath dir_foo_bar = dir_foo.AppendASCII("bar");
- ASSERT_TRUE(base::PathExists(dir.AppendASCII("foo.txt")));
- ASSERT_TRUE(base::PathExists(dir_foo));
- ASSERT_TRUE(base::PathExists(dir_foo.AppendASCII("bar.txt")));
- ASSERT_TRUE(base::PathExists(dir_foo_bar));
- ASSERT_TRUE(base::PathExists(dir_foo_bar.AppendASCII(".hidden")));
- ASSERT_TRUE(base::PathExists(dir_foo_bar.AppendASCII("baz.txt")));
- ASSERT_TRUE(base::PathExists(dir_foo_bar.AppendASCII("quux.txt")));
+ EXPECT_FALSE(base::PathExists(dir.AppendASCII("foo.txt")));
+ EXPECT_TRUE(base::DirectoryExists(dir_foo));
+ EXPECT_FALSE(base::PathExists(dir_foo.AppendASCII("bar.txt")));
+ EXPECT_TRUE(base::DirectoryExists(dir_foo_bar));
+ EXPECT_FALSE(base::PathExists(dir_foo_bar.AppendASCII(".hidden")));
+ EXPECT_FALSE(base::PathExists(dir_foo_bar.AppendASCII("baz.txt")));
+ EXPECT_FALSE(base::PathExists(dir_foo_bar.AppendASCII("quux.txt")));
}
// Tests that a ZIP archive containing SJIS-encoded file names can be correctly
@@ -566,7 +994,7 @@ TEST_F(ZipTest, UnzipSjisAsUtf8) {
std::string contents;
ASSERT_TRUE(base::ReadFileToString(
- dir.Append(base::FilePath::FromUTF8Unsafe("SJIS_835C_�\\.txt")),
+ dir.Append(base::FilePath::FromUTF8Unsafe("SJIS_835C_��.txt")),
&contents));
EXPECT_EQ(
"This file's name contains 0x5c (backslash) as the 2nd byte of Japanese "
diff --git a/deps/v8/third_party/zlib/gzguts.h b/deps/v8/third_party/zlib/gzguts.h
index 990a4d2514..6378d468a2 100644
--- a/deps/v8/third_party/zlib/gzguts.h
+++ b/deps/v8/third_party/zlib/gzguts.h
@@ -39,7 +39,7 @@
# include <io.h>
#endif
-#if defined(_WIN32) || defined(__CYGWIN__)
+#if defined(_WIN32)
# define WIDECHAR
#endif
diff --git a/deps/v8/third_party/zlib/gzlib.c b/deps/v8/third_party/zlib/gzlib.c
index 4105e6aff9..4838bf0474 100644
--- a/deps/v8/third_party/zlib/gzlib.c
+++ b/deps/v8/third_party/zlib/gzlib.c
@@ -5,7 +5,7 @@
#include "gzguts.h"
-#if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__)
+#if defined(_WIN32) && !defined(__BORLANDC__)
# define LSEEK _lseeki64
#else
#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
@@ -397,7 +397,7 @@ z_off64_t ZEXPORT gzseek64(file, offset, whence)
/* if within raw area while reading, just go there */
if (state->mode == GZ_READ && state->how == COPY &&
state->x.pos + offset >= 0) {
- ret = LSEEK(state->fd, offset - state->x.have, SEEK_CUR);
+ ret = LSEEK(state->fd, offset - (z_off64_t)state->x.have, SEEK_CUR);
if (ret == -1)
return -1;
state->x.have = 0;
diff --git a/deps/v8/third_party/zlib/gzread.c b/deps/v8/third_party/zlib/gzread.c
index 832d3ef98c..85776cd257 100644
--- a/deps/v8/third_party/zlib/gzread.c
+++ b/deps/v8/third_party/zlib/gzread.c
@@ -314,9 +314,9 @@ local z_size_t gz_read(state, buf, len)
got = 0;
do {
/* set n to the maximum amount of len that fits in an unsigned int */
- n = -1;
+ n = (unsigned)-1;
if (n > len)
- n = len;
+ n = (unsigned)len;
/* first just try copying data from the output buffer */
if (state->x.have) {
@@ -397,7 +397,7 @@ int ZEXPORT gzread(file, buf, len)
}
/* read len or fewer bytes to buf */
- len = gz_read(state, buf, len);
+ len = (unsigned)gz_read(state, buf, len);
/* check for an error */
if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR)
@@ -451,7 +451,6 @@ z_size_t ZEXPORT gzfread(buf, size, nitems, file)
int ZEXPORT gzgetc(file)
gzFile file;
{
- int ret;
unsigned char buf[1];
gz_statep state;
@@ -473,8 +472,7 @@ int ZEXPORT gzgetc(file)
}
/* nothing there -- try gz_read() */
- ret = gz_read(state, buf, 1);
- return ret < 1 ? -1 : buf[0];
+ return gz_read(state, buf, 1) < 1 ? -1 : buf[0];
}
int ZEXPORT gzgetc_(file)
diff --git a/deps/v8/third_party/zlib/gzwrite.c b/deps/v8/third_party/zlib/gzwrite.c
index c7b5651d70..52381332ed 100644
--- a/deps/v8/third_party/zlib/gzwrite.c
+++ b/deps/v8/third_party/zlib/gzwrite.c
@@ -209,7 +209,7 @@ local z_size_t gz_write(state, buf, len)
state->in);
copy = state->size - have;
if (copy > len)
- copy = len;
+ copy = (unsigned)len;
memcpy(state->in + have, buf, copy);
state->strm.avail_in += copy;
state->x.pos += copy;
@@ -229,7 +229,7 @@ local z_size_t gz_write(state, buf, len)
do {
unsigned n = (unsigned)-1;
if (n > len)
- n = len;
+ n = (unsigned)len;
state->strm.avail_in = n;
state->x.pos += n;
if (gz_comp(state, Z_NO_FLUSH) == -1)
@@ -349,12 +349,11 @@ int ZEXPORT gzputc(file, c)
}
/* -- see zlib.h -- */
-int ZEXPORT gzputs(file, str)
+int ZEXPORT gzputs(file, s)
gzFile file;
- const char *str;
+ const char *s;
{
- int ret;
- z_size_t len;
+ z_size_t len, put;
gz_statep state;
/* get internal structure */
@@ -367,9 +366,13 @@ int ZEXPORT gzputs(file, str)
return -1;
/* write string */
- len = strlen(str);
- ret = gz_write(state, str, len);
- return ret == 0 && len != 0 ? -1 : ret;
+ len = strlen(s);
+ if ((int)len < 0 || (unsigned)len != len) {
+ gz_error(state, Z_STREAM_ERROR, "string length does not fit in int");
+ return -1;
+ }
+ put = gz_write(state, s, len);
+ return put < len ? -1 : (int)len;
}
#if defined(STDC) || defined(Z_HAVE_STDARG_H)
@@ -441,7 +444,7 @@ int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va)
strm->avail_in = state->size;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return state->err;
- memcpy(state->in, state->in + state->size, left);
+ memmove(state->in, state->in + state->size, left);
strm->next_in = state->in;
strm->avail_in = left;
}
@@ -540,7 +543,7 @@ int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
strm->avail_in = state->size;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return state->err;
- memcpy(state->in, state->in + state->size, left);
+ memmove(state->in, state->in + state->size, left);
strm->next_in = state->in;
strm->avail_in = left;
}
diff --git a/deps/v8/third_party/zlib/inffast.c b/deps/v8/third_party/zlib/inffast.c
index 2797e8a03c..d89ad5ccde 100644
--- a/deps/v8/third_party/zlib/inffast.c
+++ b/deps/v8/third_party/zlib/inffast.c
@@ -74,7 +74,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
code const FAR *dcode; /* local strm->distcode */
unsigned lmask; /* mask for first level of length codes */
unsigned dmask; /* mask for first level of distance codes */
- code here; /* retrieved table entry */
+ code const *here; /* retrieved table entry */
unsigned op; /* code bits, operation, extra bits, or */
/* window position, window bytes to copy */
unsigned len; /* match length, unused bytes */
@@ -111,20 +111,20 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
- here = lcode[hold & lmask];
+ here = lcode + (hold & lmask);
dolen:
- op = (unsigned)(here.bits);
+ op = (unsigned)(here->bits);
hold >>= op;
bits -= op;
- op = (unsigned)(here.op);
+ op = (unsigned)(here->op);
if (op == 0) { /* literal */
- Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ?
"inflate: literal '%c'\n" :
- "inflate: literal 0x%02x\n", here.val));
- *out++ = (unsigned char)(here.val);
+ "inflate: literal 0x%02x\n", here->val));
+ *out++ = (unsigned char)(here->val);
}
else if (op & 16) { /* length base */
- len = (unsigned)(here.val);
+ len = (unsigned)(here->val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
@@ -142,14 +142,14 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
- here = dcode[hold & dmask];
+ here = dcode + (hold & dmask);
dodist:
- op = (unsigned)(here.bits);
+ op = (unsigned)(here->bits);
hold >>= op;
bits -= op;
- op = (unsigned)(here.op);
+ op = (unsigned)(here->op);
if (op & 16) { /* distance base */
- dist = (unsigned)(here.val);
+ dist = (unsigned)(here->val);
op &= 15; /* number of extra bits */
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
@@ -268,7 +268,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
- here = dcode[here.val + (hold & ((1U << op) - 1))];
+ here = dcode + here->val + (hold & ((1U << op) - 1));
goto dodist;
}
else {
@@ -278,7 +278,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
}
}
else if ((op & 64) == 0) { /* 2nd level length code */
- here = lcode[here.val + (hold & ((1U << op) - 1))];
+ here = lcode + here->val + (hold & ((1U << op) - 1));
goto dolen;
}
else if (op & 32) { /* end-of-block */
diff --git a/deps/v8/third_party/zlib/inflate.c b/deps/v8/third_party/zlib/inflate.c
index 68902e81bd..7543c33def 100644
--- a/deps/v8/third_party/zlib/inflate.c
+++ b/deps/v8/third_party/zlib/inflate.c
@@ -130,6 +130,7 @@ z_streamp strm;
state->mode = HEAD;
state->last = 0;
state->havedict = 0;
+ state->flags = -1;
state->dmax = 32768U;
state->head = Z_NULL;
state->hold = 0;
@@ -671,7 +672,6 @@ int flush;
state->mode = FLAGS;
break;
}
- state->flags = 0; /* expect zlib header */
if (state->head != Z_NULL)
state->head->done = -1;
if (!(state->wrap & 1) || /* check if zlib header allowed */
@@ -698,6 +698,7 @@ int flush;
break;
}
state->dmax = 1U << len;
+ state->flags = 0; /* indicate zlib header */
Tracev((stderr, "inflate: zlib header ok\n"));
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
@@ -1223,7 +1224,7 @@ int flush;
case LENGTH:
if (state->wrap && state->flags) {
NEEDBITS(32);
- if (hold != (state->total & 0xffffffffUL)) {
+ if ((state->wrap & 4) && hold != (state->total & 0xffffffff)) {
strm->msg = (char *)"incorrect length check";
state->mode = BAD;
break;
@@ -1403,6 +1404,7 @@ int ZEXPORT inflateSync(strm)
z_streamp strm;
{
unsigned len; /* number of bytes to look at or looked at */
+ int flags; /* temporary to save header status */
unsigned long in, out; /* temporary to save total_in and total_out */
unsigned char buf[4]; /* to restore bit buffer to byte string */
struct inflate_state FAR *state;
@@ -1435,9 +1437,15 @@ z_streamp strm;
/* return no joy or set up to restart inflate() on a new block */
if (state->have != 4) return Z_DATA_ERROR;
+ if (state->flags == -1)
+ state->wrap = 0; /* if no header yet, treat as raw */
+ else
+ state->wrap &= ~4; /* no point in computing a check value now */
+ flags = state->flags;
in = strm->total_in; out = strm->total_out;
inflateReset(strm);
strm->total_in = in; strm->total_out = out;
+ state->flags = flags;
state->mode = TYPE;
return Z_OK;
}
@@ -1533,7 +1541,7 @@ int check;
if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
- if (check)
+ if (check && state->wrap)
state->wrap |= 4;
else
state->wrap &= ~4;
diff --git a/deps/v8/third_party/zlib/inflate.h b/deps/v8/third_party/zlib/inflate.h
index a46cce6b6d..98679fa9bc 100644
--- a/deps/v8/third_party/zlib/inflate.h
+++ b/deps/v8/third_party/zlib/inflate.h
@@ -86,7 +86,8 @@ struct inflate_state {
int wrap; /* bit 0 true for zlib, bit 1 true for gzip,
bit 2 true to validate check value */
int havedict; /* true if dictionary provided */
- int flags; /* gzip header method and flags (0 if zlib) */
+ int flags; /* gzip header method and flags, 0 if zlib, or
+ -1 if raw or no header yet */
unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
unsigned long check; /* protected copy of check value */
unsigned long total; /* protected copy of output count */
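
The reworded flags comment above pairs with the existing wrap convention (bit 0 for zlib, bit 1 for gzip, bit 2 to validate the check value). As a hedged illustration that is not part of this patch: the wrapper, and therefore wrap, is selected by the windowBits argument passed to inflateInit2(), while flags now stays at -1 until a zlib (0) or gzip header has actually been parsed.

    /* Illustrative sketch only: how a caller selects raw, zlib, gzip or
     * auto-detected input, which is what inflate_state.wrap records. */
    #include <zlib.h>

    int open_inflater(z_stream *strm, int kind) {
      strm->zalloc = Z_NULL;
      strm->zfree = Z_NULL;
      strm->opaque = Z_NULL;
      strm->next_in = Z_NULL;
      strm->avail_in = 0;
      switch (kind) {
        case 0:  return inflateInit2(strm, -MAX_WBITS);      /* raw deflate */
        case 1:  return inflateInit2(strm, MAX_WBITS);       /* zlib wrapper */
        case 2:  return inflateInit2(strm, MAX_WBITS + 16);  /* gzip wrapper */
        default: return inflateInit2(strm, MAX_WBITS + 32);  /* auto-detect */
      }
    }
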
diff --git a/deps/v8/third_party/zlib/patches/0009-infcover-oob.patch b/deps/v8/third_party/zlib/patches/0009-infcover-oob.patch
new file mode 100644
index 0000000000..648360f332
--- /dev/null
+++ b/deps/v8/third_party/zlib/patches/0009-infcover-oob.patch
@@ -0,0 +1,24 @@
+From 75690b2683667be5535ac6243438115dc9c40f6a Mon Sep 17 00:00:00 2001
+From: Florian Mayer <fmayer@google.com>
+Date: Wed, 16 Mar 2022 16:38:36 -0700
+Subject: [PATCH] Fix out of bounds in infcover.c.
+
+---
+ test/infcover.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/test/infcover.c b/test/infcover.c
+index 2be01646c..a6d83693c 100644
+--- a/test/infcover.c
++++ b/test/infcover.c
+@@ -373,7 +373,9 @@ local void cover_support(void)
+ mem_setup(&strm);
+ strm.avail_in = 0;
+ strm.next_in = Z_NULL;
+- ret = inflateInit_(&strm, ZLIB_VERSION - 1, (int)sizeof(z_stream));
++ char versioncpy[] = ZLIB_VERSION;
++ versioncpy[0] -= 1;
++ ret = inflateInit_(&strm, versioncpy, (int)sizeof(z_stream));
+ assert(ret == Z_VERSION_ERROR);
+ mem_done(&strm, "wrong version");
+
diff --git a/deps/v8/third_party/zlib/trees.c b/deps/v8/third_party/zlib/trees.c
index 5f89d056ef..decaeb7c3c 100644
--- a/deps/v8/third_party/zlib/trees.c
+++ b/deps/v8/third_party/zlib/trees.c
@@ -149,7 +149,7 @@ local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
local void compress_block OF((deflate_state *s, const ct_data *ltree,
const ct_data *dtree));
local int detect_data_type OF((deflate_state *s));
-local unsigned bi_reverse OF((unsigned value, int length));
+local unsigned bi_reverse OF((unsigned code, int len));
local void bi_windup OF((deflate_state *s));
local void bi_flush OF((deflate_state *s));
@@ -870,7 +870,8 @@ void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last)
bi_windup(s); /* align on byte boundary */
put_short(s, (ush)stored_len);
put_short(s, (ush)~stored_len);
- zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len);
+ if (stored_len)
+ zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len);
s->pending += stored_len;
#ifdef ZLIB_DEBUG
s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
diff --git a/deps/v8/third_party/zlib/zlib.h b/deps/v8/third_party/zlib/zlib.h
index 99fd467f6b..589f865eec 100644
--- a/deps/v8/third_party/zlib/zlib.h
+++ b/deps/v8/third_party/zlib/zlib.h
@@ -543,8 +543,7 @@ ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
int strategy));
This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by the
- caller.
+ fields zalloc, zfree and opaque must be initialized before by the caller.
The method parameter is the compression method. It must be Z_DEFLATED in
this version of the library.
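
A minimal sketch, not part of the patch, of what the relaxed deflateInit2 requirement means in practice: only zalloc, zfree and opaque have to be set before the call (next_in no longer has to be).

    #include <string.h>
    #include <zlib.h>

    /* Sketch: create a gzip-wrapped deflater; windowBits 15 + 16 selects a
     * gzip header, memLevel 8 is the documented default. */
    int make_gzip_deflater(z_stream *strm) {
      memset(strm, 0, sizeof(*strm));
      strm->zalloc = Z_NULL;  /* use the default allocator */
      strm->zfree = Z_NULL;
      strm->opaque = Z_NULL;
      return deflateInit2(strm, Z_BEST_COMPRESSION, Z_DEFLATED, 15 + 16, 8,
                          Z_DEFAULT_STRATEGY);
    }
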
@@ -712,11 +711,12 @@ ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm,
used to switch between compression and straight copy of the input data, or
to switch to a different kind of input data requiring a different strategy.
If the compression approach (which is a function of the level) or the
- strategy is changed, and if any input has been consumed in a previous
- deflate() call, then the input available so far is compressed with the old
- level and strategy using deflate(strm, Z_BLOCK). There are three approaches
- for the compression levels 0, 1..3, and 4..9 respectively. The new level
- and strategy will take effect at the next call of deflate().
+ strategy is changed, and if there have been any deflate() calls since the
+ state was initialized or reset, then the input available so far is
+ compressed with the old level and strategy using deflate(strm, Z_BLOCK).
+ There are three approaches for the compression levels 0, 1..3, and 4..9
+ respectively. The new level and strategy will take effect at the next call
+ of deflate().
If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does
not have enough output space to complete, then the parameter change will not
@@ -865,9 +865,11 @@ ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
detection, or add 16 to decode only the gzip format (the zlib format will
return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a
CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see
- below), inflate() will not automatically decode concatenated gzip streams.
- inflate() will return Z_STREAM_END at the end of the gzip stream. The state
- would need to be reset to continue decoding a subsequent gzip stream.
+ below), inflate() will *not* automatically decode concatenated gzip members.
+ inflate() will return Z_STREAM_END at the end of the gzip member. The state
+ would need to be reset to continue decoding a subsequent gzip member. This
+ *must* be done if there is more data after a gzip member, in order for the
+ decompression to be compliant with the gzip standard (RFC 1952).
inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
@@ -1739,7 +1741,7 @@ ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
if (crc != original_crc) error();
*/
-ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf,
+ZEXTERN uLong ZEXPORT crc32_z OF((uLong crc, const Bytef *buf,
z_size_t len));
/*
Same as crc32(), but with a size_t length.
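
For completeness, a hedged one-liner showing the size_t-length variant documented here; unlike crc32(), a large buffer does not have to be fed in uInt-sized chunks.

    #include <stddef.h>
    #include <zlib.h>

    unsigned long buffer_crc32(const unsigned char *buf, size_t len) {
      unsigned long crc = crc32_z(0L, Z_NULL, 0);  /* initial CRC value */
      return crc32_z(crc, buf, len);
    }
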
@@ -1916,7 +1918,7 @@ ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int));
ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp));
ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp));
ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp));
-#if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO)
+#if defined(_WIN32) && !defined(Z_SOLO)
ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path,
const char *mode));
#endif
diff --git a/deps/v8/third_party/zlib/zutil.c b/deps/v8/third_party/zlib/zutil.c
index a76c6b0c7e..dcab28a0d5 100644
--- a/deps/v8/third_party/zlib/zutil.c
+++ b/deps/v8/third_party/zlib/zutil.c
@@ -136,8 +136,8 @@ const char * ZEXPORT zError(err)
return ERR_MSG(err);
}
-#if defined(_WIN32_WCE)
- /* The Microsoft C Run-Time Library for Windows CE doesn't have
+#if defined(_WIN32_WCE) && _WIN32_WCE < 0x800
+ /* The older Microsoft C Run-Time Library for Windows CE doesn't have
* errno. We define it as a global variable to simplify porting.
* Its value is always 0 and should not be used.
*/
diff --git a/deps/v8/third_party/zlib/zutil.h b/deps/v8/third_party/zlib/zutil.h
index 4425bcf75e..ec1993f3f7 100644
--- a/deps/v8/third_party/zlib/zutil.h
+++ b/deps/v8/third_party/zlib/zutil.h
@@ -44,10 +44,6 @@
# endif
#endif
-#ifdef Z_SOLO
- typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */
-#endif
-
#ifndef local
# define local static
#endif
@@ -185,10 +181,6 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX
# if defined(_WIN32_WCE)
# define fdopen(fd,mode) NULL /* No fdopen() */
-# ifndef _PTRDIFF_T_DEFINED
- typedef int ptrdiff_t;
-# define _PTRDIFF_T_DEFINED
-# endif
# else
# define fdopen(fd,type) _fdopen(fd,type)
# endif
diff --git a/deps/v8/tools/PRESUBMIT.py b/deps/v8/tools/PRESUBMIT.py
index b9fa8238e6..ded0016793 100644
--- a/deps/v8/tools/PRESUBMIT.py
+++ b/deps/v8/tools/PRESUBMIT.py
@@ -9,5 +9,6 @@ USE_PYTHON3 = True
def CheckChangeOnCommit(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, 'unittests', files_to_check=[r'.+_test\.py$'])
+ input_api, output_api, 'unittests', files_to_check=[r'.+_test\.py$'],
+ run_on_python2=False)
return input_api.RunTests(tests)
diff --git a/deps/v8/tools/chrome/linux-perf-renderer-cmd.sh b/deps/v8/tools/chrome/linux-perf-renderer-cmd.sh
new file mode 100755
index 0000000000..4fe4e516bd
--- /dev/null
+++ b/deps/v8/tools/chrome/linux-perf-renderer-cmd.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+PERF_DATA_DIR="."
+PERF_DATA_PREFIX="chrome_renderer"
+RENDERER_ID="0"
+for i in "$@"; do
+ case $i in
+ --help)
+ echo "Usage: path/to/chrome --renderer-cmd-prefix='$0 [OPTION]' [CHROME OPTIONS]"
+ echo "This script is mostly used in conjuction with linux_perf.py to run linux-perf"
+ echo "for each renderer process."
+ echo "It generates perf.data files that can be read by pprof or linux-perf."
+ echo ""
+ echo 'File naming: ${OUT_DIR}/${PREFIX}_${PARENT_PID}_${RENDERER_ID}.perf.data'
+ echo ""
+ echo "Options:"
+ echo " --perf-data-dir=OUT_DIR Change the location where perf.data is written."
+ echo " Default: '$PERF_DATA_DIR'"
+ echo " --perf-data-prefix=PREFIX Set a custom prefex for all generated perf.data files."
+ echo " Default: '$PERF_DATA_PREFIX'"
+ exit
+ ;;
+ --perf-data-dir=*)
+ PERF_DATA_DIR="${i#*=}"
+ shift
+ ;;
+ --perf-data-prefix=*)
+ PERF_DATA_PREFIX="${i#*=}"
+ shift
+ ;;
+ --renderer-client-id=*)
+ # Don't shift this option since it is passed in (and used by) chrome.
+ RENDERER_ID="${i#*=}"
+ ;;
+ *)
+ ;;
+ esac
+done
+
+
+PERF_OUTPUT="$PERF_DATA_DIR/${PERF_DATA_PREFIX}_${PPID}_${RENDERER_ID}.perf.data"
+perf record --call-graph=fp --clockid=mono --freq=max --output="${PERF_OUTPUT}" -- $@
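
A hedged usage sketch following the script's own --help text (the chrome binary path and output directory below are illustrative, not from the patch):

    out/x64.release/chrome --renderer-cmd-prefix='tools/chrome/linux-perf-renderer-cmd.sh --perf-data-dir=/tmp/perf' https://v8.dev

Each renderer process then writes a chrome_renderer_${PARENT_PID}_${RENDERER_ID}.perf.data file into /tmp/perf.
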
diff --git a/deps/v8/tools/chrome/linux_perf.py b/deps/v8/tools/chrome/linux_perf.py
new file mode 100755
index 0000000000..91d3085724
--- /dev/null
+++ b/deps/v8/tools/chrome/linux_perf.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python3
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+from pathlib import Path
+from re import A
+import os
+import shlex
+from signal import SIGQUIT
+import subprocess
+import signal
+import tempfile
+import time
+import psutil
+import multiprocessing
+
+from unittest import result
+
+renderer_cmd_file = Path(__file__).parent / 'linux-perf-renderer-cmd.sh'
+assert renderer_cmd_file.is_file()
+renderer_cmd_prefix = f"{renderer_cmd_file} --perf-data-prefix=chrome_renderer"
+
+# ==============================================================================
+
+usage = """Usage: %prog $CHROME_BIN [OPTION]... -- [CHROME_OPTION]... [URL]
+
+This script runs linux-perf on all renderer processes with custom V8 logging so
+that JS function names can be resolved.
+
+The perf data is written to OUT_DIR, one file per renderer process.
+
+See http://v8.dev//linux-perf for more detailed instructions.
+"""
+parser = optparse.OptionParser(usage=usage)
+parser.add_option(
+ '--perf-data-dir',
+ default=None,
+ metavar="OUT_DIR",
+ help="Output directory for linux perf profile files")
+parser.add_option(
+ "--profile-browser-process",
+ action="store_true",
+ default=False,
+ help="Also start linux-perf for the browser process. "
+ "By default only renderer processes are sampled. "
+ "Outputs 'browser_*.perf.data' in the CDW")
+parser.add_option("--timeout", type=int, help="Stop chrome after N seconds")
+
+chrome_options = optparse.OptionGroup(
+ parser, "Chrome-forwarded Options",
+ "These convenience for a better script experience that are forward directly"
+ "to chrome. Any other chrome option can be passed after the '--' arguments"
+ "separator.")
+chrome_options.add_option("--user-data-dir", dest="user_data_dir", default=None)
+chrome_options.add_option("--js-flags", dest="js_flags")
+chrome_options.add_option(
+ "--renderer-cmd-prefix",
+ default=None,
+ help=f"Set command prefix, used for each new chrome renderer process."
+ "Default: {renderer_cmd_prefix}")
+FEATURES_DOC = "See chrome's base/feature_list.h source file for more dertails"
+chrome_options.add_option(
+ "--enable-features",
+ help="Comma-separated list of enabled chrome features. " + FEATURES_DOC)
+chrome_options.add_option(
+ "--disable-features",
+ help="Command-separated list of disabled chrome features. " + FEATURES_DOC)
+parser.add_option_group(chrome_options)
+
+
+# ==============================================================================
+def log(*args):
+ print("")
+ print("=" * 80)
+ print(*args)
+ print("=" * 80)
+
+
+# ==============================================================================
+
+(options, args) = parser.parse_args()
+
+if len(args) == 0:
+ parser.error("No chrome binary provided")
+
+chrome_bin = Path(args.pop(0))
+if not chrome_bin.exists():
+ parser.error(f"Chrome '{chrome_bin}' does not exist")
+
+if options.renderer_cmd_prefix is not None:
+ if options.perf_data_dir is not None:
+ parser.error("Cannot specify --perf-data-dir "
+ "if a custom --renderer-cmd-prefix is provided")
+else:
+ options.renderer_cmd_prefix = str(renderer_cmd_file)
+
+if options.perf_data_dir is None:
+ options.perf_data_dir = Path.cwd()
+else:
+ options.perf_data_dir = Path(options.perf_data_dir).absolute()
+
+if not options.perf_data_dir.is_dir():
+ parser.error(f"--perf-data-dir={options.perf_data_dir} "
+ "is not an directory or does not exist.")
+
+if options.timeout and options.timeout < 2:
+ parser.error("--timeout should be more than 2 seconds")
+
+# ==============================================================================
+old_cwd = Path.cwd()
+os.chdir(options.perf_data_dir)
+
+# ==============================================================================
+JS_FLAGS_PERF = ("--perf-prof --no-write-protect-code-memory "
+ "--interpreted-frames-native-stack")
+
+with tempfile.TemporaryDirectory(prefix="chrome-") as tmp_dir_path:
+ tempdir = Path(tmp_dir_path)
+ cmd = [
+ str(chrome_bin),
+ ]
+ if options.user_data_dir is None:
+ cmd.append(f"--user-data-dir={tempdir}")
+ cmd += [
+ "--no-sandbox", "--incognito", "--enable-benchmarking", "--no-first-run",
+ "--no-default-browser-check",
+ f"--renderer-cmd-prefix={options.renderer_cmd_prefix}",
+ f"--js-flags={JS_FLAGS_PERF}"
+ ]
+ if options.js_flags:
+ cmd += [f"--js-flags={options.js_flags}"]
+ if options.enable_features:
+ cmd += [f"--enable-features={options.enable_features}"]
+ if options.disable_features:
+ cmd += [f"--disable-features={options.disable_features}"]
+ cmd += args
+ log("CHROME CMD: ", shlex.join(cmd))
+
+ if options.profile_browser_process:
+ perf_data_file = f"{tempdir.name}_browser.perf.data"
+ perf_cmd = [
+ "perf", "record", "--call-graph=fp", "--freq=max", "--clockid=mono",
+ f"--output={perf_data_file}", "--"
+ ]
+ cmd = perf_cmd + cmd
+ log("LINUX PERF CMD: ", shlex.join(cmd))
+
+ if options.timeout is None:
+ subprocess.run(cmd)
+ else:
+ process = subprocess.Popen(cmd)
+ time.sleep(options.timeout)
+ log(f"QUITING chrome child processes after {options.timeout}s timeout")
+ current_process = psutil.Process()
+ children = current_process.children(recursive=True)
+ for child in children:
+ if "chrome" in child.name() or "content_shell" in child.name():
+ print(f" quitting PID={child.pid}")
+ child.send_signal(signal.SIGQUIT)
+ # Wait for linux-perf to write out files
+ time.sleep(1)
+ process.send_signal(signal.SIGQUIT)
+ process.wait()
+
+# ==============================================================================
+log("PARALLEL POST PROCESSING: Injecting JS symbols")
+
+
+def inject_v8_symbols(perf_dat_file):
+ output_file = perf_dat_file.with_suffix(".data.jitted")
+ cmd = [
+ "perf", "inject", "--jit", f"--input={perf_dat_file}",
+ f"--output={output_file}"
+ ]
+ try:
+ subprocess.run(cmd)
+ print(f"Processed: {output_file}")
+ except:
+ print(shlex.join(cmd))
+ return None
+ return output_file
+
+
+results = []
+with multiprocessing.Pool() as pool:
+ results = list(
+ pool.imap_unordered(inject_v8_symbols,
+ options.perf_data_dir.glob("*perf.data")))
+
+results = list(filter(lambda x: x is not None, results))
+if len(results) == 0:
+ print("No perf files were successfully processed"
+ " Check for errors or partial results in '{options.perf_data_dir}'")
+ exit(1)
+log(f"RESULTS in '{options.perf_data_dir}'")
+results.sort(key=lambda x: x.stat().st_size)
+BYTES_TO_MIB = 1 / 1024 / 1024
+for output_file in reversed(results):
+ print(
+ f"{output_file.name:67}{(output_file.stat().st_size*BYTES_TO_MIB):10.2f}MiB"
+ )
+
+log("PPROF EXAMPLE")
+path_strings = map(lambda f: str(f.relative_to(old_cwd)), results)
+print(f"pprof -flame { ' '.join(path_strings)}")
diff --git a/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py b/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py
index 59bf2ee557..cc94c5146d 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/PRESUBMIT.py
@@ -10,8 +10,10 @@ USE_PYTHON3 = True
def _RunTests(input_api, output_api):
- return input_api.RunTests(input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, '.', files_to_check=['v8_foozzie_test.py$']))
+ return input_api.RunTests(
+ input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '.', files_to_check=[r'.+_test\.py$']))
+
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
diff --git a/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
index 656bc89ed3..485d1d601a 100755
--- a/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py
index a8ba74364b..fe149620f9 100755
--- a/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py b/deps/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py
index b1f892c64a..e046d9f43e 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,6 +7,9 @@
"""
Launcher for the foozzie differential-fuzzing harness. Wraps foozzie
with Python2 for backwards-compatibility when bisecting.
+
+Obsolete now after switching to Python3 entirely. We keep the launcher
+for a transition period.
"""
import os
@@ -22,6 +25,7 @@ if __name__ == '__main__':
args = sys.argv[2:]
else:
args = sys.argv[1:]
- process = subprocess.Popen(['python2'] + args)
+ process = subprocess.Popen(['python3'] + args)
+ process = subprocess.Popen(args)
process.communicate()
sys.exit(process.returncode)
diff --git a/deps/v8/tools/csvparser.mjs b/deps/v8/tools/csvparser.mjs
index 273bf89776..2f94d79106 100644
--- a/deps/v8/tools/csvparser.mjs
+++ b/deps/v8/tools/csvparser.mjs
@@ -38,17 +38,17 @@ export class CsvParser {
escapeField(string) {
let nextPos = string.indexOf("\\");
if (nextPos === -1) return string;
- let result = string.substring(0, nextPos);
+ let result = [string.substring(0, nextPos)];
// Escape sequences of the form \x00 and \u0000;
let pos = 0;
while (nextPos !== -1) {
- const escapeIdentifier = string.charAt(nextPos + 1);
+ const escapeIdentifier = string[nextPos + 1];
pos = nextPos + 2;
if (escapeIdentifier === 'n') {
- result += '\n';
+ result.push('\n');
nextPos = pos;
} else if (escapeIdentifier === '\\') {
- result += '\\';
+ result.push('\\');
nextPos = pos;
} else {
if (escapeIdentifier === 'x') {
@@ -61,9 +61,9 @@ export class CsvParser {
// Convert the selected escape sequence to a single character.
const escapeChars = string.substring(pos, nextPos);
if (escapeChars === '2C') {
- result += ',';
+ result.push(',');
} else {
- result += String.fromCharCode(parseInt(escapeChars, 16));
+ result.push(String.fromCharCode(parseInt(escapeChars, 16)));
}
}
@@ -72,13 +72,13 @@ export class CsvParser {
nextPos = string.indexOf("\\", pos);
// If there are no more escape sequences consume the rest of the string.
if (nextPos === -1) {
- result += string.substr(pos);
+ result.push(string.substr(pos));
break;
} else if (pos !== nextPos) {
- result += string.substring(pos, nextPos);
+ result.push(string.substring(pos, nextPos));
}
}
- return result;
+ return result.join('');
}
/**
diff --git a/deps/v8/tools/debug_helper/gen-heap-constants.py b/deps/v8/tools/debug_helper/gen-heap-constants.py
index 0a7907b020..3ea5be6821 100644
--- a/deps/v8/tools/debug_helper/gen-heap-constants.py
+++ b/deps/v8/tools/debug_helper/gen-heap-constants.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/disasm.py b/deps/v8/tools/disasm.py
index a91d0dbff4..c21e7f9853 100644
--- a/deps/v8/tools/disasm.py
+++ b/deps/v8/tools/disasm.py
@@ -81,7 +81,7 @@ def GetDisasmLines(filename, offset, size, arch, inplace, arch_flags=""):
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = process.communicate()
- lines = out.split("\n")
+ lines = out.decode('utf-8').split("\n")
header_line = 0
for i, line in enumerate(lines):
if _DISASM_HEADER_RE.match(line):
diff --git a/deps/v8/tools/dumpcpp.mjs b/deps/v8/tools/dumpcpp.mjs
index e92ee9ab5a..d271c8c25a 100644
--- a/deps/v8/tools/dumpcpp.mjs
+++ b/deps/v8/tools/dumpcpp.mjs
@@ -12,13 +12,13 @@ export {
export class CppProcessor extends LogReader {
constructor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
- super({}, timedRange, pairwiseTimedRange);
- this.dispatchTable_ = {
+ super(timedRange, pairwiseTimedRange);
+ this.setDispatchTable({
__proto__: null,
'shared-library': {
parsers: [parseString, parseInt, parseInt, parseInt],
processor: this.processSharedLibrary }
- };
+ });
this.cppEntriesProvider_ = cppEntriesProvider;
this.codeMap_ = new CodeMap();
this.lastLogFileName_ = null;
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 3d0841913f..9354f24ff8 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -15,7 +15,7 @@ group("v8_gcmole_files") {
"gcmole-test.cc",
"gcmole-tools/",
"run-gcmole.py",
- "suspects.whitelist",
+ "suspects.allowlist",
"ignored_files",
"test-expectations.txt",
diff --git a/deps/v8/tools/gcmole/gcmole.cc b/deps/v8/tools/gcmole/gcmole.cc
index 9881f4c5b1..74208b27ee 100644
--- a/deps/v8/tools/gcmole/gcmole.cc
+++ b/deps/v8/tools/gcmole/gcmole.cc
@@ -387,9 +387,6 @@ static void LoadSuspectsAllowList() {
// TODO(cbruni): clean up once fully migrated
std::ifstream fin("tools/gcmole/suspects.allowlist");
- if (!fin.is_open()) {
- fin = std::ifstream("tools/gcmole/suspects.whitelist");
- }
std::string s;
while (fin >> s) suspects_allowlist.insert(s);
diff --git a/deps/v8/tools/gcmole/gcmole.py b/deps/v8/tools/gcmole/gcmole.py
index a77c57355d..df1d33c3cf 100755
--- a/deps/v8/tools/gcmole/gcmole.py
+++ b/deps/v8/tools/gcmole/gcmole.py
@@ -6,10 +6,8 @@
# This is main driver for gcmole tool. See README for more details.
# Usage: CLANG_BIN=clang-bin-dir python tools/gcmole/gcmole.py [arm|arm64|ia32|x64]
-# for py2/py3 compatibility
-from __future__ import print_function
-
from multiprocessing import cpu_count
+from pathlib import Path
import collections
import difflib
@@ -20,62 +18,7 @@ import re
import subprocess
import sys
import threading
-
-if sys.version_info.major > 2:
- from pathlib import Path
- import queue
-else:
- import Queue as queue
- default_open = open
-
- def open(path, *args, **kwargs):
- return default_open(str(path), *args, **kwargs)
-
- class Path(object):
-
- def __init__(self, path, *args):
- if args:
- self._path = os.path.join(str(path), *args)
- else:
- self._path = str(path)
-
- def __div__(self, other):
- return Path(self._path, str(other))
-
- def __str__(self):
- return self._path
-
- def resolve(self):
- return Path(os.path.abspath(self._path))
-
- @property
- def parent(self):
- return Path(os.path.dirname(self._path))
-
- @property
- def parents(self):
- current = self
- parents = []
- while current._path != "" and current._path != "/":
- current = current.parent
- parents.append(current)
- return parents
-
- def is_file(self):
- return os.path.isfile(self._path)
-
- def is_dir(self):
- return os.path.isdir(self._path)
-
- def exists(self):
- return os.path.exists(self._path)
-
- def mkdir(self, parents=False, exist_ok=False):
- if parents and not self.parent.exists():
- self.parent.mkdir(parents=True, exist_ok=True)
- if exist_ok and self.exists():
- return
- os.mkdir(self._path)
+import queue
ArchCfg = collections.namedtuple(
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
index cfcb2dd410..b3b0b68d9d 100755
--- a/deps/v8/tools/gcmole/run-gcmole.py
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -1,11 +1,8 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import os
import os.path
import signal
diff --git a/deps/v8/tools/gcmole/suspects.whitelist b/deps/v8/tools/gcmole/suspects.allowlist
index 1ac855f2f7..1ac855f2f7 100644
--- a/deps/v8/tools/gcmole/suspects.whitelist
+++ b/deps/v8/tools/gcmole/suspects.allowlist
diff --git a/deps/v8/tools/get_landmines.py b/deps/v8/tools/get_landmines.py
index bf8efa595e..a2ace649f6 100755
--- a/deps/v8/tools/get_landmines.py
+++ b/deps/v8/tools/get_landmines.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,9 +8,6 @@ This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
-# for py2/py3 compatibility
-from __future__ import print_function
-
import os
import sys
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 1d73b43ace..1e10b36f8b 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
@@ -30,12 +30,9 @@
# flake8: noqa # https://bugs.chromium.org/p/v8/issues/detail?id=8784
-# for py2/py3 compatibility
-from __future__ import print_function
-
-import BaseHTTPServer
+import http.server as http_server
import bisect
-import cgi
+import html
import cmd
import codecs
import ctypes
@@ -46,11 +43,10 @@ import mmap
import optparse
import os
import re
-import StringIO
+import io
import sys
import types
-import urllib
-import urlparse
+import urllib.parse
import v8heapconst
import webbrowser
@@ -609,12 +605,14 @@ class FuncSymbol:
def Covers(self, addr):
return (self.start <= addr) and (addr < self.end)
+
class MinidumpReader(object):
"""Minidump (.dmp) reader."""
_HEADER_MAGIC = 0x504d444d
def __init__(self, options, minidump_name):
+ self._reset()
self.minidump_name = minidump_name
if sys.platform == 'win32':
self.minidump_file = open(minidump_name, "a+")
@@ -626,11 +624,19 @@ class MinidumpReader(object):
if self.header.signature != MinidumpReader._HEADER_MAGIC:
print("Warning: Unsupported minidump header magic!", file=sys.stderr)
DebugPrint(self.header)
- directories = []
offset = self.header.stream_directories_rva
+ directories = []
for _ in range(self.header.stream_count):
directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
offset += MINIDUMP_DIRECTORY.size
+
+ self.symdir = options.symdir
+ self._ReadArchitecture(directories)
+ self._ReadDirectories(directories)
+ self._FindObjdump(options)
+
+ def _reset(self):
+ self.header = None
self.arch = None
self.exception = None
self.exception_context = None
@@ -639,13 +645,9 @@ class MinidumpReader(object):
self.module_list = None
self.thread_map = {}
- self.symdir = options.symdir
self.modules_with_symbols = []
self.symbols = []
- self._ReadArchitecture(directories)
- self._ReadDirectories(directories)
- self._FindObjdump(options)
def _ReadArchitecture(self, directories):
# Find MDRawSystemInfo stream and determine arch.
@@ -739,7 +741,7 @@ class MinidumpReader(object):
return None
print(("# Looking for platform specific (%s) objdump in "
"third_party directory.") % platform_filter)
- objdumps = filter(lambda file: platform_filter in file >= 0, objdumps)
+ objdumps = list(filter(lambda file: platform_filter in file >= 0, objdumps))
if len(objdumps) == 0:
print("# Could not find platform specific objdump in third_party.")
print("# Make sure you installed the correct SDK.")
@@ -838,7 +840,7 @@ class MinidumpReader(object):
def ReadAsciiPtr(self, address):
ascii_content = [
- c if c >= '\x20' and c < '\x7f' else '.'
+ chr(c) if c >= 0x20 and c < 0x7f else '.'
for c in self.ReadBytes(address, self.MachinePointerSize())
]
return ''.join(ascii_content)
@@ -994,6 +996,7 @@ class MinidumpReader(object):
def Dispose(self):
+ self._reset()
self.minidump.close()
self.minidump_file.close()
@@ -2306,7 +2309,7 @@ class InspectionPadawan(object):
count += 1
if count <= 5 or len(possible_context) == 0: return
# Find entry with highest count
- possible_context = possible_context.items()
+ possible_context = list(possible_context.items())
possible_context.sort(key=lambda pair: pair[1])
address,count = possible_context[-1]
if count <= 4: return
@@ -2615,11 +2618,11 @@ WEB_FOOTER = """
class WebParameterError(Exception):
- def __init__(self, message):
- Exception.__init__(self, message)
+ pass
-class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+class InspectionWebHandler(http_server.BaseHTTPRequestHandler):
+
def formatter(self, query_components):
name = query_components.get("dump", [None])[0]
return self.server.get_dump_formatter(name)
@@ -2633,40 +2636,39 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
self.end_headers()
return
+ def write(self, string):
+ self.wfile.write(string.encode('utf-8'))
+
def do_GET(self):
try:
- parsedurl = urlparse.urlparse(self.path)
- query_components = urlparse.parse_qs(parsedurl.query)
+ parsedurl = urllib.parse.urlparse(self.path)
+ query_components = urllib.parse.parse_qs(parsedurl.query)
+ out_buffer = io.StringIO()
if parsedurl.path == "/dumps.html":
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.server.output_dumps(out_buffer)
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
elif parsedurl.path == "/summary.html":
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.formatter(query_components).output_summary(out_buffer)
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
elif parsedurl.path == "/info.html":
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.formatter(query_components).output_info(out_buffer)
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
elif parsedurl.path == "/modules.html":
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.formatter(query_components).output_modules(out_buffer)
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
elif parsedurl.path == "/search.html" or parsedurl.path == "/s":
address = query_components.get("val", [])
if len(address) != 1:
self.send_error(404, "Invalid params")
return
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.formatter(query_components).output_search_res(
out_buffer, address[0])
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
elif parsedurl.path == "/disasm.html":
address = query_components.get("val", [])
exact = query_components.get("exact", ["on"])
@@ -2674,19 +2676,17 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
self.send_error(404, "Invalid params")
return
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.formatter(query_components).output_disasm(
out_buffer, address[0], exact[0])
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
elif parsedurl.path == "/data.html":
address = query_components.get("val", [])
datakind = query_components.get("type", ["address"])
if len(address) == 1 and len(datakind) == 1:
self.send_success_html_headers()
- out_buffer = StringIO.StringIO()
self.formatter(query_components).output_data(
out_buffer, address[0], datakind[0])
- self.wfile.write(out_buffer.getvalue())
+ self.write(out_buffer.getvalue())
else:
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setdumpdesc":
@@ -2697,7 +2697,7 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
description = description[0]
if self.server.set_dump_desc(name, description):
self.send_success_html_headers()
- self.wfile.write("OK")
+ self.write("OK")
return
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setcomment":
@@ -2708,7 +2708,7 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
comment = comment[0]
self.formatter(query_components).set_comment(address, comment)
self.send_success_html_headers()
- self.wfile.write("OK")
+ self.write("OK")
else:
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setpageaddress":
@@ -2719,7 +2719,7 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
address = address[0]
self.formatter(query_components).set_page_address(kind, address)
self.send_success_html_headers()
- self.wfile.write("OK")
+ self.write("OK")
else:
self.send_error(404,'Invalid params')
else:
@@ -2741,7 +2741,7 @@ class InspectionWebFormatter(object):
def __init__(self, switches, minidump_name, http_server):
self.dumpfilename = os.path.split(minidump_name)[1]
- self.encfilename = urllib.urlencode({ 'dump' : self.dumpfilename })
+ self.encfilename = urllib.parse.urlencode({'dump': self.dumpfilename})
self.reader = MinidumpReader(switches, minidump_name)
self.server = http_server
@@ -2811,9 +2811,10 @@ class InspectionWebFormatter(object):
(style_class, self.encfilename, struncompressed, straddress))
def output_header(self, f):
- f.write(WEB_HEADER %
- { "query_dump" : self.encfilename,
- "dump_name" : cgi.escape(self.dumpfilename) })
+ f.write(WEB_HEADER % {
+ "query_dump": self.encfilename,
+ "dump_name": html.escape(self.dumpfilename)
+ })
def output_footer(self, f):
f.write(WEB_FOOTER)
@@ -2943,7 +2944,7 @@ class InspectionWebFormatter(object):
def format_object(self, address):
heap_object = self.padawan.SenseObject(address)
- return cgi.escape(str(heap_object or ""))
+ return html.escape(str(heap_object or ""))
def output_data(self, f, straddress, datakind):
try:
@@ -3172,7 +3173,7 @@ class InspectionWebFormatter(object):
object_info = self.padawan.SenseObject(maybe_address)
if not object_info:
continue
- extra.append(cgi.escape(str(object_info)))
+ extra.append(html.escape(str(object_info)))
if len(extra) == 0:
return line
return ("%s <span class=disasmcomment>;; %s</span>" %
@@ -3236,7 +3237,7 @@ class InspectionWebFormatter(object):
comment = self.comments.get_comment(address)
value = ""
if comment:
- value = " value=\"%s\"" % cgi.escape(comment)
+ value = " value=\"%s\"" % html.escape(comment)
f.write("<input type=text class=ci "
"id=%s-address-0x%s onchange=c()%s>" %
(prefix,
@@ -3400,10 +3401,10 @@ WEB_DUMPS_FOOTER = """
DUMP_FILE_RE = re.compile(r"[-_0-9a-zA-Z][-\._0-9a-zA-Z]*\.dmp$")
-class InspectionWebServer(BaseHTTPServer.HTTPServer):
+class InspectionWebServer(http_server.HTTPServer):
+
def __init__(self, port_number, switches, minidump_name):
- BaseHTTPServer.HTTPServer.__init__(
- self, ('localhost', port_number), InspectionWebHandler)
+ super().__init__(('localhost', port_number), InspectionWebHandler)
splitpath = os.path.split(minidump_name)
self.dumppath = splitpath[0]
self.dumpfilename = splitpath[1]
@@ -3421,7 +3422,7 @@ class InspectionWebServer(BaseHTTPServer.HTTPServer):
desc = ""
f.write("<input type=\"text\" class=\"dumpcomments\" "
"id=\"dump-%s\" onchange=\"dump_comment()\" value=\"%s\">\n" %
- (cgi.escape(name), desc))
+ (html.escape(name), desc))
def set_dump_desc(self, name, description):
if not DUMP_FILE_RE.match(name):
@@ -3472,8 +3473,8 @@ class InspectionWebServer(BaseHTTPServer.HTTPServer):
fnames = dumps_by_time[mtime]
for fname in fnames:
f.write("<tr>\n")
- f.write("<td><a href=\"summary.html?%s\">%s</a></td>\n" % (
- (urllib.urlencode({ 'dump' : fname }), fname)))
+ f.write("<td><a href=\"summary.html?%s\">%s</a></td>\n" %
+ ((urllib.parse.urlencode({'dump': fname}), fname)))
f.write("<td>&nbsp;&nbsp;&nbsp;")
f.write(datetime.datetime.fromtimestamp(mtime))
f.write("</td>")
@@ -3896,6 +3897,13 @@ def PrintModuleDetails(reader, module):
def AnalyzeMinidump(options, minidump_name):
reader = MinidumpReader(options, minidump_name)
+ # Use a separate function to prevent leaking the minidump buffer through
+ # ctypes in local variables.
+ _AnalyzeMinidump(options, reader)
+ reader.Dispose()
+
+
+def _AnalyzeMinidump(options, reader):
heap = None
stack_top = reader.ExceptionSP()
@@ -3993,7 +4001,6 @@ def AnalyzeMinidump(options, minidump_name):
print("Annotated stack (from exception.esp to bottom):")
stack_start = padawan.PrintStackTraceMessage()
padawan.InterpretMemory(stack_start, stack_bottom)
- reader.Dispose()
if __name__ == "__main__":
diff --git a/deps/v8/tools/js/helper.mjs b/deps/v8/tools/js/helper.mjs
index 04df6b5421..1da31b7e55 100644
--- a/deps/v8/tools/js/helper.mjs
+++ b/deps/v8/tools/js/helper.mjs
@@ -8,7 +8,7 @@ export const GB = MB * KB;
export const kMillis2Seconds = 1 / 1000;
export const kMicro2Milli = 1 / 1000;
-export function formatBytes(bytes) {
+export function formatBytes(bytes, digits = 2) {
const units = ['B', 'KiB', 'MiB', 'GiB'];
const divisor = 1024;
let index = 0;
@@ -16,7 +16,7 @@ export function formatBytes(bytes) {
index++;
bytes /= divisor;
}
- return bytes.toFixed(2) + units[index];
+ return bytes.toFixed(digits) + units[index];
}
export function formatMicroSeconds(micro) {
@@ -51,3 +51,18 @@ export function calcOffsetInVMCage(address) {
let ret = Number(address & mask);
return ret;
}
+
+export function delay(time) {
+ return new Promise(resolver => setTimeout(resolver, time));
+}
+
+export function defer() {
+ let resolve_func, reject_func;
+ const p = new Promise((resolve, reject) => {
+ resolve_func = resolve;
+    reject_func = reject;
+ });
+ p.resolve = resolve_func;
+ p.reject = reject_func;
+ return p;
+}
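
The two new helpers are shared by the browser-based tools; a minimal usage sketch
(the example function and click listener below are illustrative only):

    import {delay, defer} from './helper.mjs';

    async function example() {
      await delay(100);         // resolves after ~100 ms
      const clicked = defer();  // promise with exposed resolve()/reject()
      document.addEventListener('click', () => clicked.resolve(), {once: true});
      await clicked;            // settles once the listener calls resolve()
    }
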
diff --git a/deps/v8/tools/js/log-file-reader-template.html b/deps/v8/tools/js/log-file-reader-template.html
index f9e31eed8b..416c7e33d3 100644
--- a/deps/v8/tools/js/log-file-reader-template.html
+++ b/deps/v8/tools/js/log-file-reader-template.html
@@ -40,6 +40,7 @@ found in the LICENSE file. -->
#loader {
display: none;
+ will-change: rotate;
}
.loading #loader {
@@ -53,17 +54,38 @@ found in the LICENSE file. -->
background-color: var(--file-reader-background-color);
}
- #spinner {
+ #spinner, #progress, #progressText {
position: absolute;
- width: 100px;
- height: 100px;
+ width: 120px;
+ height: 120px;
top: 40%;
left: 50%;
- margin-left: -50px;
- border: 30px solid var(--surface-color);
- border-top: 30px solid var(--primary-color);
+ margin-left: calc(-60px - 10px);
border-radius: 50%;
- animation: spin 1s ease-in-out infinite;
+ }
+ #spinner {
+ border: 20px solid var(--surface-color);
+ border-top: 20px solid var(--primary-color);
+ animation: spin 1s linear infinite;
+ will-change: transform;
+ transform: scale(1.1);
+ }
+
+ #progress, #progressText {
+ padding: 20px;
+ }
+
+ #progress {
+ transition: all 0.5s ease-in-out;
+ }
+
+ #progressText {
+ line-height: 120px;
+ font-size: 28px;
+ transform: scale(0.55);
+ text-align: center;
+ vertical-align: middle;
+ background-color: var(--surface-color);
}
#label {
@@ -88,6 +110,8 @@ found in the LICENSE file. -->
<input id="file" type="file" name="file" />
</div>
<div id="loader">
+ <div id="progress"></div>
<div id="spinner"></div>
+ <div id="progressText"></div>
</div>
</div>
diff --git a/deps/v8/tools/js/web-api-helper.mjs b/deps/v8/tools/js/web-api-helper.mjs
index 15a23e1070..8c6ecc1ceb 100644
--- a/deps/v8/tools/js/web-api-helper.mjs
+++ b/deps/v8/tools/js/web-api-helper.mjs
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+import {delay, formatBytes} from './helper.mjs';
+
export class V8CustomElement extends HTMLElement {
_updateTimeoutId;
_updateCallback = this.forceUpdate.bind(this);
@@ -34,23 +36,30 @@ export class V8CustomElement extends HTMLElement {
}
forceUpdate() {
+ this._updateTimeoutId = undefined;
this._update();
}
_update() {
throw Error('Subclass responsibility');
}
+
+ get isFocused() {
+ return document.activeElement === this;
+ }
}
export class FileReader extends V8CustomElement {
constructor(templateText) {
super(templateText);
- this.addEventListener('click', (e) => this.handleClick(e));
- this.addEventListener('dragover', (e) => this.handleDragOver(e));
- this.addEventListener('drop', (e) => this.handleChange(e));
- this.$('#file').addEventListener('change', (e) => this.handleChange(e));
- this.$('#fileReader')
- .addEventListener('keydown', (e) => this.handleKeyEvent(e));
+ this.addEventListener('click', this.handleClick.bind(this));
+ this.addEventListener('dragover', this.handleDragOver.bind(this));
+ this.addEventListener('drop', this.handleChange.bind(this));
+ this.$('#file').addEventListener('change', this.handleChange.bind(this));
+ this.fileReader = this.$('#fileReader');
+ this.fileReader.addEventListener('keydown', this.handleKeyEvent.bind(this));
+ this.progressNode = this.$('#progress');
+ this.progressTextNode = this.$('#progressText');
}
set error(message) {
@@ -73,8 +82,6 @@ export class FileReader extends V8CustomElement {
handleChange(event) {
// Used for drop and file change.
event.preventDefault();
- this.dispatchEvent(
- new CustomEvent('fileuploadstart', {bubbles: true, composed: true}));
const host = event.dataTransfer ? event.dataTransfer : event.target;
this.readFile(host.files[0]);
}
@@ -87,26 +94,50 @@ export class FileReader extends V8CustomElement {
this.fileReader.focus();
}
- get fileReader() {
- return this.$('#fileReader');
- }
-
get root() {
return this.$('#root');
}
+ setProgress(progress, processedBytes = 0) {
+ this.progress = Math.max(0, Math.min(progress, 1));
+ this.processedBytes = processedBytes;
+ }
+
+ updateProgressBar() {
+ // Create a circular progress bar, starting at 12 o'clock.
+ this.progressNode.style.backgroundImage = `conic-gradient(
+ var(--primary-color) 0%,
+ var(--primary-color) ${this.progress * 100}%,
+ var(--surface-color) ${this.progress * 100}%)`;
+ this.progressTextNode.innerText =
+ this.processedBytes ? formatBytes(this.processedBytes, 1) : '';
+ if (this.root.className == 'loading') {
+ window.requestAnimationFrame(() => this.updateProgressBar());
+ }
+ }
+
readFile(file) {
+ this.dispatchEvent(new CustomEvent('fileuploadstart', {
+ bubbles: true,
+ composed: true,
+ detail: {
+ progressCallback: this.setProgress.bind(this),
+ totalSize: file.size,
+ }
+ }));
if (!file) {
this.error = 'Failed to load file.';
return;
}
this.fileReader.blur();
+ this.setProgress(0);
this.root.className = 'loading';
// Delay the loading a bit to allow for CSS animations to happen.
window.requestAnimationFrame(() => this.asyncReadFile(file));
}
async asyncReadFile(file) {
+ this.updateProgressBar();
const decoder = globalThis.TextDecoderStream;
if (decoder) {
await this._streamFile(file, decoder);
@@ -132,7 +163,10 @@ export class FileReader extends V8CustomElement {
const readResult = await reader.read();
chunk = readResult.value;
readerDone = readResult.done;
- if (chunk) this._handleFileChunk(chunk);
+ if (!chunk) break;
+ this._handleFileChunk(chunk);
+ // Artificial delay to allow for layout updates.
+ await delay(5);
} while (!readerDone);
}
diff --git a/deps/v8/tools/logreader.mjs b/deps/v8/tools/logreader.mjs
index 26a6106a01..d6815733d1 100644
--- a/deps/v8/tools/logreader.mjs
+++ b/deps/v8/tools/logreader.mjs
@@ -32,64 +32,65 @@
// Parses dummy variable for readability;
-export const parseString = 'parse-string';
+export function parseString(field) { return field };
export const parseVarArgs = 'parse-var-args';
/**
* Base class for processing log files.
*
- * @param {Array.<Object>} dispatchTable A table used for parsing and processing
- * log records.
* @param {boolean} timedRange Ignore ticks outside timed range.
* @param {boolean} pairwiseTimedRange Ignore ticks outside pairs of timer
* markers.
* @constructor
*/
export class LogReader {
- constructor (dispatchTable, timedRange, pairwiseTimedRange) {
- /**
- * @type {Array.<Object>}
- */
- this.dispatchTable_ = dispatchTable;
-
- /**
- * @type {boolean}
- */
+ constructor(timedRange=false, pairwiseTimedRange=false) {
+ this.dispatchTable_ = new Map();
this.timedRange_ = timedRange;
-
- /**
- * @type {boolean}
- */
this.pairwiseTimedRange_ = pairwiseTimedRange;
- if (pairwiseTimedRange) {
- this.timedRange_ = true;
- }
-
- /**
- * Current line.
- * @type {number}
- */
+ if (pairwiseTimedRange) this.timedRange_ = true;
this.lineNum_ = 0;
-
- /**
- * CSV lines parser.
- * @type {CsvParser}
- */
this.csvParser_ = new CsvParser();
-
- /**
- * Keeps track of whether we've seen a "current-time" tick yet.
- * @type {boolean}
- */
+ // Variables for tracking of 'current-time' log entries:
this.hasSeenTimerMarker_ = false;
-
- /**
- * List of log lines seen since last "current-time" tick.
- * @type {Array.<String>}
- */
this.logLinesSinceLastTimerMarker_ = [];
}
+/**
+ * @param {Object} table A table used for parsing and processing
+ * log records.
+ * exampleDispatchTable = {
+ * "log-entry-XXX": {
+ *     parsers: [parseString, parseInt, ..., parseVarArgs],
+ * processor: this.processXXX.bind(this)
+ * },
+ * ...
+ * }
+ */
+ setDispatchTable(table) {
+ if (Object.getPrototypeOf(table) !== null) {
+      throw new Error("Dispatch table expected __proto__=null for speedup");
+ }
+ for (let name in table) {
+ const parser = table[name];
+ if (parser === undefined) continue;
+ if (!parser.isAsync) parser.isAsync = false;
+ if (!Array.isArray(parser.parsers)) {
+ throw new Error(`Invalid parsers: dispatchTable['${
+ name}'].parsers should be an Array.`);
+ }
+ let type = typeof parser.processor;
+ if (type !== 'function') {
+ throw new Error(`Invalid processor: typeof dispatchTable['${
+ name}'].processor is '${type}' instead of 'function'`);
+ }
+ if (!parser.processor.name.startsWith('bound ')) {
+ parser.processor = parser.processor.bind(this);
+ }
+ this.dispatchTable_.set(name, parser);
+ }
+ }
+
/**
* A thin wrapper around shell's 'read' function showing a file name on error.
@@ -118,7 +119,18 @@ export class LogReader {
* @param {string} chunk A portion of log.
*/
async processLogChunk(chunk) {
- await this.processLog_(chunk.split('\n'));
+ let end = chunk.length;
+ let current = 0;
+ // Kept for debugging in case of parsing errors.
+ let lineNumber = 0;
+ while (current < end) {
+ const next = chunk.indexOf("\n", current);
+ if (next === -1) break;
+ lineNumber++;
+ const line = chunk.substring(current, next);
+ current = next + 1;
+ await this.processLogLine(line);
+ }
}
/**
@@ -162,18 +174,19 @@ export class LogReader {
processStack(pc, func, stack) {
const fullStack = func ? [pc, func] : [pc];
let prevFrame = pc;
- for (let i = 0, n = stack.length; i < n; ++i) {
+ const length = stack.length;
+ for (let i = 0, n = length; i < n; ++i) {
const frame = stack[i];
- const firstChar = frame.charAt(0);
- if (firstChar == '+' || firstChar == '-') {
+ const firstChar = frame[0];
+ if (firstChar === '+' || firstChar === '-') {
// An offset from the previous frame.
prevFrame += parseInt(frame, 16);
fullStack.push(prevFrame);
// Filter out possible 'overflow' string.
- } else if (firstChar != 'o') {
+ } else if (firstChar !== 'o') {
fullStack.push(parseInt(frame, 16));
} else {
- console.error(`dropping: ${frame}`);
+ console.error(`Dropping unknown tick frame: ${frame}`);
}
}
return fullStack;
@@ -188,29 +201,23 @@ export class LogReader {
async dispatchLogRow_(fields) {
// Obtain the dispatch.
const command = fields[0];
- const dispatch = this.dispatchTable_[command];
+ const dispatch = this.dispatchTable_.get(command);
if (dispatch === undefined) return;
const parsers = dispatch.parsers;
const length = parsers.length;
// Parse fields.
- const parsedFields = [];
+ const parsedFields = new Array(length);
for (let i = 0; i < length; ++i) {
const parser = parsers[i];
- if (parser === parseString) {
- parsedFields.push(fields[1 + i]);
- } else if (typeof parser == 'function') {
- parsedFields.push(parser(fields[1 + i]));
- } else if (parser === parseVarArgs) {
- // var-args
- parsedFields.push(fields.slice(1 + i));
+ if (parser === parseVarArgs) {
+ parsedFields[i] = fields.slice(1 + i);
break;
} else {
- throw new Error(`Invalid log field parser: ${parser}`);
+ parsedFields[i] = parser(fields[1 + i]);
}
}
-
// Run the processor.
- await dispatch.processor.apply(this, parsedFields);
+ await dispatch.processor(...parsedFields);
}
/**
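
A sketch of the new registration style for LogReader subclasses (the 'my-entry'
key and its field layout are hypothetical; parse-processor.mjs below shows real
usage):

    import {LogReader, parseString, parseVarArgs} from './logreader.mjs';

    class MyProcessor extends LogReader {
      constructor() {
        super();
        this.setDispatchTable({
          __proto__: null,  // required: keeps the table in dictionary mode
          'my-entry': {
            parsers: [parseString, parseInt, parseVarArgs],
            processor: this.processMyEntry  // bound automatically if unbound
          }
        });
      }
      processMyEntry(name, size, rest) { /* ... */ }
    }
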
diff --git a/deps/v8/tools/mb/PRESUBMIT.py b/deps/v8/tools/mb/PRESUBMIT.py
index bda4ef3f43..150de90e88 100644
--- a/deps/v8/tools/mb/PRESUBMIT.py
+++ b/deps/v8/tools/mb/PRESUBMIT.py
@@ -18,8 +18,7 @@ def _CommonChecks(input_api, output_api):
# Run the MB unittests.
results.extend(
input_api.canned_checks.RunUnitTestsInDirectory(input_api, output_api,
- '.',
- [r'^.+_unittest\.py$']))
+ '.', [r'^.+_test\.py$']))
# Validate the format of the mb_config.pyl file.
cmd = [input_api.python_executable, 'mb.py', 'validate']
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_test.py
index 86d9cd403b..fb59c4aa29 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_test.py
@@ -3,7 +3,6 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Tests for mb.py."""
import json
@@ -16,6 +15,7 @@ import mb
class FakeMBW(mb.MetaBuildWrapper):
+
def __init__(self, win32=False):
super(FakeMBW, self).__init__()
@@ -93,6 +93,7 @@ class FakeMBW(mb.MetaBuildWrapper):
class FakeFile(object):
+
def __init__(self, files):
self.name = '/tmp/file'
self.buf = ''
@@ -102,7 +103,7 @@ class FakeFile(object):
self.buf += contents
def close(self):
- self.files[self.name] = self.buf
+ self.files[self.name] = self.buf
TEST_CONFIG = """\
@@ -152,7 +153,6 @@ TEST_CONFIG = """\
}
"""
-
TRYSERVER_CONFIG = """\
{
'builder_groups': {
@@ -177,12 +177,12 @@ TRYSERVER_CONFIG = """\
class UnitTest(unittest.TestCase):
+
def fake_mbw(self, files=None, win32=False):
mbw = FakeMBW(win32=win32)
mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
mbw.files.setdefault(
- mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'),
- '''{
+ mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'), '''{
"foo_unittests": {
"label": "//foo:foo_unittests",
"type": "console_test_launcher",
@@ -211,69 +211,94 @@ class UnitTest(unittest.TestCase):
return mbw
def test_analyze(self):
- files = {'/tmp/in.json': '''{\
+ files = {
+ '/tmp/in.json':
+ '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
- '/tmp/out.json.gn': '''{\
+ '/tmp/out.json.gn':
+ '''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests"],
"test_targets": ["//foo:foo_unittests"]
- }'''}
+ }'''
+ }
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'debug_goma', '//out/Default',
- '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ self.check([
+ 'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
+ '/tmp/out.json'
+ ],
+ mbw=mbw,
+ ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
- self.assertEqual(out, {
- 'status': 'Found dependency',
- 'compile_targets': ['foo:foo_unittests'],
- 'test_targets': ['foo_unittests']
- })
+ self.assertEqual(
+ out, {
+ 'status': 'Found dependency',
+ 'compile_targets': ['foo:foo_unittests'],
+ 'test_targets': ['foo_unittests']
+ })
def test_analyze_optimizes_compile_for_all(self):
- files = {'/tmp/in.json': '''{\
+ files = {
+ '/tmp/in.json':
+ '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
- '/tmp/out.json.gn': '''{\
+ '/tmp/out.json.gn':
+ '''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests", "all"],
"test_targets": ["//foo:foo_unittests"]
- }'''}
+ }'''
+ }
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'debug_goma', '//out/Default',
- '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ self.check([
+ 'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
+ '/tmp/out.json'
+ ],
+ mbw=mbw,
+ ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
# check that 'foo_unittests' is not in the compile_targets
self.assertEqual(['all'], out['compile_targets'])
def test_analyze_handles_other_toolchains(self):
- files = {'/tmp/in.json': '''{\
+ files = {
+ '/tmp/in.json':
+ '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
- '/tmp/out.json.gn': '''{\
+ '/tmp/out.json.gn':
+ '''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests",
"//foo:foo_unittests(bar)"],
"test_targets": ["//foo:foo_unittests"]
- }'''}
+ }'''
+ }
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'debug_goma', '//out/Default',
- '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ self.check([
+ 'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
+ '/tmp/out.json'
+ ],
+ mbw=mbw,
+ ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
# crbug.com/736215: If GN returns a label containing a toolchain,
@@ -285,22 +310,30 @@ class UnitTest(unittest.TestCase):
def test_analyze_handles_way_too_many_results(self):
too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(4 * 1024)])
- files = {'/tmp/in.json': '''{\
+ files = {
+ '/tmp/in.json':
+ '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
- '/tmp/out.json.gn': '''{\
+ '/tmp/out.json.gn':
+ '''{\
"status": "Found dependency",
"compile_targets": [''' + too_many_files + '''],
"test_targets": ["//foo:foo_unittests"]
- }'''}
+ }'''
+ }
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'debug_goma', '//out/Default',
- '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ self.check([
+ 'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
+ '/tmp/out.json'
+ ],
+ mbw=mbw,
+ ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
# If GN returns so many compile targets that we might have command-line
@@ -312,7 +345,8 @@ class UnitTest(unittest.TestCase):
def test_gen(self):
mbw = self.fake_mbw()
self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
- mbw=mbw, ret=0)
+ mbw=mbw,
+ ret=0)
self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
('goma_dir = "/goma"\n'
'is_debug = true\n'
@@ -325,18 +359,22 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(win32=True)
self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
- mbw=mbw, ret=0)
+ mbw=mbw,
+ ret=0)
self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
('goma_dir = "c:\\\\goma"\n'
'is_debug = true\n'
'use_goma = true\n'))
- self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
- '--check\n', mbw.out)
+ self.assertIn(
+ 'c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
+ '--check\n', mbw.out)
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_bot',
- '//out/Debug'],
- mbw=mbw, ret=0)
+ self.check([
+ 'gen', '-m', 'fake_builder_group', '-b', 'fake_args_bot', '//out/Debug'
+ ],
+ mbw=mbw,
+ ret=0)
# TODO(almuthanna): disable test temporarily to
# solve this issue https://crbug.com/v8/11102
# self.assertEqual(
@@ -345,17 +383,23 @@ class UnitTest(unittest.TestCase):
def test_gen_args_file_mixins(self):
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_file',
- '//out/Debug'], mbw=mbw, ret=0)
+ self.check([
+ 'gen', '-m', 'fake_builder_group', '-b', 'fake_args_file', '//out/Debug'
+ ],
+ mbw=mbw,
+ ret=0)
- self.assertEqual(
- mbw.files['/fake_src/out/Debug/args.gn'],
- ('import("//build/args/fake.gn")\n'
- 'use_goma = true\n'))
+ self.assertEqual(mbw.files['/fake_src/out/Debug/args.gn'],
+ ('import("//build/args/fake.gn")\n'
+ 'use_goma = true\n'))
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_file_twice',
- '//out/Debug'], mbw=mbw, ret=1)
+ self.check([
+ 'gen', '-m', 'fake_builder_group', '-b', 'fake_args_file_twice',
+ '//out/Debug'
+ ],
+ mbw=mbw,
+ ret=1)
def test_gen_fails(self):
mbw = self.fake_mbw()
@@ -364,167 +408,162 @@ class UnitTest(unittest.TestCase):
def test_gen_swarming(self):
files = {
- '/tmp/swarming_targets': 'base_unittests\n',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'base_unittests': {"
- " 'label': '//base:base_unittests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/out/Default/base_unittests.runtime_deps': (
- "base_unittests\n"
- ),
+ '/tmp/swarming_targets':
+ 'base_unittests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+ ("{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ '/fake_src/out/Default/base_unittests.runtime_deps':
+ ("base_unittests\n"),
}
mbw = self.fake_mbw(files)
- self.check(['gen',
- '-c', 'debug_goma',
- '--swarming-targets-file', '/tmp/swarming_targets',
- '//out/Default'], mbw=mbw, ret=0)
- self.assertIn('/fake_src/out/Default/base_unittests.isolate',
- mbw.files)
+ self.check([
+ 'gen', '-c', 'debug_goma', '--swarming-targets-file',
+ '/tmp/swarming_targets', '//out/Default'
+ ],
+ mbw=mbw,
+ ret=0)
+ self.assertIn('/fake_src/out/Default/base_unittests.isolate', mbw.files)
self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
mbw.files)
def test_gen_swarming_script(self):
files = {
- '/tmp/swarming_targets': 'cc_perftests\n',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'cc_perftests': {"
- " 'label': '//cc:cc_perftests',"
- " 'type': 'script',"
- " 'script': '/fake_src/out/Default/test_script.py',"
- " 'args': [],"
- "}}\n"
- ),
- 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
- "cc_perftests\n"
- ),
+ '/tmp/swarming_targets':
+ 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+ ("{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'script',"
+ " 'script': '/fake_src/out/Default/test_script.py',"
+ " 'args': [],"
+ "}}\n"),
+ 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps':
+ ("cc_perftests\n"),
}
mbw = self.fake_mbw(files=files, win32=True)
- self.check(['gen',
- '-c', 'debug_goma',
- '--swarming-targets-file', '/tmp/swarming_targets',
- '--isolate-map-file',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl',
- '//out/Default'], mbw=mbw, ret=0)
- self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
- mbw.files)
+ self.check([
+ 'gen', '-c', 'debug_goma', '--swarming-targets-file',
+ '/tmp/swarming_targets', '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl', '//out/Default'
+ ],
+ mbw=mbw,
+ ret=0)
+ self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate', mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
-
def test_multiple_isolate_maps(self):
files = {
- '/tmp/swarming_targets': 'cc_perftests\n',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'cc_perftests': {"
- " 'label': '//cc:cc_perftests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
- "{'cc_perftests2': {"
- " 'label': '//cc:cc_perftests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
- "cc_perftests\n"
- ),
+ '/tmp/swarming_targets':
+ 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+ ("{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl':
+ ("{'cc_perftests2': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps':
+ ("cc_perftests\n"),
}
mbw = self.fake_mbw(files=files, win32=True)
- self.check(['gen',
- '-c', 'debug_goma',
- '--swarming-targets-file', '/tmp/swarming_targets',
- '--isolate-map-file',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl',
- '--isolate-map-file',
- '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
- '//out/Default'], mbw=mbw, ret=0)
- self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
- mbw.files)
+ self.check([
+ 'gen', '-c', 'debug_goma', '--swarming-targets-file',
+ '/tmp/swarming_targets', '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl', '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl', '//out/Default'
+ ],
+ mbw=mbw,
+ ret=0)
+ self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate', mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
-
def test_duplicate_isolate_maps(self):
files = {
- '/tmp/swarming_targets': 'cc_perftests\n',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'cc_perftests': {"
- " 'label': '//cc:cc_perftests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
- "{'cc_perftests': {"
- " 'label': '//cc:cc_perftests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
- "cc_perftests\n"
- ),
+ '/tmp/swarming_targets':
+ 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+ ("{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl':
+ ("{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps':
+ ("cc_perftests\n"),
}
mbw = self.fake_mbw(files=files, win32=True)
# Check that passing duplicate targets into mb fails.
- self.check(['gen',
- '-c', 'debug_goma',
- '--swarming-targets-file', '/tmp/swarming_targets',
- '--isolate-map-file',
- '/fake_src/testing/buildbot/gn_isolate_map.pyl',
- '--isolate-map-file',
- '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
- '//out/Default'], mbw=mbw, ret=1)
+ self.check([
+ 'gen', '-c', 'debug_goma', '--swarming-targets-file',
+ '/tmp/swarming_targets', '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl', '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl', '//out/Default'
+ ],
+ mbw=mbw,
+ ret=1)
def test_isolate(self):
files = {
- '/fake_src/out/Default/toolchain.ninja': "",
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'base_unittests': {"
- " 'label': '//base:base_unittests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/out/Default/base_unittests.runtime_deps': (
- "base_unittests\n"
- ),
+ '/fake_src/out/Default/toolchain.ninja':
+ "",
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+ ("{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ '/fake_src/out/Default/base_unittests.runtime_deps':
+ ("base_unittests\n"),
}
- self.check(['isolate', '-c', 'debug_goma', '//out/Default',
- 'base_unittests'], files=files, ret=0)
+ self.check(
+ ['isolate', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
+ files=files,
+ ret=0)
# test running isolate on an existing build_dir
files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
- files=files, ret=0)
+ files=files,
+ ret=0)
self.check(['isolate', '//out/Default', 'base_unittests'],
- files=files, ret=0)
+ files=files,
+ ret=0)
def test_run(self):
files = {
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'base_unittests': {"
- " 'label': '//base:base_unittests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/out/Default/base_unittests.runtime_deps': (
- "base_unittests\n"
- ),
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+ ("{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"),
+ '/fake_src/out/Default/base_unittests.runtime_deps':
+ ("base_unittests\n"),
}
- self.check(['run', '-c', 'debug_goma', '//out/Default',
- 'base_unittests'], files=files, ret=0)
+ self.check(['run', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
+ files=files,
+ ret=0)
def test_lookup(self):
- self.check(['lookup', '-c', 'debug_goma'], ret=0,
+ self.check(['lookup', '-c', 'debug_goma'],
+ ret=0,
out=('\n'
'Writing """\\\n'
'is_debug = true\n'
@@ -533,12 +572,14 @@ class UnitTest(unittest.TestCase):
'/fake_src/buildtools/linux64/gn gen _path_\n'))
def test_quiet_lookup(self):
- self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
+ self.check(['lookup', '-c', 'debug_goma', '--quiet'],
+ ret=0,
out=('is_debug = true\n'
'use_goma = true\n'))
def test_lookup_goma_dir_expansion(self):
- self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
+ self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'],
+ ret=0,
out=('\n'
'Writing """\\\n'
'enable_doom_melon = true\n'
@@ -560,43 +601,52 @@ class UnitTest(unittest.TestCase):
def test_multiple_phases(self):
# Check that not passing a --phase to a multi-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_builder_group',
- '-b', 'fake_multi_phase'],
- ret=1)
+ mbw = self.check(
+ ['lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase'], ret=1)
self.assertIn('Must specify a build --phase', mbw.out)
# Check that passing a --phase to a single-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_builder_group',
- '-b', 'fake_builder',
- '--phase', 'phase_1'], ret=1)
+ mbw = self.check([
+ 'lookup', '-m', 'fake_builder_group', '-b', 'fake_builder', '--phase',
+ 'phase_1'
+ ],
+ ret=1)
self.assertIn('Must not specify a build --phase', mbw.out)
# Check that passing a wrong phase key to a multi-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_builder_group',
- '-b', 'fake_multi_phase',
- '--phase', 'wrong_phase'], ret=1)
+ mbw = self.check([
+ 'lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase',
+ '--phase', 'wrong_phase'
+ ],
+ ret=1)
self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
# Check that passing a correct phase key to a multi-phase builder passes.
- mbw = self.check(['lookup', '-m', 'fake_builder_group',
- '-b', 'fake_multi_phase',
- '--phase', 'phase_1'], ret=0)
+ mbw = self.check([
+ 'lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase',
+ '--phase', 'phase_1'
+ ],
+ ret=0)
self.assertIn('phase = 1', mbw.out)
- mbw = self.check(['lookup', '-m', 'fake_builder_group',
- '-b', 'fake_multi_phase',
- '--phase', 'phase_2'], ret=0)
+ mbw = self.check([
+ 'lookup', '-m', 'fake_builder_group', '-b', 'fake_multi_phase',
+ '--phase', 'phase_2'
+ ],
+ ret=0)
self.assertIn('phase = 2', mbw.out)
def test_recursive_lookup(self):
files = {
- '/fake_src/build/args/fake.gn': (
- 'enable_doom_melon = true\n'
- 'enable_antidoom_banana = true\n'
- )
+ '/fake_src/build/args/fake.gn': ('enable_doom_melon = true\n'
+ 'enable_antidoom_banana = true\n')
}
- self.check(['lookup', '-m', 'fake_builder_group', '-b', 'fake_args_file',
- '--recursive'], files=files, ret=0,
+ self.check([
+ 'lookup', '-m', 'fake_builder_group', '-b', 'fake_args_file',
+ '--recursive'
+ ],
+ files=files,
+ ret=0,
out=('enable_antidoom_banana = true\n'
'enable_doom_melon = true\n'
'use_goma = true\n'))
@@ -608,7 +658,8 @@ class UnitTest(unittest.TestCase):
def test_buildbucket(self):
mbw = self.fake_mbw()
mbw.files[mbw.default_config] = TRYSERVER_CONFIG
- self.check(['gerrit-buildbucket-config'], mbw=mbw,
+ self.check(['gerrit-buildbucket-config'],
+ mbw=mbw,
ret=0,
out=('# This file was generated using '
'"tools/mb/mb.py gerrit-buildbucket-config".\n'
diff --git a/deps/v8/tools/parse-processor.mjs b/deps/v8/tools/parse-processor.mjs
index e604908db8..fc5868b008 100644
--- a/deps/v8/tools/parse-processor.mjs
+++ b/deps/v8/tools/parse-processor.mjs
@@ -746,7 +746,7 @@ function startOf(timestamp, time) {
export class ParseProcessor extends LogReader {
constructor() {
super();
- this.dispatchTable_ = {
+ this.setDispatchTable({
// Avoid accidental leaking of __proto__ properties and force this object
// to be in dictionary-mode.
__proto__: null,
@@ -780,7 +780,7 @@ export class ParseProcessor extends LogReader {
parsers: [parseInt, parseString, parseString],
processor: this.processScriptSource
},
- };
+ });
this.functionEventDispatchTable_ = {
// Avoid accidental leaking of __proto__ properties and force this object
// to be in dictionary-mode.
@@ -820,20 +820,7 @@ export class ParseProcessor extends LogReader {
}
processString(string) {
- let end = string.length;
- let current = 0;
- let next = 0;
- let line;
- let i = 0;
- let entry;
- while (current < end) {
- next = string.indexOf("\n", current);
- if (next === -1) break;
- i++;
- line = string.substring(current, next);
- current = next + 1;
- this.processLogLine(line);
- }
+ this.processLogChunk(string);
this.postProcess();
}
diff --git a/deps/v8/tools/profile.mjs b/deps/v8/tools/profile.mjs
index c62ebcf177..ba2a523fb5 100644
--- a/deps/v8/tools/profile.mjs
+++ b/deps/v8/tools/profile.mjs
@@ -234,6 +234,35 @@ export class Script {
}
+const kOffsetPairRegex = /C([0-9]+)O([0-9]+)/g;
+class SourcePositionTable {
+ constructor(encodedTable) {
+ this._offsets = [];
+ while (true) {
+ const regexResult = kOffsetPairRegex.exec(encodedTable);
+ if (!regexResult) break;
+ const codeOffset = parseInt(regexResult[1]);
+ const scriptOffset = parseInt(regexResult[2]);
+ if (isNaN(codeOffset) || isNaN(scriptOffset)) continue;
+ this._offsets.push({code: codeOffset, script: scriptOffset});
+ }
+ }
+
+ getScriptOffset(codeOffset) {
+ if (codeOffset < 0) {
+      throw new Error(`Invalid codeOffset=${codeOffset}, should be >= 0`);
+    }
+    for (let i = this._offsets.length - 1; i >= 0; i--) {
+ const offset = this._offsets[i];
+ if (offset.code <= codeOffset) {
+ return offset.script;
+ }
+ }
+ return this._offsets[0].script;
+ }
+}
+
+
class SourceInfo {
script;
start;
@@ -243,13 +272,16 @@ class SourceInfo {
fns;
disassemble;
- setSourcePositionInfo(script, startPos, endPos, sourcePositionTable, inliningPositions, inlinedFunctions) {
+ setSourcePositionInfo(
+ script, startPos, endPos, sourcePositionTableData, inliningPositions,
+ inlinedFunctions) {
this.script = script;
this.start = startPos;
this.end = endPos;
- this.positions = sourcePositionTable;
+ this.positions = sourcePositionTableData;
this.inlined = inliningPositions;
this.fns = inlinedFunctions;
+ this.sourcePositionTable = new SourcePositionTable(sourcePositionTableData);
}
setDisassemble(code) {
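
The encoded table is a run of C<codeOffset>O<scriptOffset> pairs; with the class
above (module-private, shown standalone only for illustration) decoding behaves
roughly like this, values invented:

    // 'C0O10C12O25' -> [{code: 0, script: 10}, {code: 12, script: 25}]
    const table = new SourcePositionTable('C0O10C12O25');
    table.getScriptOffset(0);   // 10
    table.getScriptOffset(15);  // 25 (last entry whose code offset is <= 15)
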
diff --git a/deps/v8/tools/run.py b/deps/v8/tools/run.py
index 59b3c15e68..99ccea1f6c 100755
--- a/deps/v8/tools/run.py
+++ b/deps/v8/tools/run.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -6,8 +6,6 @@
"""This program wraps an arbitrary command since gn currently can only execute
scripts."""
-from __future__ import print_function
-
import subprocess
import sys
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 4dd7d87996..5b2862fdd9 100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -102,11 +103,9 @@ Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
-# for py2/py3 compatibility
-from __future__ import print_function
-from functools import reduce
-
from collections import OrderedDict
+from math import sqrt
+from statistics import mean, stdev
import copy
import json
import logging
@@ -124,21 +123,6 @@ from testrunner.local import command
from testrunner.local import utils
from testrunner.objects.output import Output, NULL_OUTPUT
-from math import sqrt
-# NOTE: added import here to prevent breakages during the py2/3 migration,
-# once we enable python3 only, we can move the import up
-try:
- from numpy import mean
- from numpy import std as stdev
-except ImportError:
- from statistics import mean, stdev
-
-
-# for py2/py3 compatibility
-try:
- basestring # Python 2
-except NameError: # Python 3
- basestring = str
SUPPORTED_ARCHS = ['arm',
'ia32',
@@ -276,7 +260,8 @@ class ResultTracker(object):
avg = mean(results)
avg_stderr = stdev(results) / sqrt(len(results))
logging.debug(' Mean: %.2f, mean_stderr: %.2f', avg, avg_stderr)
- logging.info('>>> Confidence level is %.2f', avg / (1000.0 * avg_stderr))
+ logging.info('>>> Confidence level is %.2f',
+ avg / max(1000.0 * avg_stderr, .1))
return confidence_level * avg_stderr < avg / 1000.0
def __str__(self): # pragma: no cover
@@ -349,7 +334,7 @@ class GraphConfig(Node):
assert isinstance(suite.get('path', []), list)
assert isinstance(suite.get('owners', []), list)
- assert isinstance(suite['name'], basestring)
+ assert isinstance(suite['name'], str)
assert isinstance(suite.get('flags', []), list)
assert isinstance(suite.get('test_flags', []), list)
assert isinstance(suite.get('resources', []), list)
diff --git a/deps/v8/tools/system-analyzer/app-model.mjs b/deps/v8/tools/system-analyzer/app-model.mjs
index 4e339cb0d5..5bc15fe6a6 100644
--- a/deps/v8/tools/system-analyzer/app-model.mjs
+++ b/deps/v8/tools/system-analyzer/app-model.mjs
@@ -17,7 +17,6 @@ class State {
_mapTimeline;
_deoptTimeline;
_codeTimeline;
- _apiTimeline;
_tickTimeline;
_timerTimeline;
_minStartTime = Number.POSITIVE_INFINITY;
@@ -42,13 +41,12 @@ class State {
}
setTimelines(
- mapTimeline, icTimeline, deoptTimeline, codeTimeline, apiTimeline,
- tickTimeline, timerTimeline) {
+ mapTimeline, icTimeline, deoptTimeline, codeTimeline, tickTimeline,
+ timerTimeline) {
this._mapTimeline = mapTimeline;
this._icTimeline = icTimeline;
this._deoptTimeline = deoptTimeline;
this._codeTimeline = codeTimeline;
- this._apiTimeline = apiTimeline;
this._tickTimeline = tickTimeline;
this._timerTimeline = timerTimeline;
for (let timeline of arguments) {
@@ -78,10 +76,6 @@ class State {
return this._codeTimeline;
}
- get apiTimeline() {
- return this._apiTimeline;
- }
-
get tickTimeline() {
return this._tickTimeline;
}
@@ -93,8 +87,7 @@ class State {
get timelines() {
return [
this._mapTimeline, this._icTimeline, this._deoptTimeline,
- this._codeTimeline, this._apiTimeline, this._tickTimeline,
- this._timerTimeline
+ this._codeTimeline, this._tickTimeline, this._timerTimeline
];
}
diff --git a/deps/v8/tools/system-analyzer/helper.mjs b/deps/v8/tools/system-analyzer/helper.mjs
index a50e06d3be..717faca5d5 100644
--- a/deps/v8/tools/system-analyzer/helper.mjs
+++ b/deps/v8/tools/system-analyzer/helper.mjs
@@ -2,21 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export function delay(time) {
- return new Promise(resolver => setTimeout(resolver, time));
-}
-
-export function defer() {
- let resolve_func, reject_func;
- const p = new Promise((resolve, reject) => {
- resolve_func = resolve;
- reject_func = resolve;
- });
- p.resolve = resolve_func;
- p.reject = reject_func;
- return p;
-}
-
export class Group {
constructor(key, id, parentTotal, entries) {
this.key = key;
@@ -73,4 +58,25 @@ export function arrayEquals(left, right) {
return true;
}
+export function entriesEquals(left, right) {
+ if (left == right) return true;
+ if (left == undefined) return right == undefined;
+ const leftEntries = Object.entries(left);
+ const rightEntries = Object.entries(right);
+ if (leftEntries.length !== rightEntries.length) return false;
+ for (let i = 0; i < leftEntries.length; i++) {
+ const l = leftEntries[i];
+ const r = rightEntries[i];
+ if (l[0] != r[0]) return false;
+ if (l[1] != r[1]) return false;
+ }
+ return true;
+}
+
+export function keysEquals(left, right) {
+ if (left == right) return true;
+ if (left == undefined) return right == undefined;
+ return arrayEquals(Object.keys(left), Object.keys(right));
+}
+
export * from '../js/helper.mjs'
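
The new shallow comparison helpers behave as follows (illustrative values):

    entriesEquals({a: 1, b: 2}, {a: 1, b: 2});  // true: same keys and values, in order
    entriesEquals({a: 1}, {a: 1, b: 2});        // false: different number of entries
    keysEquals({a: 1, b: 2}, {a: 0, b: 0});     // true: only the key names are compared
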
diff --git a/deps/v8/tools/system-analyzer/index.css b/deps/v8/tools/system-analyzer/index.css
index b2fb95843f..4d2cda9035 100644
--- a/deps/v8/tools/system-analyzer/index.css
+++ b/deps/v8/tools/system-analyzer/index.css
@@ -1,4 +1,5 @@
:root {
+ --code-font: Consolas, Monaco, Menlo, monospace;
--background-color: #000000;
--surface-color-rgb: 18, 18, 18;
--surface-color: rgb(var(--surface-color-rgb));
@@ -69,6 +70,10 @@ kbd {
white-space: nowrap;
}
+kbd, code, pre {
+ font-family: var(--code-font);
+}
+
a {
color: var(--primary-color);
text-decoration: none;
@@ -174,9 +179,9 @@ button:hover {
.colorbox {
display: inline-block;
- width: 10px;
- height: 10px;
- border: 1px var(--background-color) solid;
+ width: 8px;
+ height: 8px;
+ border: 2px var(--background-color) solid;
border-radius: 50%;
}
@@ -228,6 +233,7 @@ button:hover,
.mark:hover,
.clickable:active,
.mark:active {
+ border-radius: 3px;
background-color: var(--primary-color);
color: var(--on-primary-color);
cursor: pointer;
@@ -244,17 +250,16 @@ button:hover,
padding: 0 10px 0 10px;
}
.legend dt {
- font-family: monospace;
+ font-family: var(--code-font);
}
.legend h3 {
margin-top: 10px;
}
-
.panelCloserLabel {
float: left;
cursor: zoom-out;
- margin: 0 10px 0 0;
+ margin: 0 5px 0 0;
transition: transform 0.2s ease-out;
user-select: none;
}
@@ -275,4 +280,4 @@ button:hover,
}
.panelCloserInput:checked ~ * {
display: none;
-} \ No newline at end of file
+}
diff --git a/deps/v8/tools/system-analyzer/index.html b/deps/v8/tools/system-analyzer/index.html
index 0a333dd18f..f8b99f3dda 100644
--- a/deps/v8/tools/system-analyzer/index.html
+++ b/deps/v8/tools/system-analyzer/index.html
@@ -47,7 +47,7 @@ found in the LICENSE file. -->
</head>
<body>
- <tool-tip id="tool-tip"></tool-tip>
+ <tool-tip id="tool-tip" style="will-change: transform"></tool-tip>
<section id="file-reader">
<log-file-reader id="log-file-reader"></log-file-reader>
@@ -61,7 +61,6 @@ found in the LICENSE file. -->
<timeline-track id="ic-track" title="IC"></timeline-track>
<timeline-track id="deopt-track" title="Deopt"></timeline-track>
<timeline-track id="code-track" title="Code"></timeline-track>
- <timeline-track id="api-track" title="API"></timeline-track>
</timeline-panel>
<div class="panels">
@@ -92,7 +91,6 @@ found in the LICENSE file. -->
<list-panel id="map-list" title="Map Events"></list-panel>
<list-panel id="deopt-list" title="Deopt Events"></list-panel>
<list-panel id="code-list" title="Code Events"></list-panel>
- <list-panel id="api-list" title="API Events"></list-panel>
</div>
</section>
@@ -155,8 +153,8 @@ found in the LICENSE file. -->
</dt>
<dd>Log detailed generated generated code</dd>
<dt>
- <a href="https://source.chromium.org/search?q=FLAG_log_api">
- <code>--log-api</code>
+ <a href="https://source.chromium.org/search?q=FLAG_log_code">
+ <code>--log-code</code>
</a>
</dt>
<dd>Log details about deoptimized code</dd>
@@ -176,6 +174,12 @@ found in the LICENSE file. -->
<h3>Keyboard Shortcuts for Navigation</h3>
<dl>
+ <dt><kbd>A</kbd></dt>
+ <dd>Scroll left</dd>
+
+ <dt><kbd>D</kbd></dt>
+      <dd>Scroll right</dd>
+
<dt><kbd>SHIFT</kbd> + <kbd>Arrow Up</kbd></dt>
<dd>Follow Map transition forward (first child)</dd>
diff --git a/deps/v8/tools/system-analyzer/index.mjs b/deps/v8/tools/system-analyzer/index.mjs
index 41463d9484..21254aa74e 100644
--- a/deps/v8/tools/system-analyzer/index.mjs
+++ b/deps/v8/tools/system-analyzer/index.mjs
@@ -5,7 +5,6 @@
import {Script, SourcePosition} from '../profile.mjs';
import {State} from './app-model.mjs';
-import {ApiLogEntry} from './log/api.mjs';
import {CodeLogEntry} from './log/code.mjs';
import {DeoptLogEntry} from './log/code.mjs';
import {SharedLibLogEntry} from './log/code.mjs';
@@ -15,9 +14,8 @@ import {MapLogEntry} from './log/map.mjs';
import {TickLogEntry} from './log/tick.mjs';
import {TimerLogEntry} from './log/timer.mjs';
import {Processor} from './processor.mjs';
-import {Timeline} from './timeline.mjs'
import {FocusEvent, SelectionEvent, SelectRelatedEvent, SelectTimeEvent, ToolTipEvent,} from './view/events.mjs';
-import {$, CSSColor, groupBy} from './view/helper.mjs';
+import {$, groupBy} from './view/helper.mjs';
class App {
_state;
@@ -35,14 +33,12 @@ class App {
icTrack: $('#ic-track'),
deoptTrack: $('#deopt-track'),
codeTrack: $('#code-track'),
- apiTrack: $('#api-track'),
timerTrack: $('#timer-track'),
icList: $('#ic-list'),
mapList: $('#map-list'),
codeList: $('#code-list'),
deoptList: $('#deopt-list'),
- apiList: $('#api-list'),
mapPanel: $('#map-panel'),
codePanel: $('#code-panel'),
@@ -51,11 +47,11 @@ class App {
toolTip: $('#tool-tip'),
};
this._view.logFileReader.addEventListener(
- 'fileuploadstart', (e) => this.handleFileUploadStart(e));
+ 'fileuploadstart', this.handleFileUploadStart.bind(this));
this._view.logFileReader.addEventListener(
- 'fileuploadchunk', (e) => this.handleFileUploadChunk(e));
+ 'fileuploadchunk', this.handleFileUploadChunk.bind(this));
this._view.logFileReader.addEventListener(
- 'fileuploadend', (e) => this.handleFileUploadEnd(e));
+ 'fileuploadend', this.handleFileUploadEnd.bind(this));
this._startupPromise = this._loadCustomElements();
this._view.codeTrack.svg = true;
}
@@ -65,7 +61,6 @@ class App {
SourcePosition,
MapLogEntry,
IcLogEntry,
- ApiLogEntry,
CodeLogEntry,
DeoptLogEntry,
SharedLibLogEntry,
@@ -91,14 +86,14 @@ class App {
document.addEventListener(
'keydown', e => this._navigation?.handleKeyDown(e));
document.addEventListener(
- SelectRelatedEvent.name, e => this.handleSelectRelatedEntries(e));
+ SelectRelatedEvent.name, this.handleSelectRelatedEntries.bind(this));
document.addEventListener(
- SelectionEvent.name, e => this.handleSelectEntries(e))
+ SelectionEvent.name, this.handleSelectEntries.bind(this))
document.addEventListener(
- FocusEvent.name, e => this.handleFocusLogEntry(e));
+ FocusEvent.name, this.handleFocusLogEntry.bind(this));
document.addEventListener(
- SelectTimeEvent.name, e => this.handleTimeRangeSelect(e));
- document.addEventListener(ToolTipEvent.name, e => this.handleToolTip(e));
+ SelectTimeEvent.name, this.handleTimeRangeSelect.bind(this));
+ document.addEventListener(ToolTipEvent.name, this.handleToolTip.bind(this));
}
handleSelectRelatedEntries(e) {
@@ -126,7 +121,6 @@ class App {
entries = entry.entries.concat(entry.sourcePositions);
break;
case TimerLogEntry:
- case ApiLogEntry:
case CodeLogEntry:
case TickLogEntry:
case SharedLibLogEntry:
@@ -176,8 +170,6 @@ class App {
return this.showMapEntries(entries, focusView);
case IcLogEntry:
return this.showIcEntries(entries, focusView);
- case ApiLogEntry:
- return this.showApiEntries(entries, focusView);
case CodeLogEntry:
return this.showCodeEntries(entries, focusView);
case DeoptLogEntry:
@@ -216,11 +208,6 @@ class App {
if (focusView) this._view.codePanel.show();
}
- showApiEntries(entries, focusView = true) {
- this._view.apiList.selectedLogEntries = entries;
- if (focusView) this._view.apiList.show();
- }
-
showTickEntries(entries, focusView = true) {}
showTimerEntries(entries, focusView = true) {}
@@ -231,19 +218,18 @@ class App {
handleTimeRangeSelect(e) {
e.stopImmediatePropagation();
- this.selectTimeRange(e.start, e.end);
+ this.selectTimeRange(e.start, e.end, e.focus, e.zoom);
}
- selectTimeRange(start, end) {
+ selectTimeRange(start, end, focus = false, zoom = false) {
this._state.selectTimeRange(start, end);
this.showMapEntries(this._state.mapTimeline.selectionOrSelf, false);
this.showIcEntries(this._state.icTimeline.selectionOrSelf, false);
this.showDeoptEntries(this._state.deoptTimeline.selectionOrSelf, false);
this.showCodeEntries(this._state.codeTimeline.selectionOrSelf, false);
- this.showApiEntries(this._state.apiTimeline.selectionOrSelf, false);
this.showTickEntries(this._state.tickTimeline.selectionOrSelf, false);
this.showTimerEntries(this._state.timerTimeline.selectionOrSelf, false);
- this._view.timelinePanel.timeSelection = {start, end};
+ this._view.timelinePanel.timeSelection = {start, end, focus, zoom};
}
handleFocusLogEntry(e) {
@@ -261,8 +247,6 @@ class App {
return this.focusMapLogEntry(entry);
case IcLogEntry:
return this.focusIcLogEntry(entry);
- case ApiLogEntry:
- return this.focusApiLogEntry(entry);
case CodeLogEntry:
return this.focusCodeLogEntry(entry);
case DeoptLogEntry:
@@ -313,12 +297,6 @@ class App {
// no-op.
}
- focusApiLogEntry(entry) {
- this._state.apiLogEntry = entry;
- this._view.apiTrack.focusedEntry = entry;
- this.focusSourcePosition(entry.sourcePosition);
- }
-
focusTickLogEntry(entry) {
this._state.tickLogEntry = entry;
this._view.tickTrack.focusedEntry = entry;
@@ -362,6 +340,8 @@ class App {
this.restartApp();
$('#container').className = 'initial';
this._processor = new Processor();
+ this._processor.setProgressCallback(
+ e.detail.totalSize, e.detail.progressCallback);
}
async handleFileUploadChunk(e) {
@@ -379,18 +359,16 @@ class App {
const icTimeline = processor.icTimeline;
const deoptTimeline = processor.deoptTimeline;
const codeTimeline = processor.codeTimeline;
- const apiTimeline = processor.apiTimeline;
const tickTimeline = processor.tickTimeline;
const timerTimeline = processor.timerTimeline;
this._state.setTimelines(
- mapTimeline, icTimeline, deoptTimeline, codeTimeline, apiTimeline,
- tickTimeline, timerTimeline);
+ mapTimeline, icTimeline, deoptTimeline, codeTimeline, tickTimeline,
+ timerTimeline);
this._view.mapPanel.timeline = mapTimeline;
this._view.icList.timeline = icTimeline;
this._view.mapList.timeline = mapTimeline;
this._view.deoptList.timeline = deoptTimeline;
this._view.codeList.timeline = codeTimeline;
- this._view.apiList.timeline = apiTimeline;
this._view.scriptPanel.scripts = processor.scripts;
this._view.codePanel.timeline = codeTimeline;
this._view.codePanel.timeline = codeTimeline;
@@ -409,7 +387,6 @@ class App {
this._view.icTrack.data = this._state.icTimeline;
this._view.deoptTrack.data = this._state.deoptTimeline;
this._view.codeTrack.data = this._state.codeTimeline;
- this._view.apiTrack.data = this._state.apiTimeline;
this._view.tickTrack.data = this._state.tickTimeline;
this._view.timerTrack.data = this._state.timerTimeline;
}
@@ -421,115 +398,53 @@ class Navigation {
this.state = state;
this._view = view;
}
+
get map() {
return this.state.map
}
+
set map(value) {
this.state.map = value
}
+
get chunks() {
return this.state.mapTimeline.chunks;
}
+
increaseTimelineResolution() {
this._view.timelinePanel.nofChunks *= 1.5;
this.state.nofChunks *= 1.5;
}
+
decreaseTimelineResolution() {
this._view.timelinePanel.nofChunks /= 1.5;
this.state.nofChunks /= 1.5;
}
- selectNextEdge() {
- if (!this.map) return;
- if (this.map.children.length != 1) return;
- this.map = this.map.children[0].to;
- this._view.mapTrack.selectedEntry = this.map;
- this.updateUrl();
- this._view.mapPanel.map = this.map;
- }
- selectPrevEdge() {
- if (!this.map) return;
- if (!this.map.parent) return;
- this.map = this.map.parent;
- this._view.mapTrack.selectedEntry = this.map;
- this.updateUrl();
- this._view.mapPanel.map = this.map;
- }
- selectDefaultMap() {
- this.map = this.chunks[0].at(0);
- this._view.mapTrack.selectedEntry = this.map;
- this.updateUrl();
- this._view.mapPanel.map = this.map;
- }
- moveInChunks(next) {
- if (!this.map) return this.selectDefaultMap();
- let chunkIndex = this.map.chunkIndex(this.chunks);
- let chunk = this.chunks[chunkIndex];
- let index = chunk.indexOf(this.map);
- if (next) {
- chunk = chunk.next(this.chunks);
- } else {
- chunk = chunk.prev(this.chunks);
- }
- if (!chunk) return;
- index = Math.min(index, chunk.size() - 1);
- this.map = chunk.at(index);
- this._view.mapTrack.selectedEntry = this.map;
- this.updateUrl();
- this._view.mapPanel.map = this.map;
- }
- moveInChunk(delta) {
- if (!this.map) return this.selectDefaultMap();
- let chunkIndex = this.map.chunkIndex(this.chunks)
- let chunk = this.chunks[chunkIndex];
- let index = chunk.indexOf(this.map) + delta;
- let map;
- if (index < 0) {
- map = chunk.prev(this.chunks).last();
- } else if (index >= chunk.size()) {
- map = chunk.next(this.chunks).first()
- } else {
- map = chunk.at(index);
- }
- this.map = map;
- this._view.mapTrack.selectedEntry = this.map;
- this.updateUrl();
- this._view.mapPanel.map = this.map;
- }
+
updateUrl() {
let entries = this.state.entries;
let params = new URLSearchParams(entries);
window.history.pushState(entries, '', '?' + params.toString());
}
+
+ scrollLeft() {}
+
+ scrollRight() {}
+
handleKeyDown(event) {
switch (event.key) {
- case 'ArrowUp':
- event.preventDefault();
- if (event.shiftKey) {
- this.selectPrevEdge();
- } else {
- this.moveInChunk(-1);
- }
+ case 'd':
+ this.scrollLeft();
return false;
- case 'ArrowDown':
- event.preventDefault();
- if (event.shiftKey) {
- this.selectNextEdge();
- } else {
- this.moveInChunk(1);
- }
+ case 'a':
+ this.scrollRight();
return false;
- case 'ArrowLeft':
- this.moveInChunks(false);
- break;
- case 'ArrowRight':
- this.moveInChunks(true);
- break;
case '+':
this.increaseTimelineResolution();
- break;
+ return false;
case '-':
this.decreaseTimelineResolution();
- break;
+ return false;
}
}
}
diff --git a/deps/v8/tools/system-analyzer/log/api.mjs b/deps/v8/tools/system-analyzer/log/api.mjs
deleted file mode 100644
index 8e29cb39d5..0000000000
--- a/deps/v8/tools/system-analyzer/log/api.mjs
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-import {LogEntry} from './log.mjs';
-
-export class ApiLogEntry extends LogEntry {
- constructor(type, time, name, argument) {
- super(type, time);
- this._name = name;
- this._argument = argument;
- }
-
- get name() {
- return this._name;
- }
-
- get argument() {
- return this._argument;
- }
-
- static get propertyNames() {
- return ['type', 'name', 'argument'];
- }
-}
diff --git a/deps/v8/tools/system-analyzer/log/code.mjs b/deps/v8/tools/system-analyzer/log/code.mjs
index 4e8ca40f5e..06051a2e52 100644
--- a/deps/v8/tools/system-analyzer/log/code.mjs
+++ b/deps/v8/tools/system-analyzer/log/code.mjs
@@ -2,9 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import {formatBytes} from '../helper.mjs';
-
import {LogEntry} from './log.mjs';
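+// Wraps a string and exposes `isCode` so views (e.g. the property-link-table)
+// can render it with the code font.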
+class CodeString {
+ constructor(string) {
+ if (typeof string !== 'string') {
+ throw new Error('Expected string');
+ }
+ this.string = string;
+ }
+
+ get isCode() {
+ return true;
+ }
+
+ toString() {
+ return this.string;
+ }
+}
+
export class DeoptLogEntry extends LogEntry {
constructor(
type, time, entry, deoptReason, deoptLocation, scriptOffset,
@@ -48,14 +64,33 @@ export class DeoptLogEntry extends LogEntry {
}
}
-export class CodeLogEntry extends LogEntry {
- constructor(type, time, kindName, kind, entry) {
+class CodeLikeLogEntry extends LogEntry {
+ constructor(type, time, profilerEntry) {
super(type, time);
+ this._entry = profilerEntry;
+ profilerEntry.logEntry = this;
+ this._relatedEntries = [];
+ }
+
+ get entry() {
+ return this._entry;
+ }
+
+ add(entry) {
+ this._relatedEntries.push(entry);
+ }
+
+ relatedEntries() {
+ return this._relatedEntries;
+ }
+}
+
+export class CodeLogEntry extends CodeLikeLogEntry {
+ constructor(type, time, kindName, kind, profilerEntry) {
+ super(type, time, profilerEntry);
this._kind = kind;
this._kindName = kindName;
- this._entry = entry;
this._feedbackVector = undefined;
- entry.logEntry = this;
}
get kind() {
@@ -74,10 +109,6 @@ export class CodeLogEntry extends LogEntry {
return this._kindName;
}
- get entry() {
- return this._entry;
- }
-
get functionName() {
return this._entry.functionName ?? this._entry.getRawName();
}
@@ -117,6 +148,8 @@ export class CodeLogEntry extends LogEntry {
get toolTipDict() {
const dict = super.toolTipDict;
dict.size = formatBytes(dict.size);
+ dict.source = new CodeString(dict.source);
+ dict.code = new CodeString(dict.code);
return dict;
}
@@ -182,20 +215,15 @@ export class FeedbackVectorEntry extends LogEntry {
}
}
-export class SharedLibLogEntry extends LogEntry {
- constructor(entry) {
- super('SHARED_LIB', 0);
- this._entry = entry;
+export class SharedLibLogEntry extends CodeLikeLogEntry {
+ constructor(profilerEntry) {
+ super('SHARED_LIB', 0, profilerEntry);
}
get name() {
return this._entry.name;
}
- get entry() {
- return this._entry;
- }
-
toString() {
return `SharedLib`;
}
diff --git a/deps/v8/tools/system-analyzer/log/timer.mjs b/deps/v8/tools/system-analyzer/log/timer.mjs
index d2ca02a46c..01bfbd0421 100644
--- a/deps/v8/tools/system-analyzer/log/timer.mjs
+++ b/deps/v8/tools/system-analyzer/log/timer.mjs
@@ -30,7 +30,7 @@ export class TimerLogEntry extends LogEntry {
}
get duration() {
- return this._endTime - this._time;
+ return Math.max(0, this._endTime - this._time);
}
covers(time) {
@@ -53,4 +53,4 @@ export class TimerLogEntry extends LogEntry {
'duration',
];
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/tools/system-analyzer/processor.mjs b/deps/v8/tools/system-analyzer/processor.mjs
index 38f3a46b9e..f5dba619cd 100644
--- a/deps/v8/tools/system-analyzer/processor.mjs
+++ b/deps/v8/tools/system-analyzer/processor.mjs
@@ -6,7 +6,6 @@ import {LogReader, parseString, parseVarArgs} from '../logreader.mjs';
import {Profile} from '../profile.mjs';
import {RemoteLinuxCppEntriesProvider, RemoteMacOSCppEntriesProvider} from '../tickprocessor.mjs'
-import {ApiLogEntry} from './log/api.mjs';
import {CodeLogEntry, DeoptLogEntry, FeedbackVectorEntry, SharedLibLogEntry} from './log/code.mjs';
import {IcLogEntry} from './log/ic.mjs';
import {Edge, MapLogEntry} from './log/map.mjs';
@@ -49,7 +48,6 @@ class AsyncConsumer {
export class Processor extends LogReader {
_profile = new Profile();
- _apiTimeline = new Timeline();
_codeTimeline = new Timeline();
_deoptTimeline = new Timeline();
_icTimeline = new Timeline();
@@ -61,6 +59,11 @@ export class Processor extends LogReader {
_lastCodeLogEntry;
_lastTickLogEntry;
_chunkRemainder = '';
+
+ _totalInputBytes = 0;
+ _processedInputChars = 0;
+ _progressCallback;
+
MAJOR_VERSION = 7;
MINOR_VERSION = 6;
constructor() {
@@ -71,18 +74,19 @@ export class Processor extends LogReader {
parseInt, parseInt, parseInt, parseInt, parseString, parseString,
parseString, parseString, parseString, parseString
];
- this.dispatchTable_ = {
+ this.setDispatchTable({
__proto__: null,
'v8-version': {
parsers: [
parseInt,
parseInt,
],
- processor: this.processV8Version
+ processor: this.processV8Version,
},
'shared-library': {
parsers: [parseString, parseInt, parseInt, parseInt],
- processor: this.processSharedLibrary
+ processor: this.processSharedLibrary.bind(this),
+ isAsync: true,
},
'code-creation': {
parsers: [
@@ -190,7 +194,7 @@ export class Processor extends LogReader {
parsers: [parseString, parseVarArgs],
processor: this.processApiEvent
},
- };
+ });
// TODO(cbruni): Choose correct cpp entries provider
this._cppEntriesProvider = new RemoteLinuxCppEntriesProvider();
}
@@ -204,11 +208,27 @@ export class Processor extends LogReader {
this._chunkConsumer.push(chunk)
}
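+  // Registers a callback that reports parsing progress as
+  // (processedFraction, processedChars) after each chunk.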
+ setProgressCallback(totalSize, callback) {
+ this._totalInputBytes = totalSize;
+ this._progressCallback = callback;
+ }
+
+ async _updateProgress() {
+ if (!this._progressCallback) return;
+ // We use chars and bytes interchangeably for simplicity. This causes us to
+ // slightly underestimate progress.
+ this._progressCallback(
+ this._processedInputChars / this._totalInputBytes,
+ this._processedInputChars);
+ }
+
async _processChunk(chunk) {
+ const prevProcessedInputChars = this._processedInputChars;
let end = chunk.length;
let current = 0;
let next = 0;
let line;
+ let lineNumber = 1;
try {
while (current < end) {
next = chunk.indexOf('\n', current);
@@ -222,10 +242,14 @@ export class Processor extends LogReader {
this._chunkRemainder = '';
}
current = next + 1;
+ lineNumber++;
await this.processLogLine(line);
+ this._processedInputChars = prevProcessedInputChars + current;
}
+ this._updateProgress();
} catch (e) {
- console.error(`Error occurred during parsing, trying to continue: ${e}`);
+ console.error(
+ `Could not parse log line ${lineNumber}, trying to continue: ${e}`);
}
}
@@ -314,12 +338,13 @@ export class Processor extends LogReader {
timestamp, codeSize, instructionStart, inliningId, scriptOffset,
deoptKind, deoptLocation, deoptReason) {
this._lastTimestamp = timestamp;
- const codeEntry = this._profile.findEntry(instructionStart);
+ const profCodeEntry = this._profile.findEntry(instructionStart);
const logEntry = new DeoptLogEntry(
- deoptKind, timestamp, codeEntry, deoptReason, deoptLocation,
+ deoptKind, timestamp, profCodeEntry, deoptReason, deoptLocation,
scriptOffset, instructionStart, codeSize, inliningId);
+ profCodeEntry.logEntry.add(logEntry);
this._deoptTimeline.push(logEntry);
- this.addSourcePosition(codeEntry, logEntry);
+ this.addSourcePosition(profCodeEntry, logEntry);
logEntry.functionSourcePosition = logEntry.sourcePosition;
// custom parse deopt location
if (deoptLocation === '<unknown>') return;
@@ -328,7 +353,7 @@ export class Processor extends LogReader {
if (inlinedPos > 0) {
deoptLocation = deoptLocation.substring(0, inlinedPos)
}
- const script = this.getProfileEntryScript(codeEntry);
+ const script = this.getProfileEntryScript(profCodeEntry);
if (!script) return;
const colSeparator = deoptLocation.lastIndexOf(':');
const rowSeparator = deoptLocation.lastIndexOf(':', colSeparator - 1);
@@ -342,16 +367,16 @@ export class Processor extends LogReader {
processFeedbackVector(
timestamp, fbv_address, fbv_length, instructionStart, optimization_marker,
optimization_tier, invocation_count, profiler_ticks, fbv_string) {
- const codeEntry = this._profile.findEntry(instructionStart);
- if (!codeEntry) {
+ const profCodeEntry = this._profile.findEntry(instructionStart);
+ if (!profCodeEntry) {
       console.warn('Didn\'t find code for FBV', {fbv_address, instructionStart});
return;
}
const fbv = new FeedbackVectorEntry(
- timestamp, codeEntry.logEntry, fbv_address, fbv_length,
+ timestamp, profCodeEntry.logEntry, fbv_address, fbv_length,
optimization_marker, optimization_tier, invocation_count,
profiler_ticks, fbv_string);
- codeEntry.logEntry.setFeedbackVector(fbv);
+ profCodeEntry.logEntry.setFeedbackVector(fbv);
}
processScriptSource(scriptId, url, source) {
@@ -488,14 +513,18 @@ export class Processor extends LogReader {
return;
}
}
- // TODO: use SourcePosition directly.
- let edge = new Edge(type, name, reason, time, from_, to_);
- const codeEntry = this._profile.findEntry(pc)
- to_.entry = codeEntry;
- let script = this.getProfileEntryScript(codeEntry);
- if (script) {
- to_.sourcePosition = script.addSourcePosition(line, column, to_)
+ if (pc) {
+ const profCodeEntry = this._profile.findEntry(pc);
+ if (profCodeEntry) {
+ to_.entry = profCodeEntry;
+ profCodeEntry.logEntry.add(to_);
+ let script = this.getProfileEntryScript(profCodeEntry);
+ if (script) {
+ to_.sourcePosition = script.addSourcePosition(line, column, to_);
+ }
+ }
}
+ let edge = new Edge(type, name, reason, time, from_, to_);
if (to_.parent !== undefined && to_.parent === from_) {
// Fix bug where we double log transitions.
console.warn('Fixing up double transition');
@@ -549,19 +578,7 @@ export class Processor extends LogReader {
}
processApiEvent(type, varArgs) {
- let name, arg1;
- if (varArgs.length == 0) {
- const index = type.indexOf(':');
- if (index > 0) {
- name = type;
- type = type.substr(0, index);
- }
- } else {
- name = varArgs[0];
- arg1 = varArgs[1];
- }
- this._apiTimeline.push(
- new ApiLogEntry(type, this._lastTimestamp, name, arg1));
+    // Legacy API events are no longer supported.
}
processTimerEventStart(type, time) {
@@ -598,10 +615,6 @@ export class Processor extends LogReader {
return this._codeTimeline;
}
- get apiTimeline() {
- return this._apiTimeline;
- }
-
get tickTimeline() {
return this._tickTimeline;
}
diff --git a/deps/v8/tools/system-analyzer/view/code-panel-template.html b/deps/v8/tools/system-analyzer/view/code-panel-template.html
index d237ac3a51..13c1923639 100644
--- a/deps/v8/tools/system-analyzer/view/code-panel-template.html
+++ b/deps/v8/tools/system-analyzer/view/code-panel-template.html
@@ -23,6 +23,10 @@ found in the LICENSE file. -->
.addr:hover {
cursor: pointer;
}
+ .basicBlock:hover {
+ background-color: var(--border-color);
+ border-radius: 2px;
+ }
</style>
<div class="panel">
diff --git a/deps/v8/tools/system-analyzer/view/code-panel.mjs b/deps/v8/tools/system-analyzer/view/code-panel.mjs
index 42fe7b3d4c..3d8e02697a 100644
--- a/deps/v8/tools/system-analyzer/view/code-panel.mjs
+++ b/deps/v8/tools/system-analyzer/view/code-panel.mjs
@@ -1,23 +1,10 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {LinuxCppEntriesProvider} from '../../tickprocessor.mjs';
+
import {SelectRelatedEvent} from './events.mjs';
import {CollapsableElement, DOM, formatBytes, formatMicroSeconds} from './helper.mjs';
-const kRegisters = ['rsp', 'rbp', 'rax', 'rbx', 'rcx', 'rdx', 'rsi', 'rdi'];
-// Make sure we dont match register on bytecode: Star1 or Star2
-const kAvoidBytecodeOps = '(.*?[^a-zA-Z])'
-// Look for registers in strings like: movl rbx,[rcx-0x30]
-const kRegisterRegexp = `(${kRegisters.join('|')}|r[0-9]+)`
-const kRegisterRegexpSplit =
- new RegExp(`${kAvoidBytecodeOps}${kRegisterRegexp}`)
-const kIsRegisterRegexp = new RegExp(`^${kRegisterRegexp}$`);
-
-const kFullAddressRegexp = /(0x[0-9a-f]{8,})/;
-const kRelativeAddressRegexp = /([+-]0x[0-9a-f]+)/;
-const kAnyAddressRegexp = /([+-]?0x[0-9a-f]+)/;
-
DOM.defineCustomElement('view/code-panel',
(templateText) =>
class CodePanel extends CollapsableElement {
@@ -132,36 +119,145 @@ DOM.defineCustomElement('view/code-panel',
}
});
+const kRegisters = ['rsp', 'rbp', 'rax', 'rbx', 'rcx', 'rdx', 'rsi', 'rdi'];
+// Make sure we don't match registers inside bytecode names like Star1 or Star2
+const kAvoidBytecodeOpsRegexpSource = '(.*?[^a-zA-Z])'
+// Look for registers in strings like: movl rbx,[rcx-0x30]
+const kRegisterRegexpSource = `(?<register>${kRegisters.join('|')}|r[0-9]+)`
+const kRegisterSplitRegexp =
+ new RegExp(`${kAvoidBytecodeOpsRegexpSource}${kRegisterRegexpSource}`)
+const kIsRegisterRegexp = new RegExp(`^${kRegisterRegexpSource}$`);
+
+const kFullAddressRegexp = /(0x[0-9a-f]{8,})/;
+const kRelativeAddressRegexp = /([+-]0x[0-9a-f]+)/;
+const kAnyAddressRegexp = /(?<address>[+-]?0x[0-9a-f]+)/;
+
+const kJmpRegexp = new RegExp(`jmp ${kRegisterRegexpSource}`);
+const kMovRegexp =
+ new RegExp(`mov. ${kRegisterRegexpSource},${kAnyAddressRegexp.source}`);
+
class AssemblyFormatter {
constructor(codeLogEntry) {
this._fragment = new DocumentFragment();
this._entry = codeLogEntry;
- codeLogEntry.code.split('\n').forEach(line => this._addLine(line));
+ this._lines = new Map();
+ this._previousLine = undefined;
+ this._parseLines();
+ this._format();
}
get fragment() {
return this._fragment;
}
- _addLine(line) {
+ _format() {
+ let block = DOM.div(['basicBlock', 'header']);
+ this._lines.forEach(line => {
+ if (!block || line.isBlockStart) {
+ this._fragment.appendChild(block);
+ block = DOM.div('basicBlock');
+ }
+ block.appendChild(line.format())
+ });
+ this._fragment.appendChild(block);
+ }
+
+ _parseLines() {
+ this._entry.code.split('\n').forEach(each => this._parseLine(each));
+ this._findBasicBlocks();
+ }
+
+ _parseLine(line) {
const parts = line.split(' ');
- let lineAddress = 0;
- if (kFullAddressRegexp.test(parts[0])) {
- lineAddress = parseInt(parts[0]);
+ // Use unique placeholder for address:
+ let lineAddress = -this._lines.size;
+ for (let part of parts) {
+ if (kFullAddressRegexp.test(part)) {
+ lineAddress = parseInt(part);
+ break;
+ }
+ }
+ const newLine = new AssemblyLine(lineAddress, parts);
+ // special hack for: mov reg 0x...; jmp reg;
+ if (lineAddress <= 0 && this._previousLine) {
+ const jmpMatch = line.match(kJmpRegexp);
+ if (jmpMatch) {
+ const register = jmpMatch.groups.register;
+ const movMatch = this._previousLine.line.match(kMovRegexp);
+        if (movMatch && movMatch.groups.register === register) {
+ newLine.outgoing.push(movMatch.groups.address);
+ }
+ }
+ }
+ this._lines.set(lineAddress, newLine);
+ this._previousLine = newLine;
+ }
+
+ _findBasicBlocks() {
+ const lines = Array.from(this._lines.values());
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i];
+ let forceBasicBlock = i == 0;
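+      // Heuristic: also start a new block where the line format switches
+      // between address-prefixed and address-less lines (and stays switched).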
+ if (i > 0 && i < lines.length - 1) {
+ const prevHasAddress = lines[i - 1].address > 0;
+ const currentHasAddress = lines[i].address > 0;
+ const nextHasAddress = lines[i + 1].address > 0;
+ if (prevHasAddress !== currentHasAddress &&
+ currentHasAddress == nextHasAddress) {
+ forceBasicBlock = true;
+ }
+ }
+ if (forceBasicBlock) {
+ // Add fake-incoming address to mark a block start.
+ line.addIncoming(0);
+ }
+ line.outgoing.forEach(address => {
+ const outgoing = this._lines.get(address);
+ if (outgoing) outgoing.addIncoming(line.address);
+ })
}
- const content = DOM.span({textContent: parts.join(' ') + '\n'});
- let formattedCode = content.innerHTML.split(kRegisterRegexpSplit)
+ }
+}
+
+class AssemblyLine {
+ constructor(address, parts) {
+ this.address = address;
+ this.outgoing = [];
+ this.incoming = [];
+ parts.forEach(part => {
+ const fullMatch = part.match(kFullAddressRegexp);
+ if (fullMatch) {
+        let inlineAddress = parseInt(fullMatch[0]);
+        if (Number.isNaN(inlineAddress)) throw new Error('Invalid address');
+        if (inlineAddress != this.address) this.outgoing.push(inlineAddress);
+ } else if (kRelativeAddressRegexp.test(part)) {
+ this.outgoing.push(this._toAbsoluteAddress(part));
+ }
+ });
+ this.line = parts.join(' ');
+ }
+
+ get isBlockStart() {
+ return this.incoming.length > 0;
+ }
+
+ addIncoming(address) {
+ this.incoming.push(address);
+ }
+
+ format() {
+ const content = DOM.span({textContent: this.line + '\n'});
+ let formattedCode = content.innerHTML.split(kRegisterSplitRegexp)
.map(part => this._formatRegisterPart(part))
.join('');
- formattedCode = formattedCode.split(kAnyAddressRegexp)
- .map(
- (part, index) => this._formatAddressPart(
- part, index, lineAddress))
- .join('');
+ formattedCode =
+ formattedCode.split(kAnyAddressRegexp)
+ .map((part, index) => this._formatAddressPart(part, index))
+ .join('');
// Let's replace the base-address since it doesn't add any value.
// TODO
content.innerHTML = formattedCode;
- this._fragment.appendChild(content);
+ return content;
}
_formatRegisterPart(part) {
@@ -169,7 +265,7 @@ class AssemblyFormatter {
return `<span class="reg ${part}">${part}</span>`
}
- _formatAddressPart(part, index, lineAddress) {
+ _formatAddressPart(part, index) {
if (kFullAddressRegexp.test(part)) {
// The first or second address must be the line address
if (index <= 1) {
@@ -177,12 +273,16 @@ class AssemblyFormatter {
}
return `<span class=addr data-addr="${part}">${part}</span>`;
} else if (kRelativeAddressRegexp.test(part)) {
- const targetAddress = (lineAddress + parseInt(part)).toString(16);
- return `<span class=addr data-addr="0x${targetAddress}">${part}</span>`;
+ return `<span class=addr data-addr="0x${
+ this._toAbsoluteAddress(part).toString(16)}">${part}</span>`;
} else {
return part;
}
}
+
+ _toAbsoluteAddress(part) {
+ return this.address + parseInt(part);
+ }
}
class SelectionHandler {
diff --git a/deps/v8/tools/system-analyzer/view/events.mjs b/deps/v8/tools/system-analyzer/view/events.mjs
index f91fd6ffe8..024ed27f3c 100644
--- a/deps/v8/tools/system-analyzer/view/events.mjs
+++ b/deps/v8/tools/system-analyzer/view/events.mjs
@@ -50,10 +50,12 @@ export class SelectTimeEvent extends AppEvent {
return 'timerangeselect';
}
- constructor(start = 0, end = Infinity) {
+ constructor(start = 0, end = Infinity, focus = false, zoom = false) {
super(SelectTimeEvent.name);
this.start = start;
this.end = end;
+ this.focus = focus;
+ this.zoom = zoom;
}
}
diff --git a/deps/v8/tools/system-analyzer/view/property-link-table-template.html b/deps/v8/tools/system-analyzer/view/property-link-table-template.html
index 85f2cdc178..29724a9034 100644
--- a/deps/v8/tools/system-analyzer/view/property-link-table-template.html
+++ b/deps/v8/tools/system-analyzer/view/property-link-table-template.html
@@ -6,34 +6,58 @@ found in the LICENSE file. -->
</head>
<style>
-.properties td {
+h3 {
+ margin-block-start: 0em;
+}
+
+.properties {
+ overflow: auto;
+ max-height: 300px;
+}
+
+.properties table {
+ overflow: visible;
+ min-width: 350px;
+ border-collapse: collapse;
+}
+
+.properties table td {
vertical-align: top;
}
-.properties > tbody > tr > td:nth-child(2n+1):after {
+
+.properties table > tbody > tr > td:nth-child(2n+1):after {
content: ':';
}
-.properties > tbody > tr > td:nth-child(2n+1) {
+
+.properties table > tbody > tr > td:nth-child(2n+1) {
padding-right: 3px;
}
-.properties > tbody > tr > td:nth-child(2n+2) {
+.properties table > tbody > tr > td:nth-child(2n+2) {
width: 100%;
}
-.properties > tfoot {
- text-align: right;
+.properties table select {
+ width: 100%;
}
-.properties {
- min-width: 350px;
- border-collapse: collapse;
+.code {
+ font-family: var(--code-font);
}
-h3 {
- margin-block-start: 0em;
+.footer {
+ margin-top: 10px;
+ text-align: right;
+ width: 100%;
+ display: flex;
+ align-content: space-between;
+}
+
+.footer button {
+ flex: 1 1 0;
}
</style>
<div id="body">
<div id="content"></div>
-</div> \ No newline at end of file
+</div>
diff --git a/deps/v8/tools/system-analyzer/view/property-link-table.mjs b/deps/v8/tools/system-analyzer/view/property-link-table.mjs
index 2c81bc6536..95c496d261 100644
--- a/deps/v8/tools/system-analyzer/view/property-link-table.mjs
+++ b/deps/v8/tools/system-analyzer/view/property-link-table.mjs
@@ -3,8 +3,9 @@
// found in the LICENSE file.
import {App} from '../index.mjs'
+
import {FocusEvent, SelectRelatedEvent} from './events.mjs';
-import {DOM, ExpandableText, V8CustomElement} from './helper.mjs';
+import {DOM, entriesEquals, ExpandableText, V8CustomElement} from './helper.mjs';
DOM.defineCustomElement('view/property-link-table',
template =>
@@ -27,7 +28,7 @@ DOM.defineCustomElement('view/property-link-table',
}
set propertyDict(propertyDict) {
- if (this._propertyDict === propertyDict) return;
+ if (entriesEquals(this._propertyDict, propertyDict)) return;
if (typeof propertyDict !== 'object') {
throw new Error(
`Invalid property dict, expected object: ${propertyDict}`);
@@ -38,13 +39,16 @@ DOM.defineCustomElement('view/property-link-table',
_update() {
this._fragment = new DocumentFragment();
- this._table = DOM.table('properties');
+ this._table = DOM.table();
for (let key in this._propertyDict) {
const value = this._propertyDict[key];
this._addKeyValue(key, value);
}
- this._addFooter();
- this._fragment.appendChild(this._table);
+
+ const tableDiv = DOM.div('properties');
+ tableDiv.appendChild(this._table);
+ this._fragment.appendChild(tableDiv);
+ this._createFooter();
const newContent = DOM.div();
newContent.appendChild(this._fragment);
@@ -69,12 +73,14 @@ DOM.defineCustomElement('view/property-link-table',
if (Array.isArray(value)) {
cell.appendChild(this._addArrayValue(value));
return;
- }
- if (App.isClickable(value)) {
+ } else if (App.isClickable(value)) {
cell.className = 'clickable';
cell.onclick = this._showHandler;
cell.data = value;
}
+ if (value.isCode) {
+ cell.classList.add('code');
+ }
new ExpandableText(cell, value.toString());
}
@@ -101,21 +107,21 @@ DOM.defineCustomElement('view/property-link-table',
this._fragment.appendChild(title);
}
- _addFooter() {
+ _createFooter() {
if (this._object === undefined) return;
if (!this._instanceLinkButtons) return;
- const td = this._table.createTFoot().insertRow().insertCell();
- td.colSpan = 2;
- let showButton = td.appendChild(DOM.button('Show', this._showHandler));
+ const footer = DOM.div('footer');
+ let showButton = footer.appendChild(DOM.button('Show', this._showHandler));
showButton.data = this._object;
if (this._object.sourcePosition) {
- let showSourcePositionButton = td.appendChild(
+ let showSourcePositionButton = footer.appendChild(
DOM.button('Source Position', this._showSourcePositionHandler));
showSourcePositionButton.data = this._object;
}
- let showRelatedButton =
- td.appendChild(DOM.button('Show Related', this._showRelatedHandler));
+ let showRelatedButton = footer.appendChild(
+ DOM.button('Show Related', this._showRelatedHandler));
showRelatedButton.data = this._object;
+ this._fragment.appendChild(footer);
}
_handleArrayValueSelect(event) {
diff --git a/deps/v8/tools/system-analyzer/view/script-panel-template.html b/deps/v8/tools/system-analyzer/view/script-panel-template.html
index de3150366a..1039a8c452 100644
--- a/deps/v8/tools/system-analyzer/view/script-panel-template.html
+++ b/deps/v8/tools/system-analyzer/view/script-panel-template.html
@@ -7,7 +7,7 @@ found in the LICENSE file. -->
</head>
<style>
.scriptNode {
- font-family: Consolas, monospace;
+ font-family: var(--code-font);
}
.scriptNode span {
diff --git a/deps/v8/tools/system-analyzer/view/script-panel.mjs b/deps/v8/tools/system-analyzer/view/script-panel.mjs
index f6b24733be..a9de42205a 100644
--- a/deps/v8/tools/system-analyzer/view/script-panel.mjs
+++ b/deps/v8/tools/system-analyzer/view/script-panel.mjs
@@ -1,11 +1,10 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {arrayEquals, defer, groupBy} from '../helper.mjs';
import {App} from '../index.mjs'
import {SelectionEvent, SelectRelatedEvent, ToolTipEvent} from './events.mjs';
-import {CollapsableElement, CSSColor, delay, DOM, formatBytes, gradientStopsFromGroups, LazyTable} from './helper.mjs';
+import {arrayEquals, CollapsableElement, CSSColor, defer, delay, DOM, formatBytes, gradientStopsFromGroups, groupBy, LazyTable} from './helper.mjs';
// A source mapping proxy for source maps that don't have CORS headers.
// TODO(leszeks): Make this configurable.
diff --git a/deps/v8/tools/system-analyzer/view/timeline-panel.mjs b/deps/v8/tools/system-analyzer/view/timeline-panel.mjs
index 35d8f02893..88021aed65 100644
--- a/deps/v8/tools/system-analyzer/view/timeline-panel.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline-panel.mjs
@@ -24,10 +24,8 @@ DOM.defineCustomElement(
}
set nofChunks(count) {
- const time = this.currentTime
for (const track of this.timelineTracks) {
track.nofChunks = count;
- track.currentTime = time;
}
}
@@ -35,6 +33,12 @@ DOM.defineCustomElement(
return this.timelineTracks[0].nofChunks;
}
+ set currentTime(time) {
+ for (const track of this.timelineTracks) {
+ track.currentTime = time;
+ }
+ }
+
get currentTime() {
return this.timelineTracks[0].currentTime;
}
@@ -54,12 +58,23 @@ DOM.defineCustomElement(
this.timeSelection = {start: event.start, end: event.end};
}
- set timeSelection(timeSelection) {
- if (timeSelection.start > timeSelection.end) {
+ set timeSelection(selection) {
+ if (selection.start > selection.end) {
throw new Error('Invalid time range');
}
- for (const track of this.timelineTracks) {
- track.timeSelection = timeSelection;
+ const tracks = Array.from(this.timelineTracks);
+ if (selection.zoom) {
+      // To avoid inconsistencies, copy the zoom/nofChunks from a single
+      // reference track.
+ const firstTrack = tracks.pop();
+ firstTrack.timeSelection = selection;
+ selection.zoom = false;
+ for (const track of tracks) track.timeSelection = selection;
+ this.nofChunks = firstTrack.nofChunks;
+ } else {
+ for (const track of this.timelineTracks) {
+ track.timeSelection = selection;
+ }
}
}
});
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
index 678817399d..24ca397513 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {delay} from '../../helper.mjs';
import {kChunkHeight, kChunkVisualWidth, kChunkWidth} from '../../log/map.mjs';
import {SelectionEvent, SelectTimeEvent, SynchronizeSelectionEvent, ToolTipEvent,} from '../events.mjs';
-import {CSSColor, DOM, formatDurationMicros, SVG, V8CustomElement} from '../helper.mjs';
+import {CSSColor, delay, DOM, formatDurationMicros, V8CustomElement} from '../helper.mjs';
export const kTimelineHeight = 200;
@@ -37,15 +36,18 @@ export class TimelineTrackBase extends V8CustomElement {
this.timelineMarkersNode = this.$('#timelineMarkers');
this._scalableContentNode = this.$('#scalableContent');
this.isLocked = false;
+ this.setAttribute('tabindex', 0);
}
_initEventListeners() {
- this._legend.onFilter = (type) => this._handleFilterTimeline();
+ this._legend.onFilter = this._handleFilterTimeline.bind(this);
this.timelineNode.addEventListener(
- 'scroll', e => this._handleTimelineScroll(e));
+ 'scroll', this._handleTimelineScroll.bind(this));
this.hitPanelNode.onclick = this._handleClick.bind(this);
this.hitPanelNode.ondblclick = this._handleDoubleClick.bind(this);
this.hitPanelNode.onmousemove = this._handleMouseMove.bind(this);
+ this.$('#selectionForeground')
+ .addEventListener('mousemove', this._handleMouseMove.bind(this));
window.addEventListener('resize', () => this._resetCachedDimensions());
}
@@ -61,6 +63,7 @@ export class TimelineTrackBase extends V8CustomElement {
_handleFilterTimeline(type) {
this._updateChunks();
+ this._legend.update(true);
}
set data(timeline) {
@@ -72,9 +75,24 @@ export class TimelineTrackBase extends V8CustomElement {
this._updateChunks();
}
- set timeSelection(selection) {
- this._selectionHandler.timeSelection = selection;
+ set timeSelection({start, end, focus = false, zoom = false}) {
+ this._selectionHandler.timeSelection = {start, end};
this.updateSelection();
+ if (focus || zoom) {
+ if (!Number.isFinite(start) || !Number.isFinite(end)) {
+ throw new Error('Invalid number ranges');
+ }
+ if (focus) {
+ this.currentTime = (start + end) / 2;
+ }
+ if (zoom) {
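+        // Rescale nofChunks so the selection plus a 20% margin on each side
+        // roughly fills the currently visible width.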
+ const margin = 0.2;
+ const newVisibleTime = (end - start) * (1 + 2 * margin);
+ const currentVisibleTime =
+ this._cachedTimelineBoundingClientRect.width / this._timeToPixel;
+ this.nofChunks = this.nofChunks * (currentVisibleTime / newVisibleTime);
+ }
+ }
}
updateSelection() {
@@ -125,8 +143,14 @@ export class TimelineTrackBase extends V8CustomElement {
}
set nofChunks(count) {
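+    // Clamp the resolution and keep the view centered on the same time.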
+ const centerTime = this.currentTime;
+ const kMinNofChunks = 100;
+ if (count < kMinNofChunks) count = kMinNofChunks;
+ const kMaxNofChunks = 10 * 1000;
+ if (count > kMaxNofChunks) count = kMaxNofChunks;
this._nofChunks = count | 0;
this._updateChunks();
+ this.currentTime = centerTime;
}
get nofChunks() {
@@ -150,18 +174,42 @@ export class TimelineTrackBase extends V8CustomElement {
set selectedEntry(value) {
this._selectedEntry = value;
- this.drawAnnotations(value);
}
get selectedEntry() {
return this._selectedEntry;
}
+ get focusedEntry() {
+ return this._focusedEntry;
+ }
+
+ set focusedEntry(entry) {
+ this._focusedEntry = entry;
+ if (entry) this._drawAnnotations(entry);
+ }
+
set scrollLeft(offset) {
this.timelineNode.scrollLeft = offset;
this._cachedTimelineScrollLeft = offset;
}
+ get scrollLeft() {
+ return this._cachedTimelineScrollLeft;
+ }
+
+ set currentTime(time) {
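+    // Scroll so that the given time ends up in the center of the viewport.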
+ const position = this.timeToPosition(time);
+ const centerOffset = this._timelineBoundingClientRect.width / 2;
+ this.scrollLeft = Math.max(0, position - centerOffset);
+ }
+
+ get currentTime() {
+ const centerOffset =
+ this._timelineBoundingClientRect.width / 2 + this.scrollLeft;
+ return this.relativePositionToTime(centerOffset);
+ }
+
handleEntryTypeDoubleClick(e) {
this.dispatchEvent(new SelectionEvent(e.target.parentNode.entries));
}
@@ -387,12 +435,20 @@ class SelectionHandler {
constructor(timeline) {
this._timeline = timeline;
+ this._timelineNode = this._timeline.$('#timeline');
this._timelineNode.addEventListener(
- 'mousedown', e => this._handleTimeSelectionMouseDown(e));
+ 'mousedown', this._handleMouseDown.bind(this));
this._timelineNode.addEventListener(
- 'mouseup', e => this._handleTimeSelectionMouseUp(e));
+ 'mouseup', this._handleMouseUp.bind(this));
this._timelineNode.addEventListener(
- 'mousemove', e => this._handleTimeSelectionMouseMove(e));
+ 'mousemove', this._handleMouseMove.bind(this));
+ this._selectionNode = this._timeline.$('#selection');
+ this._selectionForegroundNode = this._timeline.$('#selectionForeground');
+ this._selectionForegroundNode.addEventListener(
+ 'dblclick', this._handleDoubleClick.bind(this));
+ this._selectionBackgroundNode = this._timeline.$('#selectionBackground');
+ this._leftHandleNode = this._timeline.$('#leftHandle');
+ this._rightHandleNode = this._timeline.$('#rightHandle');
}
update() {
@@ -406,9 +462,10 @@ class SelectionHandler {
this._leftHandleNode.style.left = startPosition + 'px';
this._rightHandleNode.style.left = endPosition + 'px';
const delta = endPosition - startPosition;
- const selectionNode = this._selectionBackgroundNode;
- selectionNode.style.left = startPosition + 'px';
- selectionNode.style.width = delta + 'px';
+ this._selectionForegroundNode.style.left = startPosition + 'px';
+ this._selectionForegroundNode.style.width = delta + 'px';
+ this._selectionBackgroundNode.style.left = startPosition + 'px';
+ this._selectionBackgroundNode.style.width = delta + 'px';
}
set timeSelection(selection) {
@@ -437,26 +494,6 @@ class SelectionHandler {
this._timeSelection.end != Infinity;
}
- get _timelineNode() {
- return this._timeline.$('#timeline');
- }
-
- get _selectionNode() {
- return this._timeline.$('#selection');
- }
-
- get _selectionBackgroundNode() {
- return this._timeline.$('#selectionBackground');
- }
-
- get _leftHandleNode() {
- return this._timeline.$('#leftHandle');
- }
-
- get _rightHandleNode() {
- return this._timeline.$('#rightHandle');
- }
-
get _leftHandlePosX() {
return this._leftHandleNode.getBoundingClientRect().x;
}
@@ -475,7 +512,7 @@ class SelectionHandler {
SelectionHandler.SELECTION_OFFSET;
}
- _handleTimeSelectionMouseDown(event) {
+ _handleMouseDown(event) {
if (event.button !== 0) return;
let xPosition = event.clientX
// Update origin time in case we click on a handle.
@@ -488,7 +525,7 @@ class SelectionHandler {
this._selectionOriginTime = this.positionToTime(xPosition);
}
- _handleTimeSelectionMouseMove(event) {
+ _handleMouseMove(event) {
if (event.button !== 0) return;
if (!this.isSelecting) return;
const currentTime = this.positionToTime(event.clientX);
@@ -497,7 +534,7 @@ class SelectionHandler {
Math.max(this._selectionOriginTime, currentTime)));
}
- _handleTimeSelectionMouseUp(event) {
+ _handleMouseUp(event) {
if (event.button !== 0) return;
this._selectionOriginTime = -1;
if (this._timeSelection.start === -1) return;
@@ -506,10 +543,18 @@ class SelectionHandler {
this._timeline.dispatchEvent(new SelectTimeEvent(
this._timeSelection.start, this._timeSelection.end));
}
+
+ _handleDoubleClick(event) {
+ if (!this.hasSelection) return;
+ // Focus and zoom to the current selection.
+ this._timeline.dispatchEvent(new SelectTimeEvent(
+ this._timeSelection.start, this._timeSelection.end, true, true));
+ }
}
class Legend {
_timeline;
+ _lastSelection;
_typesFilters = new Map();
_typeClickHandler = this._handleTypeClick.bind(this);
_filterPredicate = this.filter.bind(this);
@@ -552,22 +597,39 @@ class Legend {
return this._typesFilters.get(logEntry.type);
}
- update() {
+ update(force = false) {
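+    // Unless forced, skip rebuilding the legend table when the selection is
+    // unchanged.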
+ if (!force && this._lastSelection === this.selection) return;
+ this._lastSelection = this.selection;
const tbody = DOM.tbody();
const missingTypes = new Set(this._typesFilters.keys());
this._checkDurationField();
- this.selection.getBreakdown(undefined, this._enableDuration)
- .forEach(group => {
- tbody.appendChild(this._addTypeRow(group));
- missingTypes.delete(group.key);
- });
- missingTypes.forEach(
- key => tbody.appendChild(this._addRow('', key, 0, '0%')));
+ let selectionDuration = 0;
+ const breakdown =
+ this.selection.getBreakdown(undefined, this._enableDuration);
+ if (this._enableDuration) {
+ if (this.selection.cachedDuration === undefined) {
+ this.selection.cachedDuration = this._breakdownTotalDuration(breakdown);
+ }
+ selectionDuration = this.selection.cachedDuration;
+ }
+ breakdown.forEach(group => {
+ tbody.appendChild(this._addTypeRow(group, selectionDuration));
+ missingTypes.delete(group.key);
+ });
+ missingTypes.forEach(key => {
+ const emptyGroup = {key, length: 0, duration: 0};
+ tbody.appendChild(this._addTypeRow(emptyGroup, selectionDuration));
+ });
if (this._timeline.selection) {
- tbody.appendChild(
- this._addRow('', 'Selection', this.selection.length, '100%'));
+ tbody.appendChild(this._addRow(
+ '', 'Selection', this.selection.length, '100%', selectionDuration,
+ '100%'));
}
- tbody.appendChild(this._addRow('', 'All', this._timeline.length, ''));
+ // Showing 100% for 'All' and for 'Selection' would be confusing.
+ const allPercent = this._timeline.selection ? '' : '100%';
+ tbody.appendChild(this._addRow(
+ '', 'All', this._timeline.length, allPercent,
+ this._timeline.cachedDuration, allPercent));
this._table.tBodies[0].replaceWith(tbody);
}
@@ -581,8 +643,9 @@ class Legend {
_addRow(colorNode, type, count, countPercent, duration, durationPercent) {
const row = DOM.tr();
- row.appendChild(DOM.td(colorNode));
- const typeCell = row.appendChild(DOM.td(type));
+ const colorCell = row.appendChild(DOM.td(colorNode, 'color'));
+ colorCell.setAttribute('title', `Toggle '${type}' entries.`);
+ const typeCell = row.appendChild(DOM.td(type, 'text'));
typeCell.setAttribute('title', type);
row.appendChild(DOM.td(count.toString()));
row.appendChild(DOM.td(countPercent));
@@ -593,26 +656,31 @@ class Legend {
return row
}
- _addTypeRow(group) {
+ _addTypeRow(group, selectionDuration) {
const color = this.colorForType(group.key);
- const colorDiv = DOM.div('colorbox');
+ const classes = ['colorbox'];
+ if (group.length == 0) classes.push('empty');
+ const colorDiv = DOM.div(classes);
+ colorDiv.style.borderColor = color;
if (this._typesFilters.get(group.key)) {
colorDiv.style.backgroundColor = color;
} else {
- colorDiv.style.borderColor = color;
colorDiv.style.backgroundColor = CSSColor.backgroundImage;
}
let duration = 0;
+ let durationPercent = '';
if (this._enableDuration) {
- const entries = group.entries;
- for (let i = 0; i < entries.length; i++) {
- duration += entries[i].duration;
- }
+ // group.duration was added in _breakdownTotalDuration.
+ duration = group.duration;
+ durationPercent = selectionDuration == 0 ?
+ '0%' :
+ this._formatPercent(duration / selectionDuration);
}
- let countPercent =
- `${(group.length / this.selection.length * 100).toFixed(1)}%`;
+ const countPercent =
+ this._formatPercent(group.length / this.selection.length);
const row = this._addRow(
- colorDiv, group.key, group.length, countPercent, duration, '');
+ colorDiv, group.key, group.length, countPercent, duration,
+ durationPercent);
row.className = 'clickable';
row.onclick = this._typeClickHandler;
row.data = group.key;
@@ -624,4 +692,26 @@ class Legend {
this._typesFilters.set(type, !this._typesFilters.get(type));
this.onFilter(type);
}
+
+ _breakdownTotalDuration(breakdown) {
+ let duration = 0;
+ breakdown.forEach(group => {
+ group.duration = this._groupDuration(group);
+ duration += group.duration;
+ })
+ return duration;
+ }
+
+ _groupDuration(group) {
+ let duration = 0;
+ const entries = group.entries;
+ for (let i = 0; i < entries.length; i++) {
+ duration += entries[i].duration;
+ }
+ return duration;
+ }
+
+ _formatPercent(ratio) {
+ return `${(ratio * 100).toFixed(1)}%`;
+ }
}
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-map.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-map.mjs
index 639acc0312..cc1a9ea152 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-map.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-map.mjs
@@ -3,6 +3,7 @@
// found in the LICENSE file.
import {kChunkVisualWidth, MapLogEntry} from '../../log/map.mjs';
+import {FocusEvent} from '../events.mjs';
import {CSSColor, DOM} from '../helper.mjs';
import {TimelineTrackBase} from './timeline-track-base.mjs'
@@ -12,8 +13,11 @@ DOM.defineCustomElement('view/timeline/timeline-track', 'timeline-track-map',
class TimelineTrackMap extends TimelineTrackBase {
constructor() {
super(templateText);
+ this.navigation = new Navigation(this)
}
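+  // Keyboard navigation for this track is handled by the Navigation helper
+  // below.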
+ _handleKeyDown(event) {}
+
getMapStyle(map) {
return map.edge && map.edge.from ? CSSColor.onBackgroundColor :
CSSColor.onPrimaryColor;
@@ -135,4 +139,117 @@ DOM.defineCustomElement('view/timeline/timeline-track', 'timeline-track-map',
}
return buffer;
}
-}) \ No newline at end of file
+})
+
+class Navigation {
+ constructor(track) {
+ this._track = track;
+ this._track.addEventListener('keydown', this._handleKeyDown.bind(this));
+ this._map = undefined;
+ }
+
+ _handleKeyDown(event) {
+ if (!this._track.isFocused) return;
+ let handled = false;
+ switch (event.key) {
+ case 'ArrowDown':
+ handled = true;
+ if (event.shiftKey) {
+ this.selectPrevEdge();
+ } else {
+ this.moveInChunk(-1);
+ }
+ break;
+ case 'ArrowUp':
+ handled = true;
+ if (event.shiftKey) {
+ this.selectNextEdge();
+ } else {
+ this.moveInChunk(1);
+ }
+ break;
+ case 'ArrowLeft':
+ handled = true;
+ this.moveInChunks(false);
+ break;
+ case 'ArrowRight':
+ handled = true;
+ this.moveInChunks(true);
+ break;
+ case 'Enter':
+ handled = true;
+ this.selectMap();
+ break
+ }
+ if (handled) {
+ event.stopPropagation();
+ event.preventDefault();
+ return false;
+ }
+ }
+
+ get map() {
+ return this._track.focusedEntry;
+ }
+
+ set map(map) {
+ this._track.focusedEntry = map;
+ }
+
+ get chunks() {
+ return this._track.chunks;
+ }
+
+ selectMap() {
+ if (!this.map) return;
+ this._track.dispatchEvent(new FocusEvent(this.map))
+ }
+
+ selectNextEdge() {
+ if (!this.map) return;
+ if (this.map.children.length != 1) return;
+    this.map = this.map.children[0].to;
+ }
+
+ selectPrevEdge() {
+ if (!this.map) return;
+ if (!this.map.parent) return;
+ this.map = this.map.parent;
+ }
+
+ selectDefaultMap() {
+ this.map = this.chunks[0].at(0);
+ }
+
+ moveInChunks(next) {
+ if (!this.map) return this.selectDefaultMap();
+ let chunkIndex = this.map.chunkIndex(this.chunks);
+ let currentChunk = this.chunks[chunkIndex];
+ let currentIndex = currentChunk.indexOf(this.map);
+ let newChunk;
+ if (next) {
+      newChunk = currentChunk.next(this.chunks);
+    } else {
+      newChunk = currentChunk.prev(this.chunks);
+ }
+ if (!newChunk) return;
+ let newIndex = Math.min(currentIndex, newChunk.size() - 1);
+ this.map = newChunk.at(newIndex);
+ }
+
+ moveInChunk(delta) {
+ if (!this.map) return this.selectDefaultMap();
+ let chunkIndex = this.map.chunkIndex(this.chunks)
+ let chunk = this.chunks[chunkIndex];
+ let index = chunk.indexOf(this.map) + delta;
+ let map;
+ if (index < 0) {
+ map = chunk.prev(this.chunks).last();
+ } else if (index >= chunk.size()) {
+ map = chunk.next(this.chunks).first()
+ } else {
+ map = chunk.at(index);
+ }
+ this.map = map;
+ }
+}
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
index 5d60a5fa09..54703b701f 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {delay} from '../../helper.mjs';
import {Timeline} from '../../timeline.mjs';
import {SelectTimeEvent} from '../events.mjs';
-import {CSSColor, DOM, SVG} from '../helper.mjs';
+import {CSSColor, delay, SVG} from '../helper.mjs';
import {TimelineTrackBase} from './timeline-track-base.mjs'
@@ -108,19 +107,19 @@ export class TimelineTrackStackedBase extends TimelineTrackBase {
}
_drawItem(item, i, outline = false) {
- const x = this.timeToPosition(item.time);
+ const x = roundTo3Digits(this.timeToPosition(item.time));
const y = (item.depth + 1) * kItemHeight;
- let width = item.duration * this._timeToPixel;
+ let width = roundTo3Digits(item.duration * this._timeToPixel);
if (outline) {
return `<rect x=${x} y=${y} width=${width} height=${
- kItemHeight - 1} class=flameSelected />`;
+ kItemHeight - 1} class=fs />`;
}
let color = this._legend.colorForType(item.type);
if (i % 2 == 1) {
color = CSSColor.darken(color, 20);
}
return `<rect x=${x} y=${y} width=${width} height=${kItemHeight - 1} fill=${
- color} class=flame />`;
+ color} class=f />`;
}
_drawItemText(item) {
@@ -141,4 +140,8 @@ export class TimelineTrackStackedBase extends TimelineTrackBase {
buffer += `<text x=${x + 1} y=${y - 3} class=txt>${text}</text>`
return buffer;
}
-} \ No newline at end of file
+}
+
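+// Truncate to three decimal places to keep the generated SVG markup compact.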
+function roundTo3Digits(value) {
+ return ((value * 1000) | 0) / 1000;
+}
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html
index 9114763606..d1a0a29687 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html
@@ -69,14 +69,21 @@ found in the LICENSE file. -->
height: calc(var(--view-height) + 12px);
overflow-y: scroll;
margin-right: -10px;
- padding-right: 2px;
+ padding: 0 2px 0 2px;
+ width: 400px;
+ border-left: 1px solid var(--border-color);
}
#legendTable {
- width: 280px;
+ width: 100%;
border-collapse: collapse;
}
+ thead {
+ border-top: 1px solid var(--border-color);
+ border-bottom: 1px solid var(--border-color);
+ }
+
th,
td {
padding: 1px 3px 2px 3px;
@@ -84,24 +91,20 @@ found in the LICENSE file. -->
#legendTable td {
padding-top: 3px;
+ text-align: right;
}
/* Center colors */
- #legendTable td:nth-of-type(4n+1) {
+ #legendTable .color {
text-align: center;
}
/* Left align text*/
- #legendTable td:nth-of-type(4n+2) {
+ #legendTable .text {
text-align: left;
width: 100%;
max-width: 200px;
overflow: hidden;
text-overflow: ellipsis;
}
- /* right align numbers */
- #legendTable td:nth-of-type(4n+3),
- #legendTable td:nth-of-type(4n+4) {
- text-align: right;
- }
.timeline {
background-color: var(--timeline-background-color);
@@ -130,7 +133,15 @@ found in the LICENSE file. -->
border-right: 1px solid var(--on-surface-color);
margin-left: -5px;
}
-
+ #selectionForeground{
+ z-index: 2;
+ cursor: grab;
+ height: 100%;
+ position: absolute;
+ }
+ #selectionForeground:active {
+ cursor: grabbing;
+ }
#selectionBackground {
background-color: rgba(133, 68, 163, 0.5);
height: 100%;
@@ -155,11 +166,14 @@ found in the LICENSE file. -->
.legend {
flex: initial;
}
+ .colorbox.empty {
+ opacity: 0.5;
+ }
</style>
<style>
/* SVG styles */
.txt {
- font: 8px monospace;
+ font: 8px var(--code-font);
transform: var(--txt-scale);
}
.annotationLabel {
@@ -184,10 +198,10 @@ found in the LICENSE file. -->
dominant-baseline: hanging;
font-size: 9px;
}
- .flame {
+ .flame, .f {
stroke-width: 0;
}
- .flameSelected {
+ .flameSelected, .fs {
fill: var(--on-background-color);
fill-opacity: 0.1;
stroke: var(--on-background-color);
@@ -208,6 +222,7 @@ found in the LICENSE file. -->
<div id="timeline">
<div id="selection" class="dataSized">
<div id="leftHandle"></div>
+ <div id="selectionForeground"></div>
<div id="selectionBackground"></div>
<div id="rightHandle"></div>
</div>
@@ -226,7 +241,7 @@ found in the LICENSE file. -->
<thead>
<tr>
<td></td>
- <td>Type</td>
+ <td class="text">Type</td>
<td>Count</td>
<td></td>
</tr>
@@ -234,4 +249,4 @@ found in the LICENSE file. -->
<tbody></tbody>
</table>
</div>
-</div> \ No newline at end of file
+</div>
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs
index 0f376ea355..bbc8ec4698 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-tick.mjs
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {delay} from '../../helper.mjs';
import {TickLogEntry} from '../../log/tick.mjs';
import {Timeline} from '../../timeline.mjs';
-import {DOM, SVG} from '../helper.mjs';
+import {delay, DOM, SVG} from '../helper.mjs';
+
import {TimelineTrackStackedBase} from './timeline-track-stacked-base.mjs'
class Flame {
@@ -74,7 +74,6 @@ DOM.defineCustomElement(
const flameStack = [];
const ticks = this._timeline.values;
let maxDepth = 0;
-
for (let tickIndex = 0; tickIndex < ticks.length; tickIndex++) {
const tick = ticks[tickIndex];
const tickStack = tick.stack;
@@ -152,7 +151,7 @@ class Annotations {
const start = this._flames.find(time);
let offset = 0;
     // Draw annotations gradually outwards, starting from the given time.
- let deadline = performance.now() + 500;
+ let deadline = performance.now() + 100;
for (let range = 0; range < this._flames.length; range += 10000) {
this._markFlames(start - range, start - offset);
this._markFlames(start + offset, start + range);
@@ -165,7 +164,7 @@ class Annotations {
// Abort if we started another update asynchronously.
if (this._logEntry != logEntry) return;
- deadline = performance.now() + 500;
+ deadline = performance.now() + 100;
}
this._drawBuffer();
}
diff --git a/deps/v8/tools/system-analyzer/view/tool-tip-template.html b/deps/v8/tools/system-analyzer/view/tool-tip-template.html
index 78fcca920a..635b3336aa 100644
--- a/deps/v8/tools/system-analyzer/view/tool-tip-template.html
+++ b/deps/v8/tools/system-analyzer/view/tool-tip-template.html
@@ -8,6 +8,15 @@ found in the LICENSE file. -->
:host {
position: absolute;
z-index: 100;
+ will-change: transform;
+ }
+
+ #body {
+ display: none;
+ position: absolute;
+ --tip-offset: 10px;
+ --tip-width: 10px;
+ --tip-height: 40px;
}
#content {
@@ -20,7 +29,6 @@ found in the LICENSE file. -->
max-width: 400px;
min-height: 100px;
max-height: 400px;
- overflow: auto;
box-shadow: 0px 0px 10px rgba(0,0,0,0.5);
}
@@ -29,22 +37,13 @@ found in the LICENSE file. -->
}
.textContent {
- font-family: monospace;
+ font-family: var(--code-font);
white-space: pre;
overflow-wrap: anywhere;
overflow-x: hidden;
max-width: 500px;
}
- #body {
- display: none;
- position: absolute;
- z-index: 99999;
- --tip-offset: 10px;
- --tip-width: 10px;
- --tip-height: 40px;
- }
-
#body.top {
bottom: var(--tip-height);
}
diff --git a/deps/v8/tools/system-analyzer/view/tool-tip.mjs b/deps/v8/tools/system-analyzer/view/tool-tip.mjs
index 5be98ae214..08714047bc 100644
--- a/deps/v8/tools/system-analyzer/view/tool-tip.mjs
+++ b/deps/v8/tools/system-analyzer/view/tool-tip.mjs
@@ -87,6 +87,9 @@ DOM.defineCustomElement(
set content(content) {
if (!content) return this.hide();
this.show();
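+    // Skip re-rendering if the content hasn't changed.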
+ if (this._content === content) return;
+ this._content = content;
+
if (typeof content === 'string') {
this.contentNode.innerHTML = content;
this.contentNode.className = 'textContent';
@@ -112,12 +115,15 @@ DOM.defineCustomElement(
}
hide() {
+ this._content = undefined;
+ if (this._isHidden) return;
this._isHidden = true;
this.bodyNode.style.display = 'none';
this.targetNode = undefined;
}
show() {
+ if (!this._isHidden) return;
this.bodyNode.style.display = 'block';
this._isHidden = false;
}
diff --git a/deps/v8/tools/testrunner/PRESUBMIT.py b/deps/v8/tools/testrunner/PRESUBMIT.py
index a01f55ee29..e7dd88a5a4 100644
--- a/deps/v8/tools/testrunner/PRESUBMIT.py
+++ b/deps/v8/tools/testrunner/PRESUBMIT.py
@@ -8,13 +8,16 @@ USE_PYTHON3 = True
def _CommonChecks(input_api, output_api):
- return input_api.RunTests(input_api.canned_checks.GetUnitTestsRecursively(
- input_api,
- output_api,
- input_api.os_path.join(input_api.PresubmitLocalPath()),
- files_to_check=[r'.+_unittest\.py$'],
- files_to_skip=[],
- ))
+ return input_api.RunTests(
+ input_api.canned_checks.GetUnitTestsRecursively(
+ input_api,
+ output_api,
+ input_api.os_path.join(input_api.PresubmitLocalPath()),
+ files_to_check=[r'.+_test\.py$'],
+ files_to_skip=[],
+ run_on_python2=False,
+ ))
+
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 0c7f839b35..bda8d3422e 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -2,8 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
from functools import reduce
from collections import OrderedDict, namedtuple
@@ -387,9 +385,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -837,9 +832,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index 6942d1b9a4..1ef894424c 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
from contextlib import contextmanager
import os
import re
@@ -19,8 +16,6 @@ from ..local.android import (
from ..local import utils
from ..objects import output
-PYTHON3 = sys.version_info >= (3, 0)
-
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , '..', '..'))
@@ -115,17 +110,11 @@ class BaseCommand(object):
timer.cancel()
- def convert(stream):
- if PYTHON3:
- return stream.decode('utf-8', 'replace')
- else:
- return stream.decode('utf-8', 'replace').encode('utf-8')
-
return output.Output(
process.returncode,
timeout_occured[0],
- convert(stdout),
- convert(stderr),
+ stdout.decode('utf-8', 'replace'),
+ stderr.decode('utf-8', 'replace'),
process.pid,
duration
)
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index f3f2e9dadc..619af3037f 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -1,11 +1,8 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
from contextlib import contextmanager
from multiprocessing import Process, Queue
import os
diff --git a/deps/v8/tools/testrunner/local/pool_unittest.py b/deps/v8/tools/testrunner/local/pool_test.py
index 240cd563f8..a5a62638b8 100755
--- a/deps/v8/tools/testrunner/local/pool_unittest.py
+++ b/deps/v8/tools/testrunner/local/pool_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,18 +8,21 @@ import sys
import unittest
# Needed because the test runner contains relative imports.
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.local.pool import Pool
+
def Run(x):
if x == 10:
raise Exception("Expected exception triggered by test.")
return x
+
class PoolTest(unittest.TestCase):
+
def testNormal(self):
results = set()
pool = Pool(3)
@@ -28,7 +31,7 @@ class PoolTest(unittest.TestCase):
# Any result can be a heartbeat due to timings.
continue
results.add(result.value)
- self.assertEquals(set(range(0, 10)), results)
+ self.assertEqual(set(range(0, 10)), results)
def testException(self):
results = set()
@@ -42,7 +45,7 @@ class PoolTest(unittest.TestCase):
results.add(result.value)
expect = set(range(0, 12))
expect.remove(10)
- self.assertEquals(expect, results)
+ self.assertEqual(expect, results)
def testAdd(self):
results = set()
@@ -54,9 +57,9 @@ class PoolTest(unittest.TestCase):
results.add(result.value)
if result.value < 30:
pool.add([result.value + 20])
- self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
- results)
+ self.assertEqual(
+ set(range(0, 10)) | set(range(20, 30)) | set(range(40, 50)), results)
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 174bd27a5f..17a705fab0 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -25,10 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-from __future__ import absolute_import
-
import os
import re
diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_test.py
index 3e2493c0ce..fda571834f 100755
--- a/deps/v8/tools/testrunner/local/statusfile_unittest.py
+++ b/deps/v8/tools/testrunner/local/statusfile_test.py
@@ -1,28 +1,24 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
-from __future__ import absolute_import
import os
import sys
import unittest
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.local import statusfile
from testrunner.local.utils import Freeze
-
TEST_VARIABLES = {
- 'system': 'linux',
- 'mode': 'release',
+ 'system': 'linux',
+ 'mode': 'release',
}
-
TEST_STATUS_FILE = """
[
[ALWAYS, {
@@ -46,6 +42,7 @@ def make_variables():
class UtilsTest(unittest.TestCase):
+
def test_freeze(self):
self.assertEqual(2, Freeze({1: [2]})[1][0])
self.assertEqual(set([3]), Freeze({1: [2], 2: set([3])})[2])
@@ -67,32 +64,29 @@ class UtilsTest(unittest.TestCase):
class StatusFileTest(unittest.TestCase):
+
def test_eval_expression(self):
variables = make_variables()
variables.update(statusfile.VARIABLES)
self.assertTrue(
- statusfile._EvalExpression(
- 'system==linux and mode==release', variables))
+ statusfile._EvalExpression('system==linux and mode==release',
+ variables))
self.assertTrue(
- statusfile._EvalExpression(
- 'system==linux or variant==default', variables))
+ statusfile._EvalExpression('system==linux or variant==default',
+ variables))
self.assertFalse(
- statusfile._EvalExpression(
- 'system==linux and mode==debug', variables))
+ statusfile._EvalExpression('system==linux and mode==debug', variables))
self.assertRaises(
- AssertionError,
- lambda: statusfile._EvalExpression(
+ AssertionError, lambda: statusfile._EvalExpression(
'system==linux and mode==foo', variables))
self.assertRaises(
- SyntaxError,
- lambda: statusfile._EvalExpression(
+ SyntaxError, lambda: statusfile._EvalExpression(
'system==linux and mode=release', variables))
self.assertEquals(
statusfile.VARIANT_EXPRESSION,
- statusfile._EvalExpression(
- 'system==linux and variant==default', variables)
- )
+ statusfile._EvalExpression('system==linux and variant==default',
+ variables))
def test_read_statusfile_section_true(self):
rules, prefix_rules = statusfile.ReadStatusFile(
@@ -100,14 +94,14 @@ class StatusFileTest(unittest.TestCase):
self.assertEquals(
{
- 'foo/bar': set(['PASS', 'SKIP']),
- 'baz/bar': set(['PASS', 'FAIL', 'SLOW']),
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL', 'SLOW']),
},
rules[''],
)
self.assertEquals(
{
- 'foo/': set(['SLOW', 'FAIL']),
+ 'foo/': set(['SLOW', 'FAIL']),
},
prefix_rules[''],
)
@@ -120,14 +114,14 @@ class StatusFileTest(unittest.TestCase):
self.assertEquals(
{
- 'foo/bar': set(['PASS', 'SKIP']),
- 'baz/bar': set(['PASS', 'FAIL']),
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL']),
},
rules[''],
)
self.assertEquals(
{
- 'foo/': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
prefix_rules[''],
)
@@ -142,30 +136,30 @@ class StatusFileTest(unittest.TestCase):
self.assertEquals(
{
- 'foo/bar': set(['PASS', 'SKIP']),
- 'baz/bar': set(['PASS', 'FAIL']),
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL']),
},
rules[''],
)
self.assertEquals(
{
- 'foo/': set(['PASS', 'SLOW']),
+ 'foo/': set(['PASS', 'SLOW']),
},
prefix_rules[''],
)
self.assertEquals(
{
- 'baz/bar': set(['PASS', 'SLOW']),
+ 'baz/bar': set(['PASS', 'SLOW']),
},
rules['default'],
)
self.assertEquals(
{
- 'foo/': set(['FAIL']),
+ 'foo/': set(['FAIL']),
},
prefix_rules['default'],
)
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_test.py
index b74fef1842..50ba6421f2 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,8 +10,8 @@ import tempfile
import unittest
# Needed because the test runner contains relative imports.
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.local.testsuite import TestSuite, TestGenerator
@@ -20,6 +20,7 @@ from testrunner.test_config import TestConfig
class TestSuiteTest(unittest.TestCase):
+
def setUp(self):
test_dir = os.path.dirname(__file__)
self.test_root = os.path.join(test_dir, "fake_testsuite")
@@ -37,8 +38,8 @@ class TestSuiteTest(unittest.TestCase):
verbose=False,
)
- self.suite = TestSuite.Load(
- self.test_root, self.test_config, "standard_runner")
+ self.suite = TestSuite.Load(self.test_root, self.test_config,
+ "standard_runner")
def testLoadingTestSuites(self):
self.assertEquals(self.suite.name, "fake_testsuite")
@@ -49,8 +50,8 @@ class TestSuiteTest(unittest.TestCase):
self.assertIsNone(self.suite.statusfile)
def testLoadingTestsFromDisk(self):
- tests = self.suite.load_tests_from_disk(
- statusfile_variables={})
+ tests = self.suite.load_tests_from_disk(statusfile_variables={})
+
def is_generator(iterator):
return iterator == iter(iterator)
@@ -65,10 +66,8 @@ class TestSuiteTest(unittest.TestCase):
self.assertIsNotNone(self.suite.statusfile)
def testMergingTestGenerators(self):
- tests = self.suite.load_tests_from_disk(
- statusfile_variables={})
- more_tests = self.suite.load_tests_from_disk(
- statusfile_variables={})
+ tests = self.suite.load_tests_from_disk(statusfile_variables={})
+ more_tests = self.suite.load_tests_from_disk(statusfile_variables={})
# Merge the test generators
tests.merge(more_tests)
@@ -83,4 +82,4 @@ class TestSuiteTest(unittest.TestCase):
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 896f073166..cb1f4397b9 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -25,9 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
from os.path import exists
from os.path import isdir
from os.path import join
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 7b5eb4fd38..d72a7b10dd 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -15,8 +15,10 @@ ALL_VARIANT_FLAGS = {
"experimental_regexp": [["--default-to-experimental-regexp-engine"]],
"jitless": [["--jitless"]],
"sparkplug": [["--sparkplug"]],
+ # TODO(v8:v8:7700): Support concurrent compilation and remove flag.
+ "maglev": [["--maglev", "--no-concurrent-recompilation"]],
"concurrent_sparkplug": [["--concurrent-sparkplug", "--sparkplug"]],
- "always_sparkplug": [[ "--always-sparkplug", "--sparkplug"]],
+ "always_sparkplug": [["--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
@@ -52,7 +54,7 @@ ALL_VARIANT_FLAGS = {
INCOMPATIBLE_FLAGS_PER_VARIANT = {
"jitless": [
"--opt", "--always-opt", "--liftoff", "--track-field-types",
- "--validate-asm", "--sparkplug", "--concurrent-sparkplug",
+ "--validate-asm", "--sparkplug", "--concurrent-sparkplug", "--maglev",
"--always-sparkplug", "--regexp-tier-up", "--no-regexp-interpret-all",
"--maglev"
],
@@ -76,6 +78,11 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
],
"sparkplug": ["--jitless"],
"concurrent_sparkplug": ["--jitless"],
+ # TODO(v8:v8:7700): Support concurrent compilation and remove incompatible flags.
+ "maglev": [
+ "--jitless", "--concurrent-recompilation",
+ "--stress-concurrent-inlining"
+ ],
"always_sparkplug": ["--jitless"],
"code_serializer": [
"--cache=after-execute", "--cache=full-code-cache", "--cache=none"
diff --git a/deps/v8/tools/testrunner/local/verbose.py b/deps/v8/tools/testrunner/local/verbose.py
index 8569368a75..5bb9cab4fb 100644
--- a/deps/v8/tools/testrunner/local/verbose.py
+++ b/deps/v8/tools/testrunner/local/verbose.py
@@ -25,9 +25,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import sys
import time
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index 5e319f035a..e5d2f4a355 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -1,13 +1,9 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import absolute_import
-from __future__ import print_function
-
import random
import sys
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 82fb543055..56892cd326 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -69,19 +69,9 @@ TEST262_FRONTMATTER_PATTERN = re.compile(r"/\*---.*?---\*/", re.DOTALL)
TIMEOUT_LONG = "long"
-try:
- cmp # Python 2
-except NameError:
- def cmp(x, y): # Python 3
- return (x > y) - (x < y)
-
def read_file(file):
- try: # Python 3
- with open(file, encoding='ISO-8859-1') as f:
- return f.read()
- except TypeError: # Python 2 ..
- with open(file) as f:
- return f.read()
+ with open(file, encoding='ISO-8859-1') as f:
+ return f.read()
class TestCase(object):
def __init__(self, suite, path, name, test_config):
@@ -440,6 +430,8 @@ class TestCase(object):
def __cmp__(self, other):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
+ def cmp(x, y):
+ return (x > y) - (x < y)
return cmp(
(self.suite.name, self.name, self.variant),
(other.suite.name, other.name, other.variant)
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index b7ee301c5e..75e98ee024 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -2,12 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-try: # Python3
- from itertools import zip_longest
- PYTHON3 = True
-except ImportError: # Python2
- from itertools import izip_longest as zip_longest
- PYTHON3 = False
+from itertools import zip_longest
from ..testproc.base import (
DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
@@ -147,9 +142,7 @@ class ExpectedOutProc(OutProc):
if output.exit_code != 0:
return True
- # TODO(https://crbug.com/1292013): Simplify after Python3 migration.
- kwargs = {'encoding': 'utf-8'} if PYTHON3 else {}
- with open(self._expected_filename, 'r', **kwargs) as f:
+ with open(self._expected_filename, 'r', encoding='utf-8') as f:
expected_lines = f.readlines()
for act_iterator in self._act_block_iterator(output):
diff --git a/deps/v8/tools/testrunner/outproc/message.py b/deps/v8/tools/testrunner/outproc/message.py
index 39b8eadf64..c2ef141f9b 100644
--- a/deps/v8/tools/testrunner/outproc/message.py
+++ b/deps/v8/tools/testrunner/outproc/message.py
@@ -5,10 +5,7 @@
import os
import re
-try: # Python3
- from itertools import zip_longest
-except ImportError: # Python2
- from itertools import izip_longest as zip_longest
+from itertools import zip_longest
from . import base
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index eed7527453..18968d4f48 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -1,12 +1,9 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import absolute_import
-from __future__ import print_function
from functools import reduce
import datetime
diff --git a/deps/v8/tools/testrunner/testproc/combiner.py b/deps/v8/tools/testrunner/testproc/combiner.py
index 4d992f4c65..0761330c97 100644
--- a/deps/v8/tools/testrunner/testproc/combiner.py
+++ b/deps/v8/tools/testrunner/testproc/combiner.py
@@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
from collections import defaultdict
import time
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index ec97ab226f..6fdcc3a4d7 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -2,10 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-from __future__ import absolute_import
-
import datetime
import json
import os
@@ -15,7 +11,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test, is_flaky=False):
@@ -24,7 +19,9 @@ def print_failure_header(test, is_flaky=False):
text.append('[negative]')
if is_flaky:
text.append('(flaky)')
- print('=== %s ===' % ' '.join(text))
+ output = '=== %s ===' % ' '.join(text)
+ encoding = sys.stdout.encoding or 'utf-8'
+ print(output.encode(encoding, errors='replace').decode(encoding))
class ResultsTracker(base.TestProcObserver):
@@ -119,17 +116,17 @@ class StreamProgressIndicator(ProgressIndicator):
self._requirement = base.DROP_PASS_OUTPUT
def _on_result_for(self, test, result):
- if not result.has_unexpected_output:
- self.print('PASS', test)
- elif result.output.HasCrashed():
- self.print("CRASH", test)
- elif result.output.HasTimedOut():
- self.print("TIMEOUT", test)
+ if not result.has_unexpected_output:
+ self.print('PASS', test)
+ elif result.output.HasCrashed():
+ self.print("CRASH", test)
+ elif result.output.HasTimedOut():
+ self.print("TIMEOUT", test)
+ else:
+ if test.is_fail:
+ self.print("UNEXPECTED PASS", test)
else:
- if test.is_fail:
- self.print("UNEXPECTED PASS", test)
- else:
- self.print("FAIL", test)
+ self.print("FAIL", test)
def print(self, prefix, test):
print('%s: %ss' % (prefix, test))
@@ -152,7 +149,8 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
self._last_printed_time = time.time()
def _print(self, text):
- print(text)
+ encoding = sys.stdout.encoding or 'utf-8'
+ print(text.encode(encoding, errors='replace').decode(encoding))
sys.stdout.flush()
self._last_printed_time = time.time()
@@ -349,11 +347,11 @@ class ColorProgressIndicator(CompactProgressIndicator):
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
- templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
- "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
- }
- super(MonochromeProgressIndicator, self).__init__(templates)
+ templates = {
+ 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
+ "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
+ }
+ super(MonochromeProgressIndicator, self).__init__(templates)
def printFormatted(self, format, string):
print(string)
@@ -362,45 +360,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/testrunner/testproc/sequence_unittest.py b/deps/v8/tools/testrunner/testproc/sequence_test.py
index 8a0edc36bb..2cd4a063f3 100644
--- a/deps/v8/tools/testrunner/testproc/sequence_unittest.py
+++ b/deps/v8/tools/testrunner/testproc/sequence_test.py
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2021 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""
Test integrating the sequence processor into a simple test pipeline.
"""
@@ -12,8 +11,8 @@ import sys
import unittest
# Needed because the test runner contains relative imports.
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc import base
@@ -26,6 +25,7 @@ class FakeExecutionProc(base.TestProc):
Test execution is simulated for each test by calling run().
"""
+
def __init__(self):
super(FakeExecutionProc, self).__init__()
self.tests = []
@@ -41,6 +41,7 @@ class FakeExecutionProc(base.TestProc):
class FakeResultObserver(base.TestProcObserver):
"""Observer to track all results sent back through the pipeline."""
+
def __init__(self):
super(FakeResultObserver, self).__init__()
self.tests = set([])
@@ -51,6 +52,7 @@ class FakeResultObserver(base.TestProcObserver):
class FakeTest(object):
"""Simple test representation to differentiate light/heavy tests."""
+
def __init__(self, n, is_heavy):
self.n = n
self.is_heavy = is_heavy
@@ -58,6 +60,7 @@ class FakeTest(object):
class TestSequenceProc(unittest.TestCase):
+
def _test(self, tests, batch_size, max_heavy):
# Set up a simple processing pipeline:
# Loader -> observe results -> sequencer -> execution.
@@ -87,76 +90,78 @@ class TestSequenceProc(unittest.TestCase):
self.assertEqual(set(test.n for test in tests), results.tests)
def test_wrong_usage(self):
- self.assertRaises(lambda: SequenceProc(0))
+ with self.assertRaises(Exception):
+ SequenceProc(0)
def test_no_tests(self):
self._test([], 1, 1)
def test_large_batch_light(self):
self._test([
- FakeTest(0, False),
- FakeTest(1, False),
- FakeTest(2, False),
+ FakeTest(0, False),
+ FakeTest(1, False),
+ FakeTest(2, False),
], 4, 1)
def test_small_batch_light(self):
self._test([
- FakeTest(0, False),
- FakeTest(1, False),
- FakeTest(2, False),
+ FakeTest(0, False),
+ FakeTest(1, False),
+ FakeTest(2, False),
], 2, 1)
def test_large_batch_heavy(self):
self._test([
- FakeTest(0, True),
- FakeTest(1, True),
- FakeTest(2, True),
+ FakeTest(0, True),
+ FakeTest(1, True),
+ FakeTest(2, True),
], 4, 1)
def test_small_batch_heavy(self):
self._test([
- FakeTest(0, True),
- FakeTest(1, True),
- FakeTest(2, True),
+ FakeTest(0, True),
+ FakeTest(1, True),
+ FakeTest(2, True),
], 2, 1)
def test_large_batch_mixed(self):
self._test([
- FakeTest(0, True),
- FakeTest(1, False),
- FakeTest(2, True),
- FakeTest(3, False),
+ FakeTest(0, True),
+ FakeTest(1, False),
+ FakeTest(2, True),
+ FakeTest(3, False),
], 4, 1)
def test_small_batch_mixed(self):
self._test([
- FakeTest(0, True),
- FakeTest(1, False),
- FakeTest(2, True),
- FakeTest(3, False),
+ FakeTest(0, True),
+ FakeTest(1, False),
+ FakeTest(2, True),
+ FakeTest(3, False),
], 2, 1)
def test_large_batch_more_heavy(self):
self._test([
- FakeTest(0, True),
- FakeTest(1, True),
- FakeTest(2, True),
- FakeTest(3, False),
- FakeTest(4, True),
- FakeTest(5, True),
- FakeTest(6, False),
+ FakeTest(0, True),
+ FakeTest(1, True),
+ FakeTest(2, True),
+ FakeTest(3, False),
+ FakeTest(4, True),
+ FakeTest(5, True),
+ FakeTest(6, False),
], 4, 2)
def test_small_batch_more_heavy(self):
self._test([
- FakeTest(0, True),
- FakeTest(1, True),
- FakeTest(2, True),
- FakeTest(3, False),
- FakeTest(4, True),
- FakeTest(5, True),
- FakeTest(6, False),
+ FakeTest(0, True),
+ FakeTest(1, True),
+ FakeTest(2, True),
+ FakeTest(3, False),
+ FakeTest(4, True),
+ FakeTest(5, True),
+ FakeTest(6, False),
], 2, 2)
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/testrunner/testproc/shard_unittest.py b/deps/v8/tools/testrunner/testproc/shard_test.py
index 33a094e05a..160d8d6ae3 100755
--- a/deps/v8/tools/testrunner/testproc/shard_unittest.py
+++ b/deps/v8/tools/testrunner/testproc/shard_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,14 +9,15 @@ import tempfile
import unittest
# Needed because the test runner contains relative imports.
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc.shard import radix_hash
class TestRadixHashing(unittest.TestCase):
+
def test_hash_character_by_radix(self):
self.assertEqual(97, radix_hash(capacity=2**32, key="a"))
@@ -28,20 +29,20 @@ class TestRadixHashing(unittest.TestCase):
def test_hash_test_id(self):
self.assertEqual(
- 5,
- radix_hash(capacity=7,
- key="test262/Map/class-private-method-Variant-0-1"))
+ 5,
+ radix_hash(
+ capacity=7, key="test262/Map/class-private-method-Variant-0-1"))
def test_hash_boundaries(self):
total_variants = 5
cases = []
for case in [
- "test262/Map/class-private-method",
- "test262/Map/class-public-method",
- "test262/Map/object-retrieval",
- "test262/Map/object-deletion",
- "test262/Map/object-creation",
- "test262/Map/garbage-collection",
+ "test262/Map/class-private-method",
+ "test262/Map/class-public-method",
+ "test262/Map/object-retrieval",
+ "test262/Map/object-deletion",
+ "test262/Map/object-creation",
+ "test262/Map/garbage-collection",
]:
for variant_index in range(total_variants):
cases.append("%s-Variant-%d" % (case, variant_index))
diff --git a/deps/v8/tools/testrunner/testproc/sigproc.py b/deps/v8/tools/testrunner/testproc/sigproc.py
index f29fa22e60..f81e7f9420 100644
--- a/deps/v8/tools/testrunner/testproc/sigproc.py
+++ b/deps/v8/tools/testrunner/testproc/sigproc.py
@@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import signal
from . import base
diff --git a/deps/v8/tools/testrunner/testproc/timeout.py b/deps/v8/tools/testrunner/testproc/timeout.py
index 026ba02cd9..9a4e88c8f0 100644
--- a/deps/v8/tools/testrunner/testproc/timeout.py
+++ b/deps/v8/tools/testrunner/testproc/timeout.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/testproc/util.py b/deps/v8/tools/testrunner/testproc/util.py
index 1f5cc7ef91..f8fbb6cefe 100644
--- a/deps/v8/tools/testrunner/testproc/util.py
+++ b/deps/v8/tools/testrunner/testproc/util.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/testproc/util_unittest.py b/deps/v8/tools/testrunner/testproc/util_test.py
index 5bf6a6e79a..e1203c4c1a 100644
--- a/deps/v8/tools/testrunner/testproc/util_unittest.py
+++ b/deps/v8/tools/testrunner/testproc/util_test.py
@@ -1,21 +1,21 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-from __future__ import absolute_import
-
import os
import sys
import unittest
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc.util import FixedSizeTopList
+
class TestOrderedFixedSizeList(unittest.TestCase):
+
def test_empty(self):
ofsl = FixedSizeTopList(3)
self.assertEqual(ofsl.as_list(), [])
@@ -24,7 +24,7 @@ class TestOrderedFixedSizeList(unittest.TestCase):
ofsl = FixedSizeTopList(3)
ofsl.add(1)
ofsl.add(2)
- self.assertEqual(ofsl.as_list(), [2,1])
+ self.assertEqual(ofsl.as_list(), [2, 1])
def test_4321(self):
ofsl = FixedSizeTopList(3)
@@ -33,7 +33,7 @@ class TestOrderedFixedSizeList(unittest.TestCase):
ofsl.add(2)
ofsl.add(1)
data = ofsl.as_list()
- self.assertEqual(data, [4,3,2])
+ self.assertEqual(data, [4, 3, 2])
def test_544321(self):
ofsl = FixedSizeTopList(4)
@@ -47,21 +47,21 @@ class TestOrderedFixedSizeList(unittest.TestCase):
self.assertEqual(data, [5, 4, 4, 3])
def test_withkey(self):
- ofsl = FixedSizeTopList(3,key=lambda x: x['val'])
- ofsl.add({'val':4, 'something': 'four'})
- ofsl.add({'val':3, 'something': 'three'})
- ofsl.add({'val':-1, 'something': 'minusone'})
- ofsl.add({'val':5, 'something': 'five'})
- ofsl.add({'val':0, 'something': 'zero'})
+ ofsl = FixedSizeTopList(3, key=lambda x: x['val'])
+ ofsl.add({'val': 4, 'something': 'four'})
+ ofsl.add({'val': 3, 'something': 'three'})
+ ofsl.add({'val': -1, 'something': 'minusone'})
+ ofsl.add({'val': 5, 'something': 'five'})
+ ofsl.add({'val': 0, 'something': 'zero'})
data = [e['something'] for e in ofsl.as_list()]
self.assertEqual(data, ['five', 'four', 'three'])
def test_withkeyclash(self):
# Test that a key clash does not throw exeption
- ofsl = FixedSizeTopList(2,key=lambda x: x['val'])
- ofsl.add({'val':2, 'something': 'two'})
- ofsl.add({'val':2, 'something': 'two'})
- ofsl.add({'val':0, 'something': 'zero'})
+ ofsl = FixedSizeTopList(2, key=lambda x: x['val'])
+ ofsl.add({'val': 2, 'something': 'two'})
+ ofsl.add({'val': 2, 'something': 'two'})
+ ofsl.add({'val': 0, 'something': 'zero'})
data = [e['something'] for e in ofsl.as_list()]
self.assertEqual(data, ['two', 'two'])
diff --git a/deps/v8/tools/testrunner/testproc/variant_unittest.py b/deps/v8/tools/testrunner/testproc/variant_test.py
index 56e28c8d5b..6a09e797f9 100755
--- a/deps/v8/tools/testrunner/testproc/variant_unittest.py
+++ b/deps/v8/tools/testrunner/testproc/variant_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,8 +9,8 @@ import tempfile
import unittest
# Needed because the test runner contains relative imports.
-TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc import base
@@ -18,6 +18,7 @@ from testrunner.testproc.variant import VariantProc
class FakeResultObserver(base.TestProcObserver):
+
def __init__(self):
super(FakeResultObserver, self).__init__()
@@ -28,6 +29,7 @@ class FakeResultObserver(base.TestProcObserver):
class FakeFilter(base.TestProcFilter):
+
def __init__(self, filter_predicate):
super(FakeFilter, self).__init__()
@@ -47,11 +49,13 @@ class FakeFilter(base.TestProcFilter):
class FakeSuite(object):
+
def __init__(self, name):
self.name = name
class FakeTest(object):
+
def __init__(self, procid):
self.suite = FakeSuite("fake_suite")
self.procid = procid
@@ -66,6 +70,7 @@ class FakeTest(object):
class FakeVariantGen(object):
+
def __init__(self, variants):
self._variants = variants
@@ -75,6 +80,7 @@ class FakeVariantGen(object):
class TestVariantProcLoading(unittest.TestCase):
+
def setUp(self):
self.test = FakeTest("test")
@@ -86,12 +92,11 @@ class TestVariantProcLoading(unittest.TestCase):
# Creates a Variant processor containing the possible types of test
# variants.
self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
- self.variant_proc._variant_gens = {
- "fake_suite": FakeVariantGen(variants)}
+ self.variant_proc._variant_gens = {"fake_suite": FakeVariantGen(variants)}
# FakeFilter only lets tests passing the predicate to be loaded.
self.fake_filter = FakeFilter(
- filter_predicate=(lambda t: t.procid == "to_filter"))
+ filter_predicate=(lambda t: t.procid == "to_filter"))
# FakeResultObserver to verify that VariantProc calls result_for correctly.
self.fake_result_observer = FakeResultObserver()
@@ -112,10 +117,10 @@ class TestVariantProcLoading(unittest.TestCase):
def test_filters_first_two_variants(self):
variants = [
- FakeTest('to_filter'),
- FakeTest('to_filter'),
- FakeTest('to_load'),
- FakeTest('to_load'),
+ FakeTest('to_filter'),
+ FakeTest('to_filter'),
+ FakeTest('to_load'),
+ FakeTest('to_load'),
]
expected_load_results = {variants[2]}
@@ -126,9 +131,9 @@ class TestVariantProcLoading(unittest.TestCase):
def test_stops_loading_after_first_successful_load(self):
variants = [
- FakeTest('to_load'),
- FakeTest('to_load'),
- FakeTest('to_filter'),
+ FakeTest('to_load'),
+ FakeTest('to_load'),
+ FakeTest('to_filter'),
]
expected_load_results = {variants[0]}
@@ -139,8 +144,8 @@ class TestVariantProcLoading(unittest.TestCase):
def test_return_result_when_out_of_variants(self):
variants = [
- FakeTest('to_filter'),
- FakeTest('to_load'),
+ FakeTest('to_filter'),
+ FakeTest('to_load'),
]
self._simulate_proc(variants)
@@ -153,9 +158,9 @@ class TestVariantProcLoading(unittest.TestCase):
def test_return_result_after_running_variants(self):
variants = [
- FakeTest('to_filter'),
- FakeTest('to_load'),
- FakeTest('to_load'),
+ FakeTest('to_filter'),
+ FakeTest('to_load'),
+ FakeTest('to_load'),
]
self._simulate_proc(variants)
@@ -168,5 +173,6 @@ class TestVariantProcLoading(unittest.TestCase):
expected_results = {(self.test, None)}
self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py b/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py
index 963b0e2abe..a8ab0afb46 100644
--- a/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py
+++ b/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py
@@ -11,9 +11,6 @@ Raw gyp values are supported - they will be tranformed into valid json.
"""
# TODO(machenbach): Remove this when gyp is deprecated.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import json
import os
import sys
diff --git a/deps/v8/tools/tickprocessor.mjs b/deps/v8/tools/tickprocessor.mjs
index 071bf35ca4..6afcea0797 100644
--- a/deps/v8/tools/tickprocessor.mjs
+++ b/deps/v8/tools/tickprocessor.mjs
@@ -510,10 +510,8 @@ export class TickProcessor extends LogReader {
onlySummary,
runtimeTimerFilter,
preprocessJson) {
- super({},
- timedRange,
- pairwiseTimedRange);
- this.dispatchTable_ = {
+ super(timedRange, pairwiseTimedRange);
+ this.setDispatchTable({
__proto__: null,
'shared-library': {
parsers: [parseString, parseInt, parseInt, parseInt],
@@ -586,7 +584,7 @@ export class TickProcessor extends LogReader {
'code-allocate': undefined,
'begin-code-region': undefined,
'end-code-region': undefined
- };
+ });
this.preprocessJson = preprocessJson;
this.cppEntriesProvider_ = cppEntriesProvider;
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index f46841875e..30404a8151 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -7,21 +7,16 @@
"""This program either generates the parser files for Torque, generating
the source and header files directly in V8's src directory."""
-# for py2/py3 compatibility
-from __future__ import print_function
-
import subprocess
import sys
import re
from subprocess import Popen, PIPE
-PYTHON3 = sys.version_info >= (3, 0)
-
-def maybe_decode(arg, encoding="utf-8"):
- return arg.decode(encoding) if PYTHON3 else arg
+def decode(arg, encoding="utf-8"):
+ return arg.decode(encoding)
-def maybe_encode(arg, encoding="utf-8"):
- return arg.encode(encoding) if PYTHON3 else arg
+def encode(arg, encoding="utf-8"):
+ return arg.encode(encoding)
kPercentEscape = r'α'; # Unicode alpha
kDerefEscape = r'☆'; # Unicode star
@@ -111,8 +106,8 @@ def process(filename, lint, should_format):
p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
else:
p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
- output, err = p.communicate(maybe_encode(preprocess(content)))
- output = postprocess(maybe_decode(output))
+ output, err = p.communicate(encode(preprocess(content)))
+ output = postprocess(decode(output))
rc = p.returncode
if (rc != 0):
print("error code " + str(rc) + " running clang-format. Exiting...")
@@ -124,7 +119,7 @@ def process(filename, lint, should_format):
if should_format:
output_file = open(filename, 'wb')
- output_file.write(maybe_encode(output))
+ output_file.write(encode(output))
output_file.close()
def print_usage():
diff --git a/deps/v8/tools/turbolizer/info-view.html b/deps/v8/tools/turbolizer/info-view.html
index 534860d54a..aceb595694 100644
--- a/deps/v8/tools/turbolizer/info-view.html
+++ b/deps/v8/tools/turbolizer/info-view.html
@@ -23,6 +23,14 @@
<td>Relayout graph</td>
</tr>
<tr>
+ <td>n</td>
+ <td>Show graph with selected nodes for next phase</td>
+ </tr>
+ <tr>
+ <td>b</td>
+ <td>Show graph with selected nodes for previous phase</td>
+ </tr>
+ <tr>
<td>a</td>
<td>Select all nodes</td>
</tr>
diff --git a/deps/v8/tools/turbolizer/src/edge.ts b/deps/v8/tools/turbolizer/src/edge.ts
index 30d265c561..c94ef9112e 100644
--- a/deps/v8/tools/turbolizer/src/edge.ts
+++ b/deps/v8/tools/turbolizer/src/edge.ts
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { GNode, DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { GNode, MINIMUM_EDGE_SEPARATION, DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
import { Graph } from "./graph";
-export const MINIMUM_EDGE_SEPARATION = 20;
+const BEZIER_CONSTANT = 0.3;
export class Edge {
target: GNode;
@@ -64,20 +64,31 @@ export class Edge {
const outputApproach = source.getOutputApproach(showTypes);
const horizontalPos = this.getInputHorizontalPosition(graph, showTypes);
- let result = "M" + outputX + "," + outputY +
- "L" + outputX + "," + outputApproach +
- "L" + horizontalPos + "," + outputApproach;
+ let result: string;
- if (horizontalPos != inputX) {
- result += "L" + horizontalPos + "," + inputApproach;
- } else {
- if (inputApproach < outputApproach) {
- inputApproach = outputApproach;
+ if (inputY < outputY) {
+ result = `M ${outputX} ${outputY}
+ L ${outputX} ${outputApproach}
+ L ${horizontalPos} ${outputApproach}`;
+
+ if (horizontalPos != inputX) {
+ result += `L ${horizontalPos} ${inputApproach}`;
+ } else {
+ if (inputApproach < outputApproach) {
+ inputApproach = outputApproach;
+ }
}
+
+ result += `L ${inputX} ${inputApproach}
+ L ${inputX} ${inputY}`;
+ } else {
+ const controlY = outputY + (inputY - outputY) * BEZIER_CONSTANT;
+ result = `M ${outputX} ${outputY}
+ C ${outputX} ${controlY},
+ ${inputX} ${outputY},
+ ${inputX} ${inputY}`;
}
- result += "L" + inputX + "," + inputApproach +
- "L" + inputX + "," + inputY;
return result;
}
diff --git a/deps/v8/tools/turbolizer/src/graph-layout.ts b/deps/v8/tools/turbolizer/src/graph-layout.ts
index 3687c28c86..ad32557e90 100644
--- a/deps/v8/tools/turbolizer/src/graph-layout.ts
+++ b/deps/v8/tools/turbolizer/src/graph-layout.ts
@@ -3,11 +3,11 @@
// found in the LICENSE file.
import { MAX_RANK_SENTINEL } from "../src/constants";
-import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
-import { NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH, DEFAULT_NODE_BUBBLE_RADIUS, GNode } from "../src/node";
+import { Edge } from "../src/edge";
+import { GNode, MINIMUM_EDGE_SEPARATION, NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH, DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
import { Graph } from "./graph";
-const DEFAULT_NODE_ROW_SEPARATION = 130;
+const DEFAULT_NODE_ROW_SEPARATION = 150;
const traceLayout = false;
function newGraphOccupation(graph: Graph) {
diff --git a/deps/v8/tools/turbolizer/src/graph-view.ts b/deps/v8/tools/turbolizer/src/graph-view.ts
index 3cb5e6fbc2..798a5cd340 100644
--- a/deps/v8/tools/turbolizer/src/graph-view.ts
+++ b/deps/v8/tools/turbolizer/src/graph-view.ts
@@ -19,6 +19,13 @@ function nodeToStringKey(n: GNode) {
return "" + n.id;
}
+function nodeOriginToStringKey(n: GNode): string | undefined {
+ if (n.nodeLabel && n.nodeLabel.origin) {
+ return "" + n.nodeLabel.origin.nodeId;
+ }
+ return undefined;
+}
+
interface GraphState {
showTypes: boolean;
selection: MySelection;
@@ -132,7 +139,7 @@ export class GraphView extends PhaseView {
}
};
- view.state.selection = new MySelection(nodeToStringKey);
+ view.state.selection = new MySelection(nodeToStringKey, nodeOriginToStringKey);
const defs = svg.append('svg:defs');
defs.append('svg:marker')
@@ -254,12 +261,14 @@ export class GraphView extends PhaseView {
this.toolbox.appendChild(createImgInput("toggle-types", "toggle types",
partial(this.toggleTypesAction, this)));
+ const adaptedSelection = this.adaptSelectionToCurrentPhase(data.data, rememberedSelection);
+
this.phaseName = data.name;
- this.createGraph(data.data, rememberedSelection);
+ this.createGraph(data.data, adaptedSelection);
this.broker.addNodeHandler(this.selectionHandler);
- if (rememberedSelection != null && rememberedSelection.size > 0) {
- this.attachSelection(rememberedSelection);
+ if (adaptedSelection != null && adaptedSelection.size > 0) {
+ this.attachSelection(adaptedSelection);
this.connectVisibleSelectedNodes();
this.viewSelection();
} else {
@@ -286,14 +295,14 @@ export class GraphView extends PhaseView {
this.deleteContent();
}
- createGraph(data, rememberedSelection) {
+ createGraph(data, selection) {
this.graph = new Graph(data);
this.showControlAction(this);
- if (rememberedSelection != undefined) {
+ if (selection != undefined) {
for (const n of this.graph.nodes()) {
- n.visible = n.visible || rememberedSelection.has(nodeToStringKey(n));
+ n.visible = n.visible || selection.has(nodeToStringKey(n));
}
}
@@ -359,6 +368,33 @@ export class GraphView extends PhaseView {
});
}
+ adaptSelectionToCurrentPhase(data, selection) {
+ const updatedGraphSelection = new Set();
+ if (!data || !(selection instanceof Map)) return updatedGraphSelection;
+ // Adding survived nodes (with the same id)
+ for (const node of data.nodes) {
+ const stringKey = this.state.selection.stringKey(node);
+ if (selection.has(stringKey)) {
+ updatedGraphSelection.add(stringKey);
+ }
+ }
+ // Adding children of nodes
+ for (const node of data.nodes) {
+ const originStringKey = this.state.selection.originStringKey(node);
+ if (originStringKey && selection.has(originStringKey)) {
+ updatedGraphSelection.add(this.state.selection.stringKey(node));
+ }
+ }
+ // Adding ancestors of nodes
+ selection.forEach(selectedNode => {
+ const originStringKey = this.state.selection.originStringKey(selectedNode);
+ if (originStringKey) {
+ updatedGraphSelection.add(originStringKey);
+ }
+ });
+ return updatedGraphSelection;
+ }
+
attachSelection(s) {
if (!(s instanceof Set)) return;
this.selectionHandler.clear();
diff --git a/deps/v8/tools/turbolizer/src/graph.ts b/deps/v8/tools/turbolizer/src/graph.ts
index 0eb2e3e1e6..8eb0d26d20 100644
--- a/deps/v8/tools/turbolizer/src/graph.ts
+++ b/deps/v8/tools/turbolizer/src/graph.ts
@@ -1,5 +1,5 @@
-import { GNode } from "./node";
-import { Edge, MINIMUM_EDGE_SEPARATION } from "./edge";
+import { GNode, MINIMUM_EDGE_SEPARATION } from "./node";
+import { Edge } from "./edge";
export class Graph {
nodeMap: Array<GNode>;
diff --git a/deps/v8/tools/turbolizer/src/graphmultiview.ts b/deps/v8/tools/turbolizer/src/graphmultiview.ts
index 4f8f633919..19113eef2b 100644
--- a/deps/v8/tools/turbolizer/src/graphmultiview.ts
+++ b/deps/v8/tools/turbolizer/src/graphmultiview.ts
@@ -8,6 +8,7 @@ import { SequenceView } from "../src/sequence-view";
import { SourceResolver } from "../src/source-resolver";
import { SelectionBroker } from "../src/selection-broker";
import { View, PhaseView } from "../src/view";
+import { GNode } from "./node";
const multiviewID = "multiview";
@@ -61,6 +62,10 @@ export class GraphMultiView extends View {
view.divNode.addEventListener("keyup", (e: KeyboardEvent) => {
if (e.keyCode == 191) { // keyCode == '/'
searchInput.focus();
+ } else if (e.keyCode == 78) { // keyCode == 'n'
+ view.displayNextGraphPhase();
+ } else if (e.keyCode == 66) { // keyCode == 'b'
+ view.displayPreviousGraphPhase();
}
});
searchInput.setAttribute("value", window.sessionStorage.getItem("lastSearch") || "");
@@ -101,7 +106,7 @@ export class GraphMultiView extends View {
this.displayPhase(this.sourceResolver.getPhase(initialPhaseIndex));
}
- displayPhase(phase, selection?: Set<any>) {
+ displayPhase(phase, selection?: Map<string, GNode>) {
if (phase.type == "graph") {
this.displayPhaseView(this.graph, phase, selection);
} else if (phase.type == "schedule") {
@@ -111,18 +116,46 @@ export class GraphMultiView extends View {
}
}
- displayPhaseView(view: PhaseView, data, selection?: Set<any>) {
+ displayPhaseView(view: PhaseView, data, selection?: Map<string, GNode>) {
const rememberedSelection = selection ? selection : this.hideCurrentPhase();
view.initializeContent(data, rememberedSelection);
this.currentPhaseView = view;
}
- displayPhaseByName(phaseName, selection?: Set<any>) {
+ displayPhaseByName(phaseName, selection?: Map<string, GNode>) {
const phaseId = this.sourceResolver.getPhaseIdByName(phaseName);
this.selectMenu.selectedIndex = phaseId;
this.displayPhase(this.sourceResolver.getPhase(phaseId), selection);
}
+ displayNextGraphPhase() {
+ let nextPhaseIndex = this.selectMenu.selectedIndex + 1;
+ while (nextPhaseIndex < this.sourceResolver.phases.length) {
+ const nextPhase = this.sourceResolver.getPhase(nextPhaseIndex);
+ if (nextPhase.type == "graph") {
+ this.selectMenu.selectedIndex = nextPhaseIndex;
+ window.sessionStorage.setItem("lastSelectedPhase", nextPhaseIndex.toString());
+ this.displayPhase(nextPhase);
+ break;
+ }
+ nextPhaseIndex += 1;
+ }
+ }
+
+ displayPreviousGraphPhase() {
+ let previousPhaseIndex = this.selectMenu.selectedIndex - 1;
+ while (previousPhaseIndex >= 0) {
+ const previousPhase = this.sourceResolver.getPhase(previousPhaseIndex);
+ if (previousPhase.type == "graph") {
+ this.selectMenu.selectedIndex = previousPhaseIndex;
+ window.sessionStorage.setItem("lastSelectedPhase", previousPhaseIndex.toString());
+ this.displayPhase(previousPhase);
+ break;
+ }
+ previousPhaseIndex -= 1;
+ }
+ }
+
hideCurrentPhase() {
let rememberedSelection = null;
if (this.currentPhaseView != null) {
diff --git a/deps/v8/tools/turbolizer/src/node.ts b/deps/v8/tools/turbolizer/src/node.ts
index 02906d1204..90db8adb1a 100644
--- a/deps/v8/tools/turbolizer/src/node.ts
+++ b/deps/v8/tools/turbolizer/src/node.ts
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { Edge } from "../src/edge";
import { NodeLabel } from "./node-label";
import { MAX_RANK_SENTINEL } from "./constants";
import { alignUp, measureText } from "./util";
@@ -10,6 +10,7 @@ import { alignUp, measureText } from "./util";
export const DEFAULT_NODE_BUBBLE_RADIUS = 12;
export const NODE_INPUT_WIDTH = 50;
export const MINIMUM_NODE_OUTPUT_APPROACH = 15;
+export const MINIMUM_EDGE_SEPARATION = 20;
const MINIMUM_NODE_INPUT_APPROACH = 15 + 2 * DEFAULT_NODE_BUBBLE_RADIUS;
export class GNode {
diff --git a/deps/v8/tools/turbolizer/src/selection.ts b/deps/v8/tools/turbolizer/src/selection.ts
index 044a1969c3..a3f3dd941f 100644
--- a/deps/v8/tools/turbolizer/src/selection.ts
+++ b/deps/v8/tools/turbolizer/src/selection.ts
@@ -2,13 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+import { GNode } from "./node";
+
export class MySelection {
selection: any;
stringKey: (o: any) => string;
+ originStringKey: (node: GNode) => string;
- constructor(stringKeyFnc) {
+ constructor(stringKeyFnc, originStringKeyFnc?) {
this.selection = new Map();
this.stringKey = stringKeyFnc;
+ this.originStringKey = originStringKeyFnc;
}
isEmpty(): boolean {
@@ -50,7 +54,7 @@ export class MySelection {
}
detachSelection() {
- const result = this.selectedKeys();
+ const result = this.selection;
this.clear();
return result;
}
diff --git a/deps/v8/tools/turbolizer/src/source-resolver.ts b/deps/v8/tools/turbolizer/src/source-resolver.ts
index 4d5bd250b7..4632ad306a 100644
--- a/deps/v8/tools/turbolizer/src/source-resolver.ts
+++ b/deps/v8/tools/turbolizer/src/source-resolver.ts
@@ -81,7 +81,7 @@ interface InstructionsPhase {
name: string;
data: any;
instructionOffsetToPCOffset?: any;
- blockIdtoInstructionRange?: any;
+ blockIdToInstructionRange?: any;
nodeIdToInstructionRange?: any;
codeOffsetsInfo?: CodeOffsetsInfo;
}
@@ -595,8 +595,8 @@ export class SourceResolver {
if (phase.nodeIdToInstructionRange) {
this.readNodeIdToInstructionRange(phase.nodeIdToInstructionRange);
}
- if (phase.blockIdtoInstructionRange) {
- this.readBlockIdToInstructionRange(phase.blockIdtoInstructionRange);
+ if (phase.blockIdToInstructionRange) {
+ this.readBlockIdToInstructionRange(phase.blockIdToInstructionRange);
}
if (phase.instructionOffsetToPCOffset) {
this.readInstructionOffsetToPCOffset(phase.instructionOffsetToPCOffset);
diff --git a/deps/v8/tools/turbolizer/src/text-view.ts b/deps/v8/tools/turbolizer/src/text-view.ts
index dcda2db2de..fbf43f03d9 100644
--- a/deps/v8/tools/turbolizer/src/text-view.ts
+++ b/deps/v8/tools/turbolizer/src/text-view.ts
@@ -20,7 +20,7 @@ export abstract class TextView extends PhaseView {
instructionIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
nodeIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
blockIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
- blockIdtoNodeIds: Map<string, Array<string>>;
+ blockIdToNodeIds: Map<string, Array<string>>;
nodeIdToBlockId: Array<string>;
patterns: any;
sourceResolver: SourceResolver;
@@ -34,7 +34,7 @@ export abstract class TextView extends PhaseView {
view.instructionIdToHtmlElementsMap = new Map();
view.nodeIdToHtmlElementsMap = new Map();
view.blockIdToHtmlElementsMap = new Map();
- view.blockIdtoNodeIds = new Map();
+ view.blockIdToNodeIds = new Map();
view.nodeIdToBlockId = [];
view.selection = new MySelection(anyToString);
view.blockSelection = new MySelection(anyToString);
@@ -147,10 +147,10 @@ export abstract class TextView extends PhaseView {
addNodeIdToBlockId(anyNodeId, anyBlockId) {
const blockId = anyToString(anyBlockId);
- if (!this.blockIdtoNodeIds.has(blockId)) {
- this.blockIdtoNodeIds.set(blockId, []);
+ if (!this.blockIdToNodeIds.has(blockId)) {
+ this.blockIdToNodeIds.set(blockId, []);
}
- this.blockIdtoNodeIds.get(blockId).push(anyToString(anyNodeId));
+ this.blockIdToNodeIds.get(blockId).push(anyToString(anyNodeId));
this.nodeIdToBlockId[anyNodeId] = blockId;
}
diff --git a/deps/v8/tools/turbolizer/src/view.ts b/deps/v8/tools/turbolizer/src/view.ts
index d93eeeda8f..cd76c6de44 100644
--- a/deps/v8/tools/turbolizer/src/view.ts
+++ b/deps/v8/tools/turbolizer/src/view.ts
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+import { GNode } from "./node";
+
export abstract class View {
protected container: HTMLElement;
protected divNode: HTMLElement;
@@ -22,8 +24,8 @@ export abstract class View {
}
export abstract class PhaseView extends View {
- public abstract initializeContent(data: any, rememberedSelection: Set<any>): void;
- public abstract detachSelection(): Set<string>;
+ public abstract initializeContent(data: any, rememberedSelection: Map<string, GNode>): void;
+ public abstract detachSelection(): Map<string, GNode>;
public abstract onresize(): void;
public abstract searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void;
diff --git a/deps/v8/tools/unittests/__init__.py b/deps/v8/tools/unittests/__init__.py
index 3841a861c8..e1bbf0cdfc 100644
--- a/deps/v8/tools/unittests/__init__.py
+++ b/deps/v8/tools/unittests/__init__.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/unittests/compare_torque_output_test.py b/deps/v8/tools/unittests/compare_torque_output_test.py
index d5a5c4a125..f5f240cdaf 100644
--- a/deps/v8/tools/unittests/compare_torque_output_test.py
+++ b/deps/v8/tools/unittests/compare_torque_output_test.py
@@ -14,11 +14,6 @@ TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
COMPARE_SCRIPT = os.path.join(TOOLS_DIR, 'compare_torque_output.py')
TEST_DATA = os.path.join(TOOLS_DIR, 'unittests', 'testdata', 'compare_torque')
-_PY3 = sys.version_info[0] == 3
-PYTHON_EXECUTABLE = "python%s" % sys.version_info[0]
-
-def maybe_bytes(value):
- return value.decode("utf-8") if _PY3 else value
class PredictableTest(unittest.TestCase):
def setUp(self):
@@ -29,7 +24,7 @@ class PredictableTest(unittest.TestCase):
file1 = os.path.join(TEST_DATA, test_folder, 'f1')
file2 = os.path.join(TEST_DATA, test_folder, 'f2')
proc = subprocess.Popen([
- PYTHON_EXECUTABLE, '-u',
+ sys.executable, '-u',
COMPARE_SCRIPT, file1, file2, self.tmp_file
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = proc.communicate()
@@ -39,7 +34,7 @@ class PredictableTest(unittest.TestCase):
exitcode, output = self._compare_from('test1')
self.assertEqual(1, exitcode)
full_match = r'^Found.*-line 2\+line 2 with diff.*\+line 3\n\n$'
- self.assertRegexpMatches(maybe_bytes(output), re.compile(full_match, re.M | re.S))
+ self.assertRegex(output.decode('utf-8'), re.compile(full_match, re.M | re.S))
def test_no_diff(self):
exitcode, output = self._compare_from('test2')
@@ -49,12 +44,12 @@ class PredictableTest(unittest.TestCase):
def test_right_only(self):
exitcode, output = self._compare_from('test3')
self.assertEqual(1, exitcode)
- self.assertRegexpMatches(maybe_bytes(output), r'Some files exist only in.*f2\nfile3')
+ self.assertRegex(output.decode('utf-8'), r'Some files exist only in.*f2\nfile3')
def test_left_only(self):
exitcode, output = self._compare_from('test4')
self.assertEqual(1, exitcode)
- self.assertRegexpMatches(maybe_bytes(output), r'Some files exist only in.*f1\nfile4')
+ self.assertRegex(output.decode('utf-8'), r'Some files exist only in.*f1\nfile4')
def tearDown(self):
os.unlink(self.tmp_file)
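Note: the hunks above drop the py2/py3 shims in favor of Python-3-only idioms — the child process is launched with sys.executable, the captured bytes are decoded explicitly, and assertRegexpMatches becomes assertRegex. A minimal standalone sketch of that pattern (the '-c' helper command and expected text are illustrative, not the real compare_torque_output.py invocation):

    # Sketch only: mirrors the sys.executable / decode / assertRegex pattern.
    import subprocess
    import sys
    import unittest


    class SubprocessOutputTest(unittest.TestCase):
        def test_stderr_matches(self):
            # '-c' keeps the example self-contained; the real test runs the
            # compare script against two input files instead.
            proc = subprocess.Popen(
                [sys.executable, '-c',
                 'import sys; sys.stderr.write("line 2 with diff\\n")'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, err = proc.communicate()
            # communicate() returns bytes in Python 3; decode before matching.
            self.assertRegex(err.decode('utf-8'), r'line 2 with diff')


    if __name__ == '__main__':
        unittest.main()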
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 6d8c5e2a13..18f91d7bda 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -1,11 +1,8 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
from collections import namedtuple
import json
import os
@@ -103,8 +100,8 @@ class UnitTest(unittest.TestCase):
def testBuildDirectory(self):
base_path = os.path.join(TEST_DATA, 'builddirs', 'dir1', 'out')
expected_path = os.path.join(base_path, 'build')
- self.assertEquals(
- expected_path, run_perf.find_build_directory(base_path, 'x64'))
+ self.assertEqual(expected_path,
+ run_perf.find_build_directory(base_path, 'x64'))
class PerfTest(unittest.TestCase):
@@ -383,7 +380,7 @@ class PerfTest(unittest.TestCase):
def testOneRunStdDevRegExp(self):
test_input = dict(V8_JSON)
- test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
+ test_input['stddev_regexp'] = r'^%s-stddev: (.+)$'
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
@@ -398,7 +395,7 @@ class PerfTest(unittest.TestCase):
def testTwoRunsStdDevRegExp(self):
test_input = dict(V8_JSON)
- test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
+ test_input['stddev_regexp'] = r'^%s-stddev: (.+)$'
test_input['run_count'] = 2
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 3\nRichards-stddev: 0.7\n'
@@ -410,13 +407,14 @@ class PerfTest(unittest.TestCase):
{'name': 'Richards', 'results': [2.0, 3.0], 'stddev': '0.7'},
{'name': 'DeltaBlue', 'results': [5.0, 6.0], 'stddev': '0.8'},
])
- self._VerifyErrors(
- ['Test test/Richards should only run once since a stddev is provided '
- 'by the test.',
- 'Test test/DeltaBlue should only run once since a stddev is provided '
- 'by the test.',
- 'Regexp "^DeltaBlue\-stddev: (.+)$" did not match for test '
- 'test/DeltaBlue.'])
+ self._VerifyErrors([
+ 'Test test/Richards should only run once since a stddev is provided '
+ 'by the test.',
+ 'Test test/DeltaBlue should only run once since a stddev is provided '
+ 'by the test.',
+ r'Regexp "^DeltaBlue-stddev: (.+)$" did not match for test '
+ r'test/DeltaBlue.'
+ ])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
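Note: the stddev_regexp strings above switch to raw strings because '\-' is an invalid escape sequence in a normal Python 3 string literal (it draws a DeprecationWarning, later a SyntaxWarning), and '-' needs no escaping outside a character class anyway. A small self-contained sketch, with an illustrative benchmark name and output line:

    # Sketch only: why the pattern is now a raw string.
    import re

    benchmark = 'Richards'
    line = 'Richards-stddev: 0.23'

    # A raw string (or simply no backslash) expresses the same pattern without
    # relying on Python keeping unknown escapes like '\-'.
    pattern = r'^%s-stddev: (.+)$' % benchmark

    match = re.match(pattern, line)
    assert match is not None
    assert match.group(1) == '0.23'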
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 762d3096ec..14daae6865 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -17,9 +17,6 @@ with different test suite extensions and build configurations.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import collections
import contextlib
import json
@@ -30,11 +27,7 @@ import sys
import tempfile
import unittest
-# TODO(https://crbug.com/1292016): Remove after Python3 migration.
-try:
- from cStringIO import StringIO
-except ImportError:
- from io import StringIO
+from io import StringIO
TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
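Note: with the cStringIO fallback removed, the test module relies on io.StringIO alone. A tiny sketch of the Python-3-only form, capturing stdout from an illustrative function rather than the real test runner:

    # Sketch only: io.StringIO as an in-memory text buffer for captured output.
    import contextlib
    from io import StringIO


    def noisy():
        print('hello from the test runner')


    buf = StringIO()
    with contextlib.redirect_stdout(buf):
        noisy()

    assert 'hello from the test runner' in buf.getvalue()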
diff --git a/deps/v8/tools/unittests/v8_presubmit_test.py b/deps/v8/tools/unittests/v8_presubmit_test.py
index 2c66d1891b..7b784b2d8c 100755
--- a/deps/v8/tools/unittests/v8_presubmit_test.py
+++ b/deps/v8/tools/unittests/v8_presubmit_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 92b9ab51d8..ac6d06f6c1 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
@@ -27,17 +27,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# for py2/py3 compatibility
-from __future__ import absolute_import
-from __future__ import print_function
-
-try:
- import hashlib
- md5er = hashlib.md5
-except ImportError as e:
- import md5
- md5er = md5.new
+import hashlib
+md5er = hashlib.md5
import json
@@ -55,13 +46,8 @@ from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
-PYTHON3 = sys.version_info >= (3, 0)
-
-def maybe_decode(arg, encoding="utf-8"):
- return arg.decode(encoding) if PYTHON3 else arg
-
-def maybe_encode(arg, encoding="utf-8"):
- return arg.encode(encoding) if PYTHON3 else arg
+def decode(arg, encoding="utf-8"):
+ return arg.decode(encoding)
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
@@ -92,6 +78,9 @@ ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-opt.*\n")
TOOLS_PATH = dirname(abspath(__file__))
+DEPS_DEPOT_TOOLS_PATH = abspath(
+ join(TOOLS_PATH, '..', 'third_party', 'depot_tools'))
+
def CppLintWorker(command):
try:
@@ -100,7 +89,7 @@ def CppLintWorker(command):
out_lines = ""
error_count = -1
while True:
- out_line = maybe_decode(process.stderr.readline())
+ out_line = decode(process.stderr.readline())
if out_line == '' and process.poll() != None:
if error_count == -1:
print("Failed to process %s" % command.pop())
@@ -120,7 +109,7 @@ def CppLintWorker(command):
process.kill()
except:
print('Error running cpplint.py. Please make sure you have depot_tools' +
- ' in your $PATH. Lint check skipped.')
+ ' in your third_party directory. Lint check skipped.')
process.kill()
def TorqueLintWorker(command):
@@ -130,14 +119,14 @@ def TorqueLintWorker(command):
out_lines = ""
error_count = 0
while True:
- out_line = maybe_decode(process.stderr.readline())
+ out_line = decode(process.stderr.readline())
if out_line == '' and process.poll() != None:
break
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
if error_count != 0:
- sys.stdout.write(
+ sys.stdout.write(
"warning: formatting and overwriting unformatted Torque files\n")
return error_count
except KeyboardInterrupt:
@@ -160,15 +149,16 @@ def JSLintWorker(command):
sys.stdout.write("error code " + str(rc) + " running clang-format.\n")
return rc
- if maybe_decode(output) != contents:
+ if decode(output) != contents:
return 1
return 0
except KeyboardInterrupt:
process.kill()
except Exception:
- print('Error running clang-format. Please make sure you have depot_tools' +
- ' in your $PATH. Lint check skipped.')
+ print(
+ 'Error running clang-format. Please make sure you have depot_tools' +
+ ' in your third_party directory. Lint check skipped.')
process.kill()
rc = format_file(command)
@@ -189,7 +179,7 @@ class FileContentsCache(object):
try:
sums_file = None
try:
- sums_file = open(self.sums_file_name, 'r')
+ sums_file = open(self.sums_file_name, 'rb')
self.sums = pickle.load(sums_file)
except:
# Cannot parse pickle for any reason. Not much we can do about it.
@@ -200,7 +190,7 @@ class FileContentsCache(object):
def Save(self):
try:
- sums_file = open(self.sums_file_name, 'w')
+ sums_file = open(self.sums_file_name, 'wb')
pickle.dump(self.sums, sums_file)
except:
# Failed to write pickle. Try to clean-up behind us.
@@ -217,8 +207,8 @@ class FileContentsCache(object):
changed_or_new = []
for file in files:
try:
- handle = open(file, "r")
- file_sum = md5er(maybe_encode(handle.read())).digest()
+ handle = open(file, "rb")
+ file_sum = md5er(handle.read()).digest()
if not file in self.sums or self.sums[file] != file_sum:
changed_or_new.append(file)
self.sums[file] = file_sum
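Note: the cache hunks above move to bytes end to end — the checksum file is opened in binary mode because pickle streams are bytes in Python 3, and source files are read as 'rb' so hashlib gets bytes without an encode step. A standalone sketch of that pattern, using a temporary directory rather than the real cache file:

    # Sketch only: binary-mode pickle plus hashing raw file bytes.
    import hashlib
    import os
    import pickle
    import tempfile

    sums = {}

    with tempfile.TemporaryDirectory() as tmp:
        source = os.path.join(tmp, 'example.cc')
        with open(source, 'wb') as f:
            f.write(b'int main() { return 0; }\n')

        # Hash the raw bytes; no encode/decode round-trip is needed.
        with open(source, 'rb') as handle:
            sums[source] = hashlib.md5(handle.read()).digest()

        # Persist and reload the cache through binary file objects.
        cache = os.path.join(tmp, '.cache')
        with open(cache, 'wb') as f:
            pickle.dump(sums, f)
        with open(cache, 'rb') as f:
            assert pickle.load(f) == sums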
@@ -401,13 +391,9 @@ class CppLintProcessor(CacheableSourceFileProcessor):
def GetProcessorScript(self):
filters = ','.join([n for n in LINT_RULES])
arguments = ['--filter', filters]
- for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
- path = path.strip('"')
- cpplint = os.path.join(path, 'cpplint.py')
- if os.path.isfile(cpplint):
- return cpplint, arguments
- return None, arguments
+ cpplint = os.path.join(DEPS_DEPOT_TOOLS_PATH, 'cpplint.py')
+ return cpplint, arguments
class TorqueLintProcessor(CacheableSourceFileProcessor):
@@ -459,13 +445,9 @@ class JSLintProcessor(CacheableSourceFileProcessor):
return JSLintWorker
def GetProcessorScript(self):
- for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
- path = path.strip('"')
- clang_format = os.path.join(path, 'clang_format.py')
- if os.path.isfile(clang_format):
- return clang_format, []
+ jslint = os.path.join(DEPS_DEPOT_TOOLS_PATH, 'clang_format.py')
+ return jslint, []
- return None, []
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-2][0-9] the V8 project authors. All rights reserved.')
@@ -502,7 +484,7 @@ class SourceProcessor(SourceFileProcessor):
output = subprocess.Popen('git ls-files --full-name',
stdout=PIPE, cwd=path, shell=True)
result = []
- for file in maybe_decode(output.stdout.read()).split():
+ for file in decode(output.stdout.read()).split():
for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
@@ -636,7 +618,7 @@ class SourceProcessor(SourceFileProcessor):
for file in files:
try:
handle = open(file, "rb")
- contents = maybe_decode(handle.read(), "ISO-8859-1")
+ contents = decode(handle.read(), "ISO-8859-1")
if len(contents) > 0 and not self.ProcessContents(file, contents):
success = False
violations += 1
@@ -742,16 +724,32 @@ def CheckDeps(workspace):
return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
+def FindTests(workspace):
+ scripts = []
+ # TODO(almuthanna): unskip valid tests when they are properly migrated
+ exclude = [
+ 'tools/clang',
+ 'tools/mb/mb_test.py',
+ 'tools/cppgc/gen_cmake_test.py',
+ 'tools/ignition/linux_perf_report_test.py',
+ 'tools/ignition/bytecode_dispatches_report_test.py',
+ 'tools/ignition/linux_perf_bytecode_annotate_test.py',
+ ]
+ scripts_without_excluded = []
+ for root, dirs, files in os.walk(join(workspace, 'tools')):
+ for f in files:
+ if f.endswith('_test.py'):
+ fullpath = os.path.join(root, f)
+ scripts.append(fullpath)
+ for script in scripts:
+ if not any(exc_dir in script for exc_dir in exclude):
+ scripts_without_excluded.append(script)
+ return scripts_without_excluded
+
+
def PyTests(workspace):
result = True
- for script in [
- join(workspace, 'tools', 'clusterfuzz', 'foozzie', 'v8_foozzie_test.py'),
- join(workspace, 'tools', 'release', 'test_scripts.py'),
- join(workspace, 'tools', 'unittests', 'predictable_wrapper_test.py'),
- join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
- join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
- join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
- ]:
+ for script in FindTests(workspace):
print('Running ' + script)
result &= subprocess.call(
[sys.executable, script], stdout=subprocess.PIPE) == 0
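Note: PyTests now discovers scripts via FindTests instead of a hard-coded list — it walks tools/ for *_test.py files and drops anything matching an exclusion list of path fragments. A rough standalone sketch of that discover-and-filter approach (the root directory and exclude entries here are made up; the real script pipes each test's stdout):

    # Sketch only: os.walk discovery with substring-based exclusion.
    import os
    import subprocess
    import sys


    def find_tests(root, exclude=()):
        """Collect *_test.py files under root, skipping excluded path fragments."""
        found = []
        for dirpath, _dirs, files in os.walk(root):
            for name in files:
                if not name.endswith('_test.py'):
                    continue
                path = os.path.join(dirpath, name)
                if not any(fragment in path for fragment in exclude):
                    found.append(path)
        return found


    if __name__ == '__main__':
        for script in find_tests('tools', exclude=('tools/clang',)):
            print('Running ' + script)
            subprocess.call([sys.executable, script], stdout=subprocess.DEVNULL)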
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 64b9ac20a8..66466b7e82 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
@@ -68,45 +69,45 @@ INSTANCE_TYPES = {
162: "INTERPRETER_DATA_TYPE",
163: "MODULE_REQUEST_TYPE",
164: "PROMISE_CAPABILITY_TYPE",
- 165: "PROMISE_REACTION_TYPE",
- 166: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 167: "PROTOTYPE_INFO_TYPE",
- 168: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
- 169: "SCRIPT_TYPE",
- 170: "SCRIPT_OR_MODULE_TYPE",
- 171: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 172: "STACK_FRAME_INFO_TYPE",
- 173: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 174: "TUPLE2_TYPE",
- 175: "WASM_CONTINUATION_OBJECT_TYPE",
- 176: "WASM_EXCEPTION_TAG_TYPE",
- 177: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 178: "FIXED_ARRAY_TYPE",
- 179: "HASH_TABLE_TYPE",
- 180: "EPHEMERON_HASH_TABLE_TYPE",
- 181: "GLOBAL_DICTIONARY_TYPE",
- 182: "NAME_DICTIONARY_TYPE",
- 183: "NAME_TO_INDEX_HASH_TABLE_TYPE",
- 184: "NUMBER_DICTIONARY_TYPE",
- 185: "ORDERED_HASH_MAP_TYPE",
- 186: "ORDERED_HASH_SET_TYPE",
- 187: "ORDERED_NAME_DICTIONARY_TYPE",
- 188: "REGISTERED_SYMBOL_TABLE_TYPE",
- 189: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 190: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 191: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 192: "SCRIPT_CONTEXT_TABLE_TYPE",
- 193: "BYTE_ARRAY_TYPE",
- 194: "BYTECODE_ARRAY_TYPE",
- 195: "FIXED_DOUBLE_ARRAY_TYPE",
- 196: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 197: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 198: "TURBOFAN_BITSET_TYPE_TYPE",
- 199: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
- 200: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
- 201: "TURBOFAN_RANGE_TYPE_TYPE",
- 202: "TURBOFAN_UNION_TYPE_TYPE",
- 203: "CELL_TYPE",
+ 165: "PROMISE_ON_STACK_TYPE",
+ 166: "PROMISE_REACTION_TYPE",
+ 167: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 168: "PROTOTYPE_INFO_TYPE",
+ 169: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
+ 170: "SCRIPT_TYPE",
+ 171: "SCRIPT_OR_MODULE_TYPE",
+ 172: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 173: "STACK_FRAME_INFO_TYPE",
+ 174: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 175: "TUPLE2_TYPE",
+ 176: "WASM_CONTINUATION_OBJECT_TYPE",
+ 177: "WASM_EXCEPTION_TAG_TYPE",
+ 178: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 179: "FIXED_ARRAY_TYPE",
+ 180: "HASH_TABLE_TYPE",
+ 181: "EPHEMERON_HASH_TABLE_TYPE",
+ 182: "GLOBAL_DICTIONARY_TYPE",
+ 183: "NAME_DICTIONARY_TYPE",
+ 184: "NAME_TO_INDEX_HASH_TABLE_TYPE",
+ 185: "NUMBER_DICTIONARY_TYPE",
+ 186: "ORDERED_HASH_MAP_TYPE",
+ 187: "ORDERED_HASH_SET_TYPE",
+ 188: "ORDERED_NAME_DICTIONARY_TYPE",
+ 189: "REGISTERED_SYMBOL_TABLE_TYPE",
+ 190: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 191: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 192: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 193: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 194: "BYTE_ARRAY_TYPE",
+ 195: "BYTECODE_ARRAY_TYPE",
+ 196: "FIXED_DOUBLE_ARRAY_TYPE",
+ 197: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 198: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 199: "TURBOFAN_BITSET_TYPE_TYPE",
+ 200: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
+ 201: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
+ 202: "TURBOFAN_RANGE_TYPE_TYPE",
+ 203: "TURBOFAN_UNION_TYPE_TYPE",
204: "FOREIGN_TYPE",
205: "WASM_INTERNAL_FUNCTION_TYPE",
206: "WASM_TYPE_INFO_TYPE",
@@ -142,39 +143,40 @@ INSTANCE_TYPES = {
236: "SYNTHETIC_MODULE_TYPE",
237: "WEAK_FIXED_ARRAY_TYPE",
238: "TRANSITION_ARRAY_TYPE",
- 239: "CODE_TYPE",
- 240: "CODE_DATA_CONTAINER_TYPE",
- 241: "COVERAGE_INFO_TYPE",
- 242: "EMBEDDER_DATA_ARRAY_TYPE",
- 243: "FEEDBACK_METADATA_TYPE",
- 244: "FEEDBACK_VECTOR_TYPE",
- 245: "FILLER_TYPE",
- 246: "FREE_SPACE_TYPE",
- 247: "INTERNAL_CLASS_TYPE",
- 248: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 249: "MAP_TYPE",
- 250: "MEGA_DOM_HANDLER_TYPE",
- 251: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 252: "PREPARSE_DATA_TYPE",
- 253: "PROPERTY_ARRAY_TYPE",
- 254: "PROPERTY_CELL_TYPE",
- 255: "SCOPE_INFO_TYPE",
- 256: "SHARED_FUNCTION_INFO_TYPE",
- 257: "SMI_BOX_TYPE",
- 258: "SMI_PAIR_TYPE",
- 259: "SORT_STATE_TYPE",
- 260: "SWISS_NAME_DICTIONARY_TYPE",
- 261: "WASM_API_FUNCTION_REF_TYPE",
- 262: "WASM_ON_FULFILLED_DATA_TYPE",
- 263: "WEAK_ARRAY_LIST_TYPE",
- 264: "WEAK_CELL_TYPE",
- 265: "WASM_ARRAY_TYPE",
- 266: "WASM_STRUCT_TYPE",
- 267: "JS_PROXY_TYPE",
+ 239: "CELL_TYPE",
+ 240: "CODE_TYPE",
+ 241: "CODE_DATA_CONTAINER_TYPE",
+ 242: "COVERAGE_INFO_TYPE",
+ 243: "EMBEDDER_DATA_ARRAY_TYPE",
+ 244: "FEEDBACK_METADATA_TYPE",
+ 245: "FEEDBACK_VECTOR_TYPE",
+ 246: "FILLER_TYPE",
+ 247: "FREE_SPACE_TYPE",
+ 248: "INTERNAL_CLASS_TYPE",
+ 249: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 250: "MAP_TYPE",
+ 251: "MEGA_DOM_HANDLER_TYPE",
+ 252: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 253: "PREPARSE_DATA_TYPE",
+ 254: "PROPERTY_ARRAY_TYPE",
+ 255: "PROPERTY_CELL_TYPE",
+ 256: "SCOPE_INFO_TYPE",
+ 257: "SHARED_FUNCTION_INFO_TYPE",
+ 258: "SMI_BOX_TYPE",
+ 259: "SMI_PAIR_TYPE",
+ 260: "SORT_STATE_TYPE",
+ 261: "SWISS_NAME_DICTIONARY_TYPE",
+ 262: "WASM_API_FUNCTION_REF_TYPE",
+ 263: "WASM_ON_FULFILLED_DATA_TYPE",
+ 264: "WEAK_ARRAY_LIST_TYPE",
+ 265: "WEAK_CELL_TYPE",
+ 266: "WASM_ARRAY_TYPE",
+ 267: "WASM_STRUCT_TYPE",
+ 268: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 268: "JS_GLOBAL_OBJECT_TYPE",
- 269: "JS_GLOBAL_PROXY_TYPE",
- 270: "JS_MODULE_NAMESPACE_TYPE",
+ 269: "JS_GLOBAL_OBJECT_TYPE",
+ 270: "JS_GLOBAL_PROXY_TYPE",
+ 271: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
@@ -273,287 +275,290 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02151): (249, "MetaMap"),
- ("read_only_space", 0x02179): (131, "NullMap"),
- ("read_only_space", 0x021a1): (234, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x021c9): (263, "WeakArrayListMap"),
- ("read_only_space", 0x0220d): (157, "EnumCacheMap"),
- ("read_only_space", 0x02241): (178, "FixedArrayMap"),
- ("read_only_space", 0x0228d): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x022d9): (246, "FreeSpaceMap"),
- ("read_only_space", 0x02301): (245, "OnePointerFillerMap"),
- ("read_only_space", 0x02329): (245, "TwoPointerFillerMap"),
- ("read_only_space", 0x02351): (131, "UninitializedMap"),
- ("read_only_space", 0x023c9): (131, "UndefinedMap"),
- ("read_only_space", 0x0240d): (130, "HeapNumberMap"),
- ("read_only_space", 0x02441): (131, "TheHoleMap"),
- ("read_only_space", 0x024a1): (131, "BooleanMap"),
- ("read_only_space", 0x02545): (193, "ByteArrayMap"),
- ("read_only_space", 0x0256d): (178, "FixedCOWArrayMap"),
- ("read_only_space", 0x02595): (179, "HashTableMap"),
- ("read_only_space", 0x025bd): (128, "SymbolMap"),
- ("read_only_space", 0x025e5): (40, "OneByteStringMap"),
- ("read_only_space", 0x0260d): (255, "ScopeInfoMap"),
- ("read_only_space", 0x02635): (256, "SharedFunctionInfoMap"),
- ("read_only_space", 0x0265d): (239, "CodeMap"),
- ("read_only_space", 0x02685): (203, "CellMap"),
- ("read_only_space", 0x026ad): (254, "GlobalPropertyCellMap"),
- ("read_only_space", 0x026d5): (204, "ForeignMap"),
- ("read_only_space", 0x026fd): (238, "TransitionArrayMap"),
- ("read_only_space", 0x02725): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x0274d): (244, "FeedbackVectorMap"),
- ("read_only_space", 0x02785): (131, "ArgumentsMarkerMap"),
- ("read_only_space", 0x027e5): (131, "ExceptionMap"),
- ("read_only_space", 0x02841): (131, "TerminationExceptionMap"),
- ("read_only_space", 0x028a9): (131, "OptimizedOutMap"),
- ("read_only_space", 0x02909): (131, "StaleRegisterMap"),
- ("read_only_space", 0x02969): (192, "ScriptContextTableMap"),
- ("read_only_space", 0x02991): (190, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x029b9): (243, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029e1): (178, "ArrayListMap"),
- ("read_only_space", 0x02a09): (129, "BigIntMap"),
- ("read_only_space", 0x02a31): (191, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a59): (194, "BytecodeArrayMap"),
- ("read_only_space", 0x02a81): (240, "CodeDataContainerMap"),
- ("read_only_space", 0x02aa9): (241, "CoverageInfoMap"),
- ("read_only_space", 0x02ad1): (195, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02af9): (181, "GlobalDictionaryMap"),
- ("read_only_space", 0x02b21): (159, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b49): (250, "MegaDomHandlerMap"),
- ("read_only_space", 0x02b71): (178, "ModuleInfoMap"),
- ("read_only_space", 0x02b99): (182, "NameDictionaryMap"),
- ("read_only_space", 0x02bc1): (159, "NoClosuresCellMap"),
- ("read_only_space", 0x02be9): (184, "NumberDictionaryMap"),
- ("read_only_space", 0x02c11): (159, "OneClosureCellMap"),
- ("read_only_space", 0x02c39): (185, "OrderedHashMapMap"),
- ("read_only_space", 0x02c61): (186, "OrderedHashSetMap"),
- ("read_only_space", 0x02c89): (183, "NameToIndexHashTableMap"),
- ("read_only_space", 0x02cb1): (188, "RegisteredSymbolTableMap"),
- ("read_only_space", 0x02cd9): (187, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02d01): (252, "PreparseDataMap"),
- ("read_only_space", 0x02d29): (253, "PropertyArrayMap"),
- ("read_only_space", 0x02d51): (153, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02d79): (153, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02da1): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02dc9): (189, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02df1): (228, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02e19): (229, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02e41): (230, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02e69): (235, "SourceTextModuleMap"),
- ("read_only_space", 0x02e91): (260, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02eb9): (236, "SyntheticModuleMap"),
- ("read_only_space", 0x02ee1): (261, "WasmApiFunctionRefMap"),
- ("read_only_space", 0x02f09): (222, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x02f31): (223, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x02f59): (205, "WasmInternalFunctionMap"),
- ("read_only_space", 0x02f81): (224, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x02fa9): (262, "WasmOnFulfilledDataMap"),
- ("read_only_space", 0x02fd1): (206, "WasmTypeInfoMap"),
- ("read_only_space", 0x02ff9): (237, "WeakFixedArrayMap"),
- ("read_only_space", 0x03021): (180, "EphemeronHashTableMap"),
- ("read_only_space", 0x03049): (242, "EmbedderDataArrayMap"),
- ("read_only_space", 0x03071): (264, "WeakCellMap"),
- ("read_only_space", 0x03099): (32, "StringMap"),
- ("read_only_space", 0x030c1): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x030e9): (33, "ConsStringMap"),
- ("read_only_space", 0x03111): (37, "ThinStringMap"),
- ("read_only_space", 0x03139): (35, "SlicedStringMap"),
- ("read_only_space", 0x03161): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x03189): (34, "ExternalStringMap"),
- ("read_only_space", 0x031b1): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x031d9): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x03201): (0, "InternalizedStringMap"),
- ("read_only_space", 0x03229): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x03251): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x03279): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x032a1): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x032c9): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x032f1): (104, "SharedOneByteStringMap"),
- ("read_only_space", 0x03319): (96, "SharedStringMap"),
- ("read_only_space", 0x03341): (109, "SharedThinOneByteStringMap"),
- ("read_only_space", 0x03369): (101, "SharedThinStringMap"),
- ("read_only_space", 0x03391): (96, "TwoByteSeqStringMigrationSentinelMap"),
- ("read_only_space", 0x033b9): (104, "OneByteSeqStringMigrationSentinelMap"),
- ("read_only_space", 0x033e1): (131, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x03409): (131, "BasicBlockCountersMarkerMap"),
- ("read_only_space", 0x0344d): (147, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x0354d): (161, "InterceptorInfoMap"),
- ("read_only_space", 0x0601d): (132, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x06045): (133, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x0606d): (134, "CallableTaskMap"),
- ("read_only_space", 0x06095): (135, "CallbackTaskMap"),
- ("read_only_space", 0x060bd): (136, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x060e5): (139, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x0610d): (140, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x06135): (141, "AccessCheckInfoMap"),
- ("read_only_space", 0x0615d): (142, "AccessorInfoMap"),
- ("read_only_space", 0x06185): (143, "AccessorPairMap"),
- ("read_only_space", 0x061ad): (144, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x061d5): (145, "AllocationMementoMap"),
- ("read_only_space", 0x061fd): (148, "AsmWasmDataMap"),
- ("read_only_space", 0x06225): (149, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x0624d): (150, "BreakPointMap"),
- ("read_only_space", 0x06275): (151, "BreakPointInfoMap"),
- ("read_only_space", 0x0629d): (152, "CachedTemplateObjectMap"),
- ("read_only_space", 0x062c5): (154, "CallSiteInfoMap"),
- ("read_only_space", 0x062ed): (155, "ClassPositionsMap"),
- ("read_only_space", 0x06315): (156, "DebugInfoMap"),
- ("read_only_space", 0x0633d): (158, "ErrorStackDataMap"),
- ("read_only_space", 0x06365): (160, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x0638d): (162, "InterpreterDataMap"),
- ("read_only_space", 0x063b5): (163, "ModuleRequestMap"),
- ("read_only_space", 0x063dd): (164, "PromiseCapabilityMap"),
- ("read_only_space", 0x06405): (165, "PromiseReactionMap"),
- ("read_only_space", 0x0642d): (166, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x06455): (167, "PrototypeInfoMap"),
- ("read_only_space", 0x0647d): (168, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x064a5): (169, "ScriptMap"),
- ("read_only_space", 0x064cd): (170, "ScriptOrModuleMap"),
- ("read_only_space", 0x064f5): (171, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x0651d): (172, "StackFrameInfoMap"),
- ("read_only_space", 0x06545): (173, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x0656d): (174, "Tuple2Map"),
- ("read_only_space", 0x06595): (175, "WasmContinuationObjectMap"),
- ("read_only_space", 0x065bd): (176, "WasmExceptionTagMap"),
- ("read_only_space", 0x065e5): (177, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x0660d): (197, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x06635): (233, "DescriptorArrayMap"),
- ("read_only_space", 0x0665d): (219, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x06685): (217, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x066ad): (220, "UncompiledDataWithoutPreparseDataWithJobMap"),
- ("read_only_space", 0x066d5): (218, "UncompiledDataWithPreparseDataAndJobMap"),
- ("read_only_space", 0x066fd): (251, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x06725): (198, "TurbofanBitsetTypeMap"),
- ("read_only_space", 0x0674d): (202, "TurbofanUnionTypeMap"),
- ("read_only_space", 0x06775): (201, "TurbofanRangeTypeMap"),
- ("read_only_space", 0x0679d): (199, "TurbofanHeapConstantTypeMap"),
- ("read_only_space", 0x067c5): (200, "TurbofanOtherNumberConstantTypeMap"),
- ("read_only_space", 0x067ed): (247, "InternalClassMap"),
- ("read_only_space", 0x06815): (258, "SmiPairMap"),
- ("read_only_space", 0x0683d): (257, "SmiBoxMap"),
- ("read_only_space", 0x06865): (225, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x0688d): (226, "ExportedSubClassMap"),
- ("read_only_space", 0x068b5): (231, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x068dd): (232, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x06905): (196, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x0692d): (248, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x06955): (227, "ExportedSubClass2Map"),
- ("read_only_space", 0x0697d): (259, "SortStateMap"),
- ("read_only_space", 0x069a5): (146, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x069cd): (146, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x069f5): (137, "LoadHandler1Map"),
- ("read_only_space", 0x06a1d): (137, "LoadHandler2Map"),
- ("read_only_space", 0x06a45): (137, "LoadHandler3Map"),
- ("read_only_space", 0x06a6d): (138, "StoreHandler0Map"),
- ("read_only_space", 0x06a95): (138, "StoreHandler1Map"),
- ("read_only_space", 0x06abd): (138, "StoreHandler2Map"),
- ("read_only_space", 0x06ae5): (138, "StoreHandler3Map"),
- ("map_space", 0x02151): (2113, "ExternalMap"),
- ("map_space", 0x02179): (2117, "JSMessageObjectMap"),
+ ("read_only_space", 0x02149): (250, "MetaMap"),
+ ("read_only_space", 0x02171): (131, "NullMap"),
+ ("read_only_space", 0x02199): (234, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x021c1): (264, "WeakArrayListMap"),
+ ("read_only_space", 0x02205): (157, "EnumCacheMap"),
+ ("read_only_space", 0x02239): (179, "FixedArrayMap"),
+ ("read_only_space", 0x02285): (8, "OneByteInternalizedStringMap"),
+ ("read_only_space", 0x022d1): (247, "FreeSpaceMap"),
+ ("read_only_space", 0x022f9): (246, "OnePointerFillerMap"),
+ ("read_only_space", 0x02321): (246, "TwoPointerFillerMap"),
+ ("read_only_space", 0x02349): (131, "UninitializedMap"),
+ ("read_only_space", 0x023c1): (131, "UndefinedMap"),
+ ("read_only_space", 0x02405): (130, "HeapNumberMap"),
+ ("read_only_space", 0x02439): (131, "TheHoleMap"),
+ ("read_only_space", 0x02499): (131, "BooleanMap"),
+ ("read_only_space", 0x0253d): (194, "ByteArrayMap"),
+ ("read_only_space", 0x02565): (179, "FixedCOWArrayMap"),
+ ("read_only_space", 0x0258d): (180, "HashTableMap"),
+ ("read_only_space", 0x025b5): (128, "SymbolMap"),
+ ("read_only_space", 0x025dd): (40, "OneByteStringMap"),
+ ("read_only_space", 0x02605): (256, "ScopeInfoMap"),
+ ("read_only_space", 0x0262d): (257, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x02655): (240, "CodeMap"),
+ ("read_only_space", 0x0267d): (239, "CellMap"),
+ ("read_only_space", 0x026a5): (255, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x026cd): (204, "ForeignMap"),
+ ("read_only_space", 0x026f5): (238, "TransitionArrayMap"),
+ ("read_only_space", 0x0271d): (45, "ThinOneByteStringMap"),
+ ("read_only_space", 0x02745): (245, "FeedbackVectorMap"),
+ ("read_only_space", 0x0277d): (131, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x027dd): (131, "ExceptionMap"),
+ ("read_only_space", 0x02839): (131, "TerminationExceptionMap"),
+ ("read_only_space", 0x028a1): (131, "OptimizedOutMap"),
+ ("read_only_space", 0x02901): (131, "StaleRegisterMap"),
+ ("read_only_space", 0x02961): (193, "ScriptContextTableMap"),
+ ("read_only_space", 0x02989): (191, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x029b1): (244, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029d9): (179, "ArrayListMap"),
+ ("read_only_space", 0x02a01): (129, "BigIntMap"),
+ ("read_only_space", 0x02a29): (192, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a51): (195, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a79): (241, "CodeDataContainerMap"),
+ ("read_only_space", 0x02aa1): (242, "CoverageInfoMap"),
+ ("read_only_space", 0x02ac9): (196, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02af1): (182, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02b19): (159, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b41): (251, "MegaDomHandlerMap"),
+ ("read_only_space", 0x02b69): (179, "ModuleInfoMap"),
+ ("read_only_space", 0x02b91): (183, "NameDictionaryMap"),
+ ("read_only_space", 0x02bb9): (159, "NoClosuresCellMap"),
+ ("read_only_space", 0x02be1): (185, "NumberDictionaryMap"),
+ ("read_only_space", 0x02c09): (159, "OneClosureCellMap"),
+ ("read_only_space", 0x02c31): (186, "OrderedHashMapMap"),
+ ("read_only_space", 0x02c59): (187, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c81): (184, "NameToIndexHashTableMap"),
+ ("read_only_space", 0x02ca9): (189, "RegisteredSymbolTableMap"),
+ ("read_only_space", 0x02cd1): (188, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02cf9): (253, "PreparseDataMap"),
+ ("read_only_space", 0x02d21): (254, "PropertyArrayMap"),
+ ("read_only_space", 0x02d49): (153, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02d71): (153, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d99): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02dc1): (190, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02de9): (228, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02e11): (229, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02e39): (230, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02e61): (235, "SourceTextModuleMap"),
+ ("read_only_space", 0x02e89): (261, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02eb1): (236, "SyntheticModuleMap"),
+ ("read_only_space", 0x02ed9): (262, "WasmApiFunctionRefMap"),
+ ("read_only_space", 0x02f01): (222, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x02f29): (223, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x02f51): (205, "WasmInternalFunctionMap"),
+ ("read_only_space", 0x02f79): (224, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x02fa1): (263, "WasmOnFulfilledDataMap"),
+ ("read_only_space", 0x02fc9): (206, "WasmTypeInfoMap"),
+ ("read_only_space", 0x02ff1): (237, "WeakFixedArrayMap"),
+ ("read_only_space", 0x03019): (181, "EphemeronHashTableMap"),
+ ("read_only_space", 0x03041): (243, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x03069): (265, "WeakCellMap"),
+ ("read_only_space", 0x03091): (32, "StringMap"),
+ ("read_only_space", 0x030b9): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x030e1): (33, "ConsStringMap"),
+ ("read_only_space", 0x03109): (37, "ThinStringMap"),
+ ("read_only_space", 0x03131): (35, "SlicedStringMap"),
+ ("read_only_space", 0x03159): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x03181): (34, "ExternalStringMap"),
+ ("read_only_space", 0x031a9): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x031d1): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x031f9): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x03221): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x03249): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x03271): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x03299): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x032c1): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x032e9): (104, "SharedOneByteStringMap"),
+ ("read_only_space", 0x03311): (96, "SharedStringMap"),
+ ("read_only_space", 0x03339): (109, "SharedThinOneByteStringMap"),
+ ("read_only_space", 0x03361): (101, "SharedThinStringMap"),
+ ("read_only_space", 0x03389): (96, "TwoByteSeqStringMigrationSentinelMap"),
+ ("read_only_space", 0x033b1): (104, "OneByteSeqStringMigrationSentinelMap"),
+ ("read_only_space", 0x033d9): (131, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x03401): (131, "BasicBlockCountersMarkerMap"),
+ ("read_only_space", 0x03445): (147, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x03545): (161, "InterceptorInfoMap"),
+ ("read_only_space", 0x06015): (132, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x0603d): (133, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x06065): (134, "CallableTaskMap"),
+ ("read_only_space", 0x0608d): (135, "CallbackTaskMap"),
+ ("read_only_space", 0x060b5): (136, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x060dd): (139, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x06105): (140, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x0612d): (141, "AccessCheckInfoMap"),
+ ("read_only_space", 0x06155): (142, "AccessorInfoMap"),
+ ("read_only_space", 0x0617d): (143, "AccessorPairMap"),
+ ("read_only_space", 0x061a5): (144, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x061cd): (145, "AllocationMementoMap"),
+ ("read_only_space", 0x061f5): (148, "AsmWasmDataMap"),
+ ("read_only_space", 0x0621d): (149, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x06245): (150, "BreakPointMap"),
+ ("read_only_space", 0x0626d): (151, "BreakPointInfoMap"),
+ ("read_only_space", 0x06295): (152, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x062bd): (154, "CallSiteInfoMap"),
+ ("read_only_space", 0x062e5): (155, "ClassPositionsMap"),
+ ("read_only_space", 0x0630d): (156, "DebugInfoMap"),
+ ("read_only_space", 0x06335): (158, "ErrorStackDataMap"),
+ ("read_only_space", 0x0635d): (160, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x06385): (162, "InterpreterDataMap"),
+ ("read_only_space", 0x063ad): (163, "ModuleRequestMap"),
+ ("read_only_space", 0x063d5): (164, "PromiseCapabilityMap"),
+ ("read_only_space", 0x063fd): (165, "PromiseOnStackMap"),
+ ("read_only_space", 0x06425): (166, "PromiseReactionMap"),
+ ("read_only_space", 0x0644d): (167, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x06475): (168, "PrototypeInfoMap"),
+ ("read_only_space", 0x0649d): (169, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x064c5): (170, "ScriptMap"),
+ ("read_only_space", 0x064ed): (171, "ScriptOrModuleMap"),
+ ("read_only_space", 0x06515): (172, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x0653d): (173, "StackFrameInfoMap"),
+ ("read_only_space", 0x06565): (174, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x0658d): (175, "Tuple2Map"),
+ ("read_only_space", 0x065b5): (176, "WasmContinuationObjectMap"),
+ ("read_only_space", 0x065dd): (177, "WasmExceptionTagMap"),
+ ("read_only_space", 0x06605): (178, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x0662d): (198, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x06655): (233, "DescriptorArrayMap"),
+ ("read_only_space", 0x0667d): (219, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x066a5): (217, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x066cd): (220, "UncompiledDataWithoutPreparseDataWithJobMap"),
+ ("read_only_space", 0x066f5): (218, "UncompiledDataWithPreparseDataAndJobMap"),
+ ("read_only_space", 0x0671d): (252, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x06745): (199, "TurbofanBitsetTypeMap"),
+ ("read_only_space", 0x0676d): (203, "TurbofanUnionTypeMap"),
+ ("read_only_space", 0x06795): (202, "TurbofanRangeTypeMap"),
+ ("read_only_space", 0x067bd): (200, "TurbofanHeapConstantTypeMap"),
+ ("read_only_space", 0x067e5): (201, "TurbofanOtherNumberConstantTypeMap"),
+ ("read_only_space", 0x0680d): (248, "InternalClassMap"),
+ ("read_only_space", 0x06835): (259, "SmiPairMap"),
+ ("read_only_space", 0x0685d): (258, "SmiBoxMap"),
+ ("read_only_space", 0x06885): (225, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x068ad): (226, "ExportedSubClassMap"),
+ ("read_only_space", 0x068d5): (231, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x068fd): (232, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x06925): (197, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x0694d): (249, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x06975): (227, "ExportedSubClass2Map"),
+ ("read_only_space", 0x0699d): (260, "SortStateMap"),
+ ("read_only_space", 0x069c5): (146, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x069ed): (146, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x06a15): (137, "LoadHandler1Map"),
+ ("read_only_space", 0x06a3d): (137, "LoadHandler2Map"),
+ ("read_only_space", 0x06a65): (137, "LoadHandler3Map"),
+ ("read_only_space", 0x06a8d): (138, "StoreHandler0Map"),
+ ("read_only_space", 0x06ab5): (138, "StoreHandler1Map"),
+ ("read_only_space", 0x06add): (138, "StoreHandler2Map"),
+ ("read_only_space", 0x06b05): (138, "StoreHandler3Map"),
+ ("map_space", 0x02149): (2113, "ExternalMap"),
+ ("map_space", 0x02171): (2117, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("read_only_space", 0x021f1): "EmptyWeakArrayList",
- ("read_only_space", 0x021fd): "EmptyDescriptorArray",
- ("read_only_space", 0x02235): "EmptyEnumCache",
- ("read_only_space", 0x02269): "EmptyFixedArray",
- ("read_only_space", 0x02271): "NullValue",
- ("read_only_space", 0x02379): "UninitializedValue",
- ("read_only_space", 0x023f1): "UndefinedValue",
- ("read_only_space", 0x02435): "NanValue",
- ("read_only_space", 0x02469): "TheHoleValue",
- ("read_only_space", 0x02495): "HoleNanValue",
- ("read_only_space", 0x024c9): "TrueValue",
- ("read_only_space", 0x02509): "FalseValue",
- ("read_only_space", 0x02539): "empty_string",
- ("read_only_space", 0x02775): "EmptyScopeInfo",
- ("read_only_space", 0x027ad): "ArgumentsMarker",
- ("read_only_space", 0x0280d): "Exception",
- ("read_only_space", 0x02869): "TerminationException",
- ("read_only_space", 0x028d1): "OptimizedOut",
- ("read_only_space", 0x02931): "StaleRegister",
- ("read_only_space", 0x03431): "EmptyPropertyArray",
- ("read_only_space", 0x03439): "EmptyByteArray",
- ("read_only_space", 0x03441): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x03475): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x03481): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x03489): "EmptySlowElementDictionary",
- ("read_only_space", 0x034ad): "EmptyOrderedHashMap",
- ("read_only_space", 0x034c1): "EmptyOrderedHashSet",
- ("read_only_space", 0x034d5): "EmptyFeedbackMetadata",
- ("read_only_space", 0x034e1): "EmptyPropertyDictionary",
- ("read_only_space", 0x03509): "EmptyOrderedPropertyDictionary",
- ("read_only_space", 0x03521): "EmptySwissPropertyDictionary",
- ("read_only_space", 0x03575): "NoOpInterceptorInfo",
- ("read_only_space", 0x0359d): "EmptyArrayList",
- ("read_only_space", 0x035a9): "EmptyWeakFixedArray",
- ("read_only_space", 0x035b1): "InfinityValue",
- ("read_only_space", 0x035bd): "MinusZeroValue",
- ("read_only_space", 0x035c9): "MinusInfinityValue",
- ("read_only_space", 0x035d5): "SelfReferenceMarker",
- ("read_only_space", 0x03615): "BasicBlockCountersMarker",
- ("read_only_space", 0x03659): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x03665): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x03695): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x036b9): "NativeScopeInfo",
- ("read_only_space", 0x036d1): "HashSeed",
- ("old_space", 0x0421d): "ArgumentsIteratorAccessor",
- ("old_space", 0x04261): "ArrayLengthAccessor",
- ("old_space", 0x042a5): "BoundFunctionLengthAccessor",
- ("old_space", 0x042e9): "BoundFunctionNameAccessor",
- ("old_space", 0x0432d): "ErrorStackAccessor",
- ("old_space", 0x04371): "FunctionArgumentsAccessor",
- ("old_space", 0x043b5): "FunctionCallerAccessor",
- ("old_space", 0x043f9): "FunctionNameAccessor",
- ("old_space", 0x0443d): "FunctionLengthAccessor",
- ("old_space", 0x04481): "FunctionPrototypeAccessor",
- ("old_space", 0x044c5): "StringLengthAccessor",
- ("old_space", 0x04509): "InvalidPrototypeValidityCell",
- ("old_space", 0x04511): "EmptyScript",
- ("old_space", 0x04551): "ManyClosuresCell",
- ("old_space", 0x0455d): "ArrayConstructorProtector",
- ("old_space", 0x04571): "NoElementsProtector",
- ("old_space", 0x04585): "MegaDOMProtector",
- ("old_space", 0x04599): "IsConcatSpreadableProtector",
- ("old_space", 0x045ad): "ArraySpeciesProtector",
- ("old_space", 0x045c1): "TypedArraySpeciesProtector",
- ("old_space", 0x045d5): "PromiseSpeciesProtector",
- ("old_space", 0x045e9): "RegExpSpeciesProtector",
- ("old_space", 0x045fd): "StringLengthProtector",
- ("old_space", 0x04611): "ArrayIteratorProtector",
- ("old_space", 0x04625): "ArrayBufferDetachingProtector",
- ("old_space", 0x04639): "PromiseHookProtector",
- ("old_space", 0x0464d): "PromiseResolveProtector",
- ("old_space", 0x04661): "MapIteratorProtector",
- ("old_space", 0x04675): "PromiseThenProtector",
- ("old_space", 0x04689): "SetIteratorProtector",
- ("old_space", 0x0469d): "StringIteratorProtector",
- ("old_space", 0x046b1): "SingleCharacterStringCache",
- ("old_space", 0x04ab9): "StringSplitCache",
- ("old_space", 0x04ec1): "RegExpMultipleCache",
- ("old_space", 0x052c9): "BuiltinsConstantsTable",
- ("old_space", 0x056f5): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x05719): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0573d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x05761): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x05785): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x057a9): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x057cd): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x057f1): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x05815): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x05839): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x0585d): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x05881): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x058a5): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x058c9): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x058ed): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x05911): "PromiseCatchFinallySharedFun",
- ("old_space", 0x05935): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x05959): "PromiseThenFinallySharedFun",
- ("old_space", 0x0597d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x059a1): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x059c5): "ProxyRevokeSharedFun",
+ ("read_only_space", 0x021e9): "EmptyWeakArrayList",
+ ("read_only_space", 0x021f5): "EmptyDescriptorArray",
+ ("read_only_space", 0x0222d): "EmptyEnumCache",
+ ("read_only_space", 0x02261): "EmptyFixedArray",
+ ("read_only_space", 0x02269): "NullValue",
+ ("read_only_space", 0x02371): "UninitializedValue",
+ ("read_only_space", 0x023e9): "UndefinedValue",
+ ("read_only_space", 0x0242d): "NanValue",
+ ("read_only_space", 0x02461): "TheHoleValue",
+ ("read_only_space", 0x0248d): "HoleNanValue",
+ ("read_only_space", 0x024c1): "TrueValue",
+ ("read_only_space", 0x02501): "FalseValue",
+ ("read_only_space", 0x02531): "empty_string",
+ ("read_only_space", 0x0276d): "EmptyScopeInfo",
+ ("read_only_space", 0x027a5): "ArgumentsMarker",
+ ("read_only_space", 0x02805): "Exception",
+ ("read_only_space", 0x02861): "TerminationException",
+ ("read_only_space", 0x028c9): "OptimizedOut",
+ ("read_only_space", 0x02929): "StaleRegister",
+ ("read_only_space", 0x03429): "EmptyPropertyArray",
+ ("read_only_space", 0x03431): "EmptyByteArray",
+ ("read_only_space", 0x03439): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x0346d): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x03479): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x03481): "EmptySlowElementDictionary",
+ ("read_only_space", 0x034a5): "EmptyOrderedHashMap",
+ ("read_only_space", 0x034b9): "EmptyOrderedHashSet",
+ ("read_only_space", 0x034cd): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x034d9): "EmptyPropertyDictionary",
+ ("read_only_space", 0x03501): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x03519): "EmptySwissPropertyDictionary",
+ ("read_only_space", 0x0356d): "NoOpInterceptorInfo",
+ ("read_only_space", 0x03595): "EmptyArrayList",
+ ("read_only_space", 0x035a1): "EmptyWeakFixedArray",
+ ("read_only_space", 0x035a9): "InfinityValue",
+ ("read_only_space", 0x035b5): "MinusZeroValue",
+ ("read_only_space", 0x035c1): "MinusInfinityValue",
+ ("read_only_space", 0x035cd): "SelfReferenceMarker",
+ ("read_only_space", 0x0360d): "BasicBlockCountersMarker",
+ ("read_only_space", 0x03651): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x0365d): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x0368d): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x036b1): "NativeScopeInfo",
+ ("read_only_space", 0x036c9): "HashSeed",
+ ("old_space", 0x04215): "ArgumentsIteratorAccessor",
+ ("old_space", 0x04259): "ArrayLengthAccessor",
+ ("old_space", 0x0429d): "BoundFunctionLengthAccessor",
+ ("old_space", 0x042e1): "BoundFunctionNameAccessor",
+ ("old_space", 0x04325): "ErrorStackAccessor",
+ ("old_space", 0x04369): "FunctionArgumentsAccessor",
+ ("old_space", 0x043ad): "FunctionCallerAccessor",
+ ("old_space", 0x043f1): "FunctionNameAccessor",
+ ("old_space", 0x04435): "FunctionLengthAccessor",
+ ("old_space", 0x04479): "FunctionPrototypeAccessor",
+ ("old_space", 0x044bd): "StringLengthAccessor",
+ ("old_space", 0x04501): "WrappedFunctionLengthAccessor",
+ ("old_space", 0x04545): "WrappedFunctionNameAccessor",
+ ("old_space", 0x04589): "InvalidPrototypeValidityCell",
+ ("old_space", 0x04591): "EmptyScript",
+ ("old_space", 0x045d1): "ManyClosuresCell",
+ ("old_space", 0x045dd): "ArrayConstructorProtector",
+ ("old_space", 0x045f1): "NoElementsProtector",
+ ("old_space", 0x04605): "MegaDOMProtector",
+ ("old_space", 0x04619): "IsConcatSpreadableProtector",
+ ("old_space", 0x0462d): "ArraySpeciesProtector",
+ ("old_space", 0x04641): "TypedArraySpeciesProtector",
+ ("old_space", 0x04655): "PromiseSpeciesProtector",
+ ("old_space", 0x04669): "RegExpSpeciesProtector",
+ ("old_space", 0x0467d): "StringLengthProtector",
+ ("old_space", 0x04691): "ArrayIteratorProtector",
+ ("old_space", 0x046a5): "ArrayBufferDetachingProtector",
+ ("old_space", 0x046b9): "PromiseHookProtector",
+ ("old_space", 0x046cd): "PromiseResolveProtector",
+ ("old_space", 0x046e1): "MapIteratorProtector",
+ ("old_space", 0x046f5): "PromiseThenProtector",
+ ("old_space", 0x04709): "SetIteratorProtector",
+ ("old_space", 0x0471d): "StringIteratorProtector",
+ ("old_space", 0x04731): "SingleCharacterStringCache",
+ ("old_space", 0x04b39): "StringSplitCache",
+ ("old_space", 0x04f41): "RegExpMultipleCache",
+ ("old_space", 0x05349): "BuiltinsConstantsTable",
+ ("old_space", 0x05775): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x05799): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x057bd): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x057e1): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x05805): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x05829): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x0584d): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x05871): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x05895): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x058b9): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x058dd): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x05901): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x05925): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x05949): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x0596d): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x05991): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x059b5): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x059d9): "PromiseThenFinallySharedFun",
+ ("old_space", 0x059fd): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x05a21): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x05a45): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.