-rw-r--r--deps/v8/.ycm_extra_conf.py4
-rw-r--r--deps/v8/AUTHORS1
-rw-r--r--deps/v8/BUILD.bazel59
-rw-r--r--deps/v8/BUILD.gn97
-rw-r--r--deps/v8/DEPS60
-rw-r--r--deps/v8/OWNERS10
-rw-r--r--deps/v8/WATCHLISTS22
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h46
-rw-r--r--deps/v8/bazel/defs.bzl59
-rw-r--r--deps/v8/include/cppgc/allocation.h1
-rw-r--r--deps/v8/include/cppgc/cross-thread-persistent.h45
-rw-r--r--deps/v8/include/cppgc/heap-consistency.h17
-rw-r--r--deps/v8/include/cppgc/internal/persistent-node.h14
-rw-r--r--deps/v8/include/cppgc/internal/write-barrier.h45
-rw-r--r--deps/v8/include/cppgc/liveness-broker.h5
-rw-r--r--deps/v8/include/cppgc/member.h2
-rw-r--r--deps/v8/include/cppgc/persistent.h8
-rw-r--r--deps/v8/include/cppgc/platform.h1
-rw-r--r--deps/v8/include/cppgc/visitor.h16
-rw-r--r--deps/v8/include/js_protocol.pdl2
-rw-r--r--deps/v8/include/v8-fast-api-calls.h63
-rw-r--r--deps/v8/include/v8-inspector.h3
-rw-r--r--deps/v8/include/v8-internal.h17
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h185
-rw-r--r--deps/v8/include/v8config.h49
-rw-r--r--deps/v8/infra/mb/mb_config.pyl15
-rw-r--r--deps/v8/infra/playground/OWNERS5
-rw-r--r--deps/v8/infra/playground/README.md1
-rw-r--r--deps/v8/infra/testing/builders.pyl4
-rw-r--r--deps/v8/src/DEPS1
-rw-r--r--deps/v8/src/api/api-arguments-inl.h3
-rw-r--r--deps/v8/src/api/api-inl.h31
-rw-r--r--deps/v8/src/api/api-natives.cc14
-rw-r--r--deps/v8/src/api/api.cc222
-rw-r--r--deps/v8/src/base/atomicops.h37
-rw-r--r--deps/v8/src/base/bits.h4
-rw-r--r--deps/v8/src/base/bounded-page-allocator.cc53
-rw-r--r--deps/v8/src/base/build_config.h7
-rw-r--r--deps/v8/src/base/page-allocator.cc2
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc18
-rw-r--r--deps/v8/src/base/platform/condition-variable.h2
-rw-r--r--deps/v8/src/base/platform/elapsed-timer.h93
-rw-r--r--deps/v8/src/base/platform/mutex.cc65
-rw-r--r--deps/v8/src/base/platform/mutex.h13
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc6
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc2
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc35
-rw-r--r--deps/v8/src/base/platform/platform.h11
-rw-r--r--deps/v8/src/base/platform/semaphore.cc2
-rw-r--r--deps/v8/src/base/platform/time.cc26
-rw-r--r--deps/v8/src/base/sys-info.cc2
-rw-r--r--deps/v8/src/base/win32-headers.h108
-rw-r--r--deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h3
-rw-r--r--deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h3
-rw-r--r--deps/v8/src/baseline/baseline-assembler-inl.h9
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h5
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc27
-rw-r--r--deps/v8/src/baseline/baseline-compiler.h9
-rw-r--r--deps/v8/src/baseline/baseline.cc5
-rw-r--r--deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h3
-rw-r--r--deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h2
-rw-r--r--deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h3
-rw-r--r--deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h3
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h3
-rw-r--r--deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h6
-rw-r--r--deps/v8/src/bigint/bigint-internal.cc26
-rw-r--r--deps/v8/src/bigint/bigint-internal.h31
-rw-r--r--deps/v8/src/bigint/bigint.h218
-rw-r--r--deps/v8/src/bigint/div-barrett.cc366
-rw-r--r--deps/v8/src/bigint/fromstring.cc72
-rw-r--r--deps/v8/src/bigint/mul-karatsuba.cc2
-rw-r--r--deps/v8/src/bigint/tostring.cc313
-rw-r--r--deps/v8/src/bigint/vector-arithmetic.h3
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc111
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc90
-rw-r--r--deps/v8/src/builtins/array-concat.tq49
-rw-r--r--deps/v8/src/builtins/array-findlast.tq110
-rw-r--r--deps/v8/src/builtins/array-findlastindex.tq111
-rw-r--r--deps/v8/src/builtins/arraybuffer.tq114
-rw-r--r--deps/v8/src/builtins/base.tq19
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc101
-rw-r--r--deps/v8/src/builtins/builtins-console.cc24
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h15
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc28
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc26
-rw-r--r--deps/v8/src/builtins/builtins-utils.h16
-rw-r--r--deps/v8/src/builtins/cast.tq9
-rw-r--r--deps/v8/src/builtins/conversion.tq2
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc150
-rw-r--r--deps/v8/src/builtins/iterator.tq2
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc81
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc81
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc84
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc85
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc44
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq8
-rw-r--r--deps/v8/src/builtins/typed-array-findlast.tq112
-rw-r--r--deps/v8/src/builtins/typed-array-findlastindex.tq115
-rw-r--r--deps/v8/src/builtins/typed-array-set.tq7
-rw-r--r--deps/v8/src/builtins/typed-array-slice.tq7
-rw-r--r--deps/v8/src/builtins/typed-array.tq2
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc168
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc46
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h21
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc90
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h17
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc57
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h29
-rw-r--r--deps/v8/src/codegen/arm64/cpu-arm64.cc4
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc60
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h9
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/assembler.cc33
-rw-r--r--deps/v8/src/codegen/assembler.h14
-rw-r--r--deps/v8/src/codegen/bailout-reason.h2
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc84
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h7
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc68
-rw-r--r--deps/v8/src/codegen/compilation-cache.h12
-rw-r--r--deps/v8/src/codegen/compiler.cc167
-rw-r--r--deps/v8/src/codegen/compiler.h49
-rw-r--r--deps/v8/src/codegen/constant-pool.cc12
-rw-r--r--deps/v8/src/codegen/constant-pool.h3
-rw-r--r--deps/v8/src/codegen/cpu-features.h12
-rw-r--r--deps/v8/src/codegen/external-reference.cc21
-rw-r--r--deps/v8/src/codegen/external-reference.h8
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h15
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc32
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h21
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc139
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h33
-rw-r--r--deps/v8/src/codegen/machine-type.h9
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc40
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.h22
-rw-r--r--deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h2
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc72
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h10
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc36
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h22
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc83
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h10
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc114
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h78
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h162
-rw-r--r--deps/v8/src/codegen/ppc/cpu-ppc.cc2
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc451
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h132
-rw-r--r--deps/v8/src/codegen/reloc-info.cc12
-rw-r--r--deps/v8/src/codegen/reloc-info.h10
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc115
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.h39
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.h2
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc396
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h32
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc21
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h14
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc410
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h142
-rw-r--r--deps/v8/src/codegen/script-details.h39
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc85
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h9
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h11
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc52
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h21
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc188
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h154
-rw-r--r--deps/v8/src/common/globals.h33
-rw-r--r--deps/v8/src/compiler-dispatcher/OWNERS1
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc (renamed from deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc)85
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h (renamed from deps/v8/src/compiler-dispatcher/compiler-dispatcher.h)61
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc16
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h12
-rw-r--r--deps/v8/src/compiler/access-info.cc749
-rw-r--r--deps/v8/src/compiler/access-info.h160
-rw-r--r--deps/v8/src/compiler/allocation-builder-inl.h1
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc26
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc11
-rw-r--r--deps/v8/src/compiler/backend/code-generator-impl.h6
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc33
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc44
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc25
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc32
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h24
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc13
-rw-r--r--deps/v8/src/compiler/backend/instruction.h38
-rw-r--r--deps/v8/src/compiler/backend/jump-threading.cc2
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc11
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc11
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc218
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc16
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc719
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc4
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc52
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc16
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc86
-rw-r--r--deps/v8/src/compiler/branch-elimination.h36
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc226
-rw-r--r--deps/v8/src/compiler/code-assembler.h2
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc241
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h15
-rw-r--r--deps/v8/src/compiler/compilation-dependency.h1
-rw-r--r--deps/v8/src/compiler/compiler-source-position-table.h6
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc306
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.h66
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc153
-rw-r--r--deps/v8/src/compiler/fast-api-calls.cc1
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc9
-rw-r--r--deps/v8/src/compiler/graph-assembler.h1
-rw-r--r--deps/v8/src/compiler/heap-refs.cc2065
-rw-r--r--deps/v8/src/compiler/heap-refs.h285
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc354
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h10
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc41
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc505
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc402
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h99
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc66
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc27
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h1
-rw-r--r--deps/v8/src/compiler/js-inlining.cc35
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc360
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h11
-rw-r--r--deps/v8/src/compiler/js-operator.cc157
-rw-r--r--deps/v8/src/compiler/js-operator.h275
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc43
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h2
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc10
-rw-r--r--deps/v8/src/compiler/loop-analysis.h17
-rw-r--r--deps/v8/src/compiler/machine-operator.cc4
-rw-r--r--deps/v8/src/compiler/map-inference.cc36
-rw-r--r--deps/v8/src/compiler/map-inference.h8
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc129
-rw-r--r--deps/v8/src/compiler/memory-lowering.h3
-rw-r--r--deps/v8/src/compiler/node-aux-data.h24
-rw-r--r--deps/v8/src/compiler/node-origin-table.h5
-rw-r--r--deps/v8/src/compiler/node-properties.cc58
-rw-r--r--deps/v8/src/compiler/node-properties.h11
-rw-r--r--deps/v8/src/compiler/persistent-map.h6
-rw-r--r--deps/v8/src/compiler/pipeline.cc87
-rw-r--r--deps/v8/src/compiler/processed-feedback.h14
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc111
-rw-r--r--deps/v8/src/compiler/property-access-builder.h27
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc3605
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.h40
-rw-r--r--deps/v8/src/compiler/serializer-hints.h175
-rw-r--r--deps/v8/src/compiler/typer.cc4
-rw-r--r--deps/v8/src/compiler/types.cc2
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc66
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h7
-rw-r--r--deps/v8/src/d8/d8-test.cc152
-rw-r--r--deps/v8/src/d8/d8.cc24
-rw-r--r--deps/v8/src/debug/debug-coverage.cc5
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc8
-rw-r--r--deps/v8/src/debug/debug-frames.cc1
-rw-r--r--deps/v8/src/debug/debug-interface.cc8
-rw-r--r--deps/v8/src/debug/debug-interface.h2
-rw-r--r--deps/v8/src/debug/debug-property-iterator.cc23
-rw-r--r--deps/v8/src/debug/debug-property-iterator.h2
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.cc10
-rw-r--r--deps/v8/src/debug/debug.cc14
-rw-r--r--deps/v8/src/debug/debug.h29
-rw-r--r--deps/v8/src/debug/liveedit.cc3
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc8
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc30
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h8
-rw-r--r--deps/v8/src/diagnostics/arm/unwinder-arm.cc3
-rw-r--r--deps/v8/src/diagnostics/arm64/unwinder-arm64.cc2
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc27
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.cc1
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.h5
-rw-r--r--deps/v8/src/diagnostics/ia32/unwinder-ia32.cc2
-rw-r--r--deps/v8/src/diagnostics/mips/unwinder-mips.cc2
-rw-r--r--deps/v8/src/diagnostics/mips64/unwinder-mips64.cc2
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc31
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc12
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc49
-rw-r--r--deps/v8/src/diagnostics/ppc/unwinder-ppc.cc3
-rw-r--r--deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc2
-rw-r--r--deps/v8/src/diagnostics/s390/unwinder-s390.cc3
-rw-r--r--deps/v8/src/diagnostics/system-jit-win.cc1
-rw-r--r--deps/v8/src/diagnostics/system-jit-win.h5
-rw-r--r--deps/v8/src/diagnostics/unwinder.cc2
-rw-r--r--deps/v8/src/diagnostics/unwinder.h5
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc32
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc5
-rw-r--r--deps/v8/src/diagnostics/x64/unwinder-x64.cc2
-rw-r--r--deps/v8/src/execution/arm64/pointer-authentication-arm64.h4
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc35
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.h14
-rw-r--r--deps/v8/src/execution/frames.cc4
-rw-r--r--deps/v8/src/execution/isolate-inl.h28
-rw-r--r--deps/v8/src/execution/isolate.cc83
-rw-r--r--deps/v8/src/execution/isolate.h64
-rw-r--r--deps/v8/src/execution/local-isolate-inl.h5
-rw-r--r--deps/v8/src/execution/local-isolate.cc18
-rw-r--r--deps/v8/src/execution/local-isolate.h28
-rw-r--r--deps/v8/src/execution/microtask-queue.cc42
-rw-r--r--deps/v8/src/execution/microtask-queue.h20
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.cc24
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc24
-rw-r--r--deps/v8/src/execution/pointer-authentication-dummy.h6
-rw-r--r--deps/v8/src/execution/pointer-authentication.h3
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h2
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc107
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc77
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.h7
-rw-r--r--deps/v8/src/execution/thread-local-top.cc2
-rw-r--r--deps/v8/src/execution/thread-local-top.h2
-rw-r--r--deps/v8/src/execution/vm-state-inl.h4
-rw-r--r--deps/v8/src/execution/vm-state.h3
-rw-r--r--deps/v8/src/flags/flag-definitions.h60
-rw-r--r--deps/v8/src/heap/combined-heap.h17
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc49
-rw-r--r--deps/v8/src/heap/concurrent-marking.h12
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc8
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h2
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc31
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap.cc2
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h6
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc39
-rw-r--r--deps/v8/src/heap/cppgc/process-heap.h1
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc19
-rw-r--r--deps/v8/src/heap/factory.cc77
-rw-r--r--deps/v8/src/heap/factory.h4
-rw-r--r--deps/v8/src/heap/gc-tracer.cc3
-rw-r--r--deps/v8/src/heap/heap-inl.h59
-rw-r--r--deps/v8/src/heap/heap.cc161
-rw-r--r--deps/v8/src/heap/heap.h38
-rw-r--r--deps/v8/src/heap/incremental-marking.cc11
-rw-r--r--deps/v8/src/heap/incremental-marking.h4
-rw-r--r--deps/v8/src/heap/linear-allocation-area.h117
-rw-r--r--deps/v8/src/heap/local-allocator-inl.h3
-rw-r--r--deps/v8/src/heap/local-factory-inl.h4
-rw-r--r--deps/v8/src/heap/local-heap-inl.h8
-rw-r--r--deps/v8/src/heap/local-heap.h8
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h17
-rw-r--r--deps/v8/src/heap/mark-compact.cc413
-rw-r--r--deps/v8/src/heap/mark-compact.h31
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h59
-rw-r--r--deps/v8/src/heap/marking-visitor.h13
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.h1
-rw-r--r--deps/v8/src/heap/memory-chunk.cc15
-rw-r--r--deps/v8/src/heap/new-spaces-inl.h16
-rw-r--r--deps/v8/src/heap/new-spaces.cc14
-rw-r--r--deps/v8/src/heap/object-stats.cc6
-rw-r--r--deps/v8/src/heap/paged-spaces-inl.h38
-rw-r--r--deps/v8/src/heap/paged-spaces.h2
-rw-r--r--deps/v8/src/heap/parked-scope.h2
-rw-r--r--deps/v8/src/heap/remembered-set.h5
-rw-r--r--deps/v8/src/heap/safepoint.cc1
-rw-r--r--deps/v8/src/heap/scavenger-inl.h8
-rw-r--r--deps/v8/src/heap/scavenger.cc15
-rw-r--r--deps/v8/src/heap/scavenger.h1
-rw-r--r--deps/v8/src/heap/spaces-inl.h29
-rw-r--r--deps/v8/src/heap/spaces.cc2
-rw-r--r--deps/v8/src/heap/spaces.h57
-rw-r--r--deps/v8/src/heap/sweeper.cc7
-rw-r--r--deps/v8/src/heap/sweeper.h1
-rw-r--r--deps/v8/src/heap/third-party/heap-api-stub.cc6
-rw-r--r--deps/v8/src/heap/third-party/heap-api.h4
-rw-r--r--deps/v8/src/heap/weak-object-worklists.cc21
-rw-r--r--deps/v8/src/heap/weak-object-worklists.h4
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h6
-rw-r--r--deps/v8/src/ic/handler-configuration.h1
-rw-r--r--deps/v8/src/ic/ic.cc53
-rw-r--r--deps/v8/src/ic/ic.h3
-rw-r--r--deps/v8/src/init/OWNERS1
-rw-r--r--deps/v8/src/init/bootstrapper.cc222
-rw-r--r--deps/v8/src/init/heap-symbols.h441
-rw-r--r--deps/v8/src/init/v8.cc6
-rw-r--r--deps/v8/src/inspector/v8-console.cc6
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc2
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc11
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc5
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h3
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc53
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h7
-rw-r--r--deps/v8/src/inspector/value-mirror.cc55
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc4
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc10
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc6
-rw-r--r--deps/v8/src/libsampler/sampler.cc2
-rw-r--r--deps/v8/src/logging/counters-definitions.h58
-rw-r--r--deps/v8/src/logging/counters-scopes.h191
-rw-r--r--deps/v8/src/logging/counters.cc67
-rw-r--r--deps/v8/src/logging/counters.h230
-rw-r--r--deps/v8/src/logging/local-logger.cc11
-rw-r--r--deps/v8/src/logging/local-logger.h6
-rw-r--r--deps/v8/src/logging/log-inl.h13
-rw-r--r--deps/v8/src/logging/log-utils.cc2
-rw-r--r--deps/v8/src/logging/log.cc100
-rw-r--r--deps/v8/src/logging/log.h60
-rw-r--r--deps/v8/src/logging/runtime-call-stats.h1
-rw-r--r--deps/v8/src/logging/tracing-flags.h2
-rw-r--r--deps/v8/src/numbers/conversions.cc317
-rw-r--r--deps/v8/src/objects/allocation-site-inl.h3
-rw-r--r--deps/v8/src/objects/allocation-site.tq1
-rw-r--r--deps/v8/src/objects/api-callbacks.tq4
-rw-r--r--deps/v8/src/objects/arguments.tq7
-rw-r--r--deps/v8/src/objects/backing-store.cc36
-rw-r--r--deps/v8/src/objects/backing-store.h21
-rw-r--r--deps/v8/src/objects/bigint.cc170
-rw-r--r--deps/v8/src/objects/bigint.h15
-rw-r--r--deps/v8/src/objects/bigint.tq5
-rw-r--r--deps/v8/src/objects/cell.tq1
-rw-r--r--deps/v8/src/objects/code-inl.h25
-rw-r--r--deps/v8/src/objects/code.cc10
-rw-r--r--deps/v8/src/objects/code.h46
-rw-r--r--deps/v8/src/objects/code.tq12
-rw-r--r--deps/v8/src/objects/contexts-inl.h17
-rw-r--r--deps/v8/src/objects/contexts.cc3
-rw-r--r--deps/v8/src/objects/contexts.h11
-rw-r--r--deps/v8/src/objects/data-handler-inl.h7
-rw-r--r--deps/v8/src/objects/data-handler.h18
-rw-r--r--deps/v8/src/objects/data-handler.tq12
-rw-r--r--deps/v8/src/objects/debug-objects.tq4
-rw-r--r--deps/v8/src/objects/descriptor-array.tq1
-rw-r--r--deps/v8/src/objects/elements-kind.h6
-rw-r--r--deps/v8/src/objects/elements.cc2
-rw-r--r--deps/v8/src/objects/embedder-data-array.tq1
-rw-r--r--deps/v8/src/objects/feedback-cell.tq1
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h11
-rw-r--r--deps/v8/src/objects/feedback-vector.cc20
-rw-r--r--deps/v8/src/objects/feedback-vector.h9
-rw-r--r--deps/v8/src/objects/feedback-vector.tq1
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h12
-rw-r--r--deps/v8/src/objects/fixed-array.h15
-rw-r--r--deps/v8/src/objects/fixed-array.tq12
-rw-r--r--deps/v8/src/objects/foreign.tq1
-rw-r--r--deps/v8/src/objects/free-space.tq1
-rw-r--r--deps/v8/src/objects/heap-number.tq1
-rw-r--r--deps/v8/src/objects/heap-object.h3
-rw-r--r--deps/v8/src/objects/heap-object.tq1
-rw-r--r--deps/v8/src/objects/instance-type.h3
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h34
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc12
-rw-r--r--deps/v8/src/objects/js-array-buffer.h2
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq4
-rw-r--r--deps/v8/src/objects/js-array.tq6
-rw-r--r--deps/v8/src/objects/js-break-iterator.tq1
-rw-r--r--deps/v8/src/objects/js-collator.tq1
-rw-r--r--deps/v8/src/objects/js-collection-iterator.tq1
-rw-r--r--deps/v8/src/objects/js-collection.tq18
-rw-r--r--deps/v8/src/objects/js-date-time-format.tq1
-rw-r--r--deps/v8/src/objects/js-display-names.cc14
-rw-r--r--deps/v8/src/objects/js-display-names.tq1
-rw-r--r--deps/v8/src/objects/js-function-inl.h62
-rw-r--r--deps/v8/src/objects/js-function.cc2
-rw-r--r--deps/v8/src/objects/js-function.h19
-rw-r--r--deps/v8/src/objects/js-function.tq3
-rw-r--r--deps/v8/src/objects/js-generator.tq4
-rw-r--r--deps/v8/src/objects/js-list-format.tq1
-rw-r--r--deps/v8/src/objects/js-locale.cc32
-rw-r--r--deps/v8/src/objects/js-locale.tq1
-rw-r--r--deps/v8/src/objects/js-number-format.tq1
-rw-r--r--deps/v8/src/objects/js-objects-inl.h5
-rw-r--r--deps/v8/src/objects/js-objects.cc48
-rw-r--r--deps/v8/src/objects/js-objects.h7
-rw-r--r--deps/v8/src/objects/js-objects.tq15
-rw-r--r--deps/v8/src/objects/js-plural-rules.tq1
-rw-r--r--deps/v8/src/objects/js-promise.tq1
-rw-r--r--deps/v8/src/objects/js-proxy.tq2
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator.tq1
-rw-r--r--deps/v8/src/objects/js-regexp.h13
-rw-r--r--deps/v8/src/objects/js-regexp.tq4
-rw-r--r--deps/v8/src/objects/js-relative-time-format.tq1
-rw-r--r--deps/v8/src/objects/js-segment-iterator.tq1
-rw-r--r--deps/v8/src/objects/js-segmenter.tq1
-rw-r--r--deps/v8/src/objects/js-segments.tq1
-rw-r--r--deps/v8/src/objects/js-weak-refs.tq7
-rw-r--r--deps/v8/src/objects/keys.cc16
-rw-r--r--deps/v8/src/objects/keys.h5
-rw-r--r--deps/v8/src/objects/literal-objects.cc25
-rw-r--r--deps/v8/src/objects/literal-objects.tq2
-rw-r--r--deps/v8/src/objects/lookup.cc71
-rw-r--r--deps/v8/src/objects/lookup.h7
-rw-r--r--deps/v8/src/objects/map-inl.h15
-rw-r--r--deps/v8/src/objects/map-updater.cc122
-rw-r--r--deps/v8/src/objects/map-updater.h6
-rw-r--r--deps/v8/src/objects/map.cc263
-rw-r--r--deps/v8/src/objects/map.h42
-rw-r--r--deps/v8/src/objects/map.tq2
-rw-r--r--deps/v8/src/objects/megadom-handler.tq1
-rw-r--r--deps/v8/src/objects/microtask.tq3
-rw-r--r--deps/v8/src/objects/module-inl.h10
-rw-r--r--deps/v8/src/objects/module.cc45
-rw-r--r--deps/v8/src/objects/module.h36
-rw-r--r--deps/v8/src/objects/module.tq10
-rw-r--r--deps/v8/src/objects/name-inl.h9
-rw-r--r--deps/v8/src/objects/name.h6
-rw-r--r--deps/v8/src/objects/name.tq2
-rw-r--r--deps/v8/src/objects/object-list-macros.h3
-rw-r--r--deps/v8/src/objects/object-macros-undef.h7
-rw-r--r--deps/v8/src/objects/object-macros.h25
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h47
-rw-r--r--deps/v8/src/objects/objects-inl.h66
-rw-r--r--deps/v8/src/objects/objects.cc40
-rw-r--r--deps/v8/src/objects/objects.h4
-rw-r--r--deps/v8/src/objects/ordered-hash-table.tq4
-rw-r--r--deps/v8/src/objects/primitive-heap-object.tq1
-rw-r--r--deps/v8/src/objects/promise.tq12
-rw-r--r--deps/v8/src/objects/property-array-inl.h5
-rw-r--r--deps/v8/src/objects/property-array.h11
-rw-r--r--deps/v8/src/objects/property-cell-inl.h4
-rw-r--r--deps/v8/src/objects/property-cell.h11
-rw-r--r--deps/v8/src/objects/property-descriptor-object.tq1
-rw-r--r--deps/v8/src/objects/prototype-info.tq1
-rw-r--r--deps/v8/src/objects/regexp-match-info-inl.h68
-rw-r--r--deps/v8/src/objects/regexp-match-info.h10
-rw-r--r--deps/v8/src/objects/scope-info.tq1
-rw-r--r--deps/v8/src/objects/script-inl.h10
-rw-r--r--deps/v8/src/objects/script.h8
-rw-r--r--deps/v8/src/objects/script.tq1
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h106
-rw-r--r--deps/v8/src/objects/shared-function-info.cc3
-rw-r--r--deps/v8/src/objects/shared-function-info.h25
-rw-r--r--deps/v8/src/objects/shared-function-info.tq6
-rw-r--r--deps/v8/src/objects/source-text-module.cc96
-rw-r--r--deps/v8/src/objects/source-text-module.tq3
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc22
-rw-r--r--deps/v8/src/objects/stack-frame-info.tq1
-rw-r--r--deps/v8/src/objects/string-comparator.cc24
-rw-r--r--deps/v8/src/objects/string-comparator.h9
-rw-r--r--deps/v8/src/objects/string-inl.h10
-rw-r--r--deps/v8/src/objects/string-table.cc2
-rw-r--r--deps/v8/src/objects/string.cc77
-rw-r--r--deps/v8/src/objects/string.h45
-rw-r--r--deps/v8/src/objects/string.tq8
-rw-r--r--deps/v8/src/objects/struct-inl.h14
-rw-r--r--deps/v8/src/objects/struct.h11
-rw-r--r--deps/v8/src/objects/struct.tq4
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.cc7
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.h3
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.tq1
-rw-r--r--deps/v8/src/objects/synthetic-module.cc2
-rw-r--r--deps/v8/src/objects/synthetic-module.tq1
-rw-r--r--deps/v8/src/objects/tagged-field.h2
-rw-r--r--deps/v8/src/objects/template-objects.tq2
-rw-r--r--deps/v8/src/objects/templates-inl.h27
-rw-r--r--deps/v8/src/objects/templates.cc15
-rw-r--r--deps/v8/src/objects/templates.h8
-rw-r--r--deps/v8/src/objects/templates.tq8
-rw-r--r--deps/v8/src/objects/transitions-inl.h43
-rw-r--r--deps/v8/src/objects/transitions.cc115
-rw-r--r--deps/v8/src/objects/transitions.h24
-rw-r--r--deps/v8/src/objects/value-serializer.cc3
-rw-r--r--deps/v8/src/objects/visitors.h6
-rw-r--r--deps/v8/src/parsing/parse-info.cc11
-rw-r--r--deps/v8/src/parsing/parse-info.h10
-rw-r--r--deps/v8/src/parsing/parser.cc4
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc4
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc54
-rw-r--r--deps/v8/src/profiler/profile-generator.cc10
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc11
-rw-r--r--deps/v8/src/profiler/strings-storage.cc21
-rw-r--r--deps/v8/src/profiler/strings-storage.h2
-rw-r--r--deps/v8/src/regexp/experimental/experimental-compiler.cc3
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc8
-rw-r--r--deps/v8/src/regexp/regexp-ast.h26
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc117
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc133
-rw-r--r--deps/v8/src/regexp/regexp-compiler.h11
-rw-r--r--deps/v8/src/regexp/regexp-nodes.h25
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc115
-rw-r--r--deps/v8/src/regexp/regexp.cc7
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc13
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc19
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc49
-rw-r--r--deps/v8/src/runtime/runtime-module.cc4
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc2
-rw-r--r--deps/v8/src/runtime/runtime-test-wasm.cc8
-rw-r--r--deps/v8/src/runtime/runtime-test.cc110
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc27
-rw-r--r--deps/v8/src/runtime/runtime.h151
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc298
-rw-r--r--deps/v8/src/snapshot/code-serializer.h46
-rw-r--r--deps/v8/src/snapshot/context-deserializer.h3
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc320
-rw-r--r--deps/v8/src/snapshot/deserializer.h36
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc2
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc60
-rw-r--r--deps/v8/src/snapshot/object-deserializer.h21
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.h2
-rw-r--r--deps/v8/src/snapshot/serializer.cc20
-rw-r--r--deps/v8/src/snapshot/serializer.h1
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.h2
-rw-r--r--deps/v8/src/strings/string-stream.cc2
-rw-r--r--deps/v8/src/torque/constants.h3
-rw-r--r--deps/v8/src/torque/torque-parser.cc15
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.cc59
-rw-r--r--deps/v8/src/trap-handler/handler-outside-simulator.cc33
-rw-r--r--deps/v8/src/trap-handler/trap-handler-simulator.h37
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h19
-rw-r--r--deps/v8/src/utils/v8dll-main.cc2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h100
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h95
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h142
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h2
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc3
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h2
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc62
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h46
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h83
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h83
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h370
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h78
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h592
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h137
-rw-r--r--deps/v8/src/wasm/c-api.cc2
-rw-r--r--deps/v8/src/wasm/code-space-access.cc75
-rw-r--r--deps/v8/src/wasm/code-space-access.h45
-rw-r--r--deps/v8/src/wasm/compilation-environment.h3
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h72
-rw-r--r--deps/v8/src/wasm/function-compiler.cc31
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc19
-rw-r--r--deps/v8/src/wasm/module-compiler.cc174
-rw-r--r--deps/v8/src/wasm/module-decoder.cc102
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc118
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc289
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h39
-rw-r--r--deps/v8/src/wasm/wasm-constants.h7
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc11
-rw-r--r--deps/v8/src/wasm/wasm-js.cc485
-rw-r--r--deps/v8/src/wasm/wasm-limits.h2
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc4
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h13
-rw-r--r--deps/v8/src/wasm/wasm-module.cc61
-rw-r--r--deps/v8/src/wasm/wasm-module.h24
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h15
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc111
-rw-r--r--deps/v8/src/wasm/wasm-objects.h91
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq29
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc28
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc617
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.h51
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status3
-rw-r--r--deps/v8/test/bigint/BUILD.gn4
-rw-r--r--deps/v8/test/bigint/bigint-shell.cc113
-rw-r--r--deps/v8/test/cctest/BUILD.gn2
-rw-r--r--deps/v8/test/cctest/cctest.cc6
-rw-r--r--deps/v8/test/cctest/cctest.status40
-rw-r--r--deps/v8/test/cctest/compiler/serializer-tester.cc383
-rw-r--r--deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc51
-rw-r--r--deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc10
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction-scheduler.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc25
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc21
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc5
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc2
-rw-r--r--deps/v8/test/cctest/libsampler/signals-and-mutexes.cc2
-rw-r--r--deps/v8/test/cctest/test-api.cc137
-rw-r--r--deps/v8/test/cctest/test-atomicops.cc17
-rw-r--r--deps/v8/test/cctest/test-compiler.cc8
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc8
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc26
-rw-r--r--deps/v8/test/cctest/test-icache.cc24
-rw-r--r--deps/v8/test/cctest/test-serialize.cc254
-rw-r--r--deps/v8/test/cctest/test-stack-unwinding-win64.cc5
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc8
-rw-r--r--deps/v8/test/cctest/test-web-snapshots.cc145
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc48
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc11
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc36
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc38
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h7
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.cc18
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.h2
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc96
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.h2
-rw-r--r--deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js4
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-9067.js2
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc1
-rw-r--r--deps/v8/test/fuzzer/inspector-fuzzer.cc18
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc167
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc103
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h3
-rw-r--r--deps/v8/test/inspector/BUILD.gn16
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage-block.js2
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage.js3
-rw-r--r--deps/v8/test/inspector/debugger/async-instrumentation-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-set-timeout-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt30
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt42
-rw-r--r--deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-imports-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-expected.txt4
-rw-r--r--deps/v8/test/inspector/inspector-test.cc18
-rw-r--r--deps/v8/test/inspector/inspector.status12
-rw-r--r--deps/v8/test/inspector/isolate-data.cc11
-rw-r--r--deps/v8/test/inspector/isolate-data.h1
-rw-r--r--deps/v8/test/inspector/protocol-test.js2
-rw-r--r--deps/v8/test/inspector/runtime/command-line-api-expected.txt9
-rw-r--r--deps/v8/test/inspector/runtime/console-methods-expected.txt66
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-generate-preview-expected.txt45
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js17
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-entries.js2
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-expected.txt103
-rw-r--r--deps/v8/test/inspector/sessions/runtime-command-line-api-expected.txt7
-rw-r--r--deps/v8/test/intl/regress-1224869.js18
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.js5
-rw-r--r--deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.out5
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.js2
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.out6
-rw-r--r--deps/v8/test/message/fail/wasm-exception-throw.js2
-rw-r--r--deps/v8/test/message/fail/wasm-exception-throw.out6
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-trap.out2
-rw-r--r--deps/v8/test/message/wasm-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-streaming.out2
-rw-r--r--deps/v8/test/mjsunit/baseline/batch-compilation.js4
-rw-r--r--deps/v8/test/mjsunit/baseline/flush-baseline-code.js83
-rw-r--r--deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js57
-rw-r--r--deps/v8/test/mjsunit/check-bounds-array-index.js2
-rw-r--r--deps/v8/test/mjsunit/check-bounds-string-from-char-code-at.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-ad-hoc.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-async.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-noopt.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-opt.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-class-fields.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-precise.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/array-slice-clone.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js84
-rw-r--r--deps/v8/test/mjsunit/compiler/catch-block-load.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-proto-change.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-add-static.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/construct-bound-function.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/construct-object.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/construct-receiver.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-detached.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-builtins.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-calls.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-helpers.js36
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js55
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-sequences.js247
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/js-create-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/js-create.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-const-field.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js36
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1125145.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-905555-2.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-905555.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9137-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9137-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9945-1.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9945-2.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-11977.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1230260.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-accessors.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-apply.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-call.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/string-startswith.js13
-rw-r--r--deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js10
-rw-r--r--deps/v8/test/mjsunit/const-dict-tracking.js11
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-unlinked.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes-constructor.js9
-rw-r--r--deps/v8/test/mjsunit/es9/object-rest-basic.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/array-findlast-unscopables.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-findlast.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-findlastindex.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/div-special-cases.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regress-minuszero.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regress-tostring-2.js32
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regress-tostring.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/error-cause.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-indirection.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-throwing.mjs5
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error.mjs7
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-top-level-await-cycle-error.mjs12
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/typedarray-findlast.js226
-rw-r--r--deps/v8/test/mjsunit/harmony/typedarray-findlastindex.js224
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js2
-rw-r--r--deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js26
-rw-r--r--deps/v8/test/mjsunit/ic-migrated-map-add-when-uninitialized.js23
-rw-r--r--deps/v8/test/mjsunit/ic-migrated-map-update-when-deprecated.js27
-rw-r--r--deps/v8/test/mjsunit/interrupt-budget-override.js1
-rw-r--r--deps/v8/test/mjsunit/json-parser-recursive.js4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status93
-rw-r--r--deps/v8/test/mjsunit/never-optimize.js1
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js81
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js74
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-dotall.js27
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers-i18n.js138
-rw-r--r--deps/v8/test/mjsunit/regexp-modifiers.js146
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1034449.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1221035.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1227568.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1230930.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1231901.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1232620.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1235071.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1236303.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1236307.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1236560.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1031479.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1113085.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1227476.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1235182.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1236962.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1237153.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-422858.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8799.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9656.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1188825.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1236958.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1239522.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1242689.js33
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7785.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8094.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8846.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8896.js4
-rw-r--r--deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js306
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-constructor.js19
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-date-add.js95
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js220
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-date-until.js226
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-day-of-week.js80
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-day-of-year.js43
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-day.js17
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-days-in-month.js77
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-days-in-week.js18
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-days-in-year.js57
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-fields.js23
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-from.js23
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-in-leap-year.js57
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-merge-fields.js63
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-month-code.js19
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js238
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-month.js19
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-months-in-year.js22
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-week-of-year.js68
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js144
-rw-r--r--deps/v8/test/mjsunit/temporal/calendar-year.js18
-rw-r--r--deps/v8/test/mjsunit/tools/processor.mjs2
-rw-r--r--deps/v8/test/mjsunit/typedarray-constructor-mixed-bigint.js28
-rw-r--r--deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js225
-rw-r--r--deps/v8/test/mjsunit/typedarray-helpers.js55
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js14
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js369
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-api.js219
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-export.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-externref.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-import.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-shared.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-type-reflection.js49
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-utils.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js80
-rw-r--r--deps/v8/test/mjsunit/wasm/externref.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-nominal.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/load-elimination.js319
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-unrolling.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/test-partial-serialization.js56
-rw-r--r--deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js43
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js23
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-array-js-interop.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js136
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-struct-js-interop.js4
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot.js56
-rw-r--r--deps/v8/test/test262/test262.status181
-rw-r--r--deps/v8/test/test262/testcfg.py7
-rw-r--r--deps/v8/test/unittests/BUILD.gn10
-rw-r--r--deps/v8/test/unittests/api/access-check-unittest.cc42
-rw-r--r--deps/v8/test/unittests/api/deserialize-unittest.cc236
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc111
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc132
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc7
-rw-r--r--deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc99
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc40
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc7
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-unittest.cc11
-rw-r--r--deps/v8/test/unittests/heap/cppgc/liveness-broker-unittest.cc45
-rw-r--r--deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc92
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc40
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/runtime/runtime-debug-unittest.cc58
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc1
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc54
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc58
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc125
-rw-r--r--deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h1
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.cc232
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.h18
-rw-r--r--deps/v8/third_party/zlib/google/zip_unittest.cc196
-rw-r--r--deps/v8/third_party/zlib/google/zip_writer.cc12
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc2
-rwxr-xr-xdeps/v8/tools/dev/gm.py1
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py2
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py2
-rw-r--r--deps/v8/tools/release/common_includes.py2
-rwxr-xr-xdeps/v8/tools/release/create_release.py5
-rwxr-xr-xdeps/v8/tools/release/list_deprecated.py (renamed from deps/v8/tools/deprecation_stats.py)84
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py2
-rwxr-xr-xdeps/v8/tools/release/roll_merge.py2
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py10
-rw-r--r--deps/v8/tools/testrunner/base_runner.py6
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/variants.py8
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/filter.py8
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py2
-rw-r--r--deps/v8/tools/v8heapconst.py315
-rw-r--r--deps/v8/tools/whitespace.txt2
957 files changed, 26541 insertions, 19325 deletions
diff --git a/deps/v8/.ycm_extra_conf.py b/deps/v8/.ycm_extra_conf.py
index 25d01c1881..6d79c46245 100644
--- a/deps/v8/.ycm_extra_conf.py
+++ b/deps/v8/.ycm_extra_conf.py
@@ -114,7 +114,7 @@ def GetClangCommandFromNinjaForFilename(v8_root, filename):
# should contain most/all of the interesting flags for other targets too.
filename = os.path.join(v8_root, 'src', 'utils', 'utils.cc')
- sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
+ sys.path.append(os.path.join(v8_root, 'tools', 'vim'))
from ninja_output import GetNinjaOutputDirectory
out_dir = os.path.realpath(GetNinjaOutputDirectory(v8_root))
@@ -133,7 +133,7 @@ def GetClangCommandFromNinjaForFilename(v8_root, filename):
# Ninja might execute several commands to build something. We want the last
# clang command.
clang_line = None
- for line in reversed(stdout.split('\n')):
+ for line in reversed(stdout.decode('utf-8').splitlines()):
if 'clang' in line:
clang_line = line
break
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index ea786ddea4..d9eb05985c 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -86,7 +86,6 @@ Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Dromboski <dandromb@gmail.com>
Daniel James <dnljms@gmail.com>
-Darshan Sen <raisinten@gmail.com>
David Carlier <devnexen@gmail.com>
David Manouchehri <david@davidmanouchehri.com>
Deepak Mohan <hop2deep@gmail.com>
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index e0d5ce7138..c5b4a94f91 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -6,6 +6,7 @@ load("@bazel_skylib//lib:selects.bzl", "selects")
load(
"@v8//:bazel/defs.bzl",
"v8_binary",
+ "v8_build_config",
"v8_config",
"v8_custom_config",
"v8_raw_flag",
@@ -358,6 +359,7 @@ filegroup(
srcs = [
"include/cppgc/allocation.h",
"include/cppgc/common.h",
+ "include/cppgc/cross-thread-persistent.h",
"include/cppgc/custom-space.h",
"include/cppgc/default-platform.h",
"include/cppgc/ephemeron-pair.h",
@@ -610,11 +612,14 @@ filegroup(
srcs = [
"src/builtins/aggregate-error.tq",
"src/builtins/array-at.tq",
+ "src/builtins/array-concat.tq",
"src/builtins/array-copywithin.tq",
"src/builtins/array-every.tq",
"src/builtins/array-filter.tq",
"src/builtins/array-find.tq",
"src/builtins/array-findindex.tq",
+ "src/builtins/array-findlast.tq",
+ "src/builtins/array-findlastindex.tq",
"src/builtins/array-foreach.tq",
"src/builtins/array-from.tq",
"src/builtins/array-isarray.tq",
@@ -716,6 +721,8 @@ filegroup(
"src/builtins/typed-array-filter.tq",
"src/builtins/typed-array-find.tq",
"src/builtins/typed-array-findindex.tq",
+ "src/builtins/typed-array-findlast.tq",
+ "src/builtins/typed-array-findlastindex.tq",
"src/builtins/typed-array-foreach.tq",
"src/builtins/typed-array-from.tq",
"src/builtins/typed-array-keys.tq",
@@ -1018,6 +1025,7 @@ filegroup(
"src/codegen/reloc-info.h",
"src/codegen/safepoint-table.cc",
"src/codegen/safepoint-table.h",
+ "src/codegen/script-details.h",
"src/codegen/signature.h",
"src/codegen/source-position-table.cc",
"src/codegen/source-position-table.h",
@@ -1041,8 +1049,8 @@ filegroup(
"src/common/message-template.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
- "src/compiler-dispatcher/compiler-dispatcher.cc",
- "src/compiler-dispatcher/compiler-dispatcher.h",
+ "src/compiler-dispatcher/lazy-compile-dispatcher.cc",
+ "src/compiler-dispatcher/lazy-compile-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/date/date.cc",
@@ -1253,6 +1261,7 @@ filegroup(
"src/heap/invalidated-slots.h",
"src/heap/large-spaces.cc",
"src/heap/large-spaces.h",
+ "src/heap/linear-allocation-area.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
@@ -1403,6 +1412,7 @@ filegroup(
"src/logging/counters-definitions.h",
"src/logging/counters.cc",
"src/logging/counters.h",
+ "src/logging/counters-scopes.h",
"src/logging/local-logger.cc",
"src/logging/local-logger.h",
"src/logging/log-inl.h",
@@ -1591,6 +1601,7 @@ filegroup(
"src/objects/prototype-info.h",
"src/objects/prototype.h",
"src/objects/prototype-inl.h",
+ "src/objects/regexp-match-info-inl.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info-inl.h",
"src/objects/scope-info.cc",
@@ -2433,9 +2444,6 @@ filegroup(
"src/compiler/scheduler.h",
"src/compiler/select-lowering.cc",
"src/compiler/select-lowering.h",
- "src/compiler/serializer-for-background-compilation.cc",
- "src/compiler/serializer-for-background-compilation.h",
- "src/compiler/serializer-hints.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator.cc",
@@ -2672,10 +2680,12 @@ filegroup(
"src/bigint/bigint-internal.h",
"src/bigint/bigint.h",
"src/bigint/digit-arithmetic.h",
+ "src/bigint/div-barrett.cc",
"src/bigint/div-burnikel.cc",
"src/bigint/div-helpers.cc",
"src/bigint/div-helpers.h",
"src/bigint/div-schoolbook.cc",
+ "src/bigint/fromstring.cc",
"src/bigint/mul-fft.cc",
"src/bigint/mul-karatsuba.cc",
"src/bigint/mul-schoolbook.cc",
@@ -3050,3 +3060,42 @@ v8_binary(
],
deps = [ ":v8" ],
)
+
+# =================================================
+# Tests
+# =================================================
+
+v8_build_config(
+ name = "v8_build_config",
+)
+
+# Runs mjsunit with d8.
+py_test(
+ name = "mjsunit",
+ size = "medium",
+ srcs = [
+ "test/mjsunit/testcfg.py",
+ "tools/predictable_wrapper.py",
+ "tools/run-tests.py",
+ ] + glob(["tools/testrunner/**/*.py"]),
+ args = [
+ "--no-sorting",
+ "--nopresubmit",
+ # TODO(victorgomes): Create a flag to pass the variant in the cmdline.
+ "--variant=default",
+ "--outdir bazel-bin",
+ "mjsunit",
+ ],
+ data = [
+ ":v8_build_config",
+ ":d8",
+ "test",
+ ] + glob(["test/**"]) + glob(["tools/**/*.js"]) + glob(["tools/**/*.mjs"]),
+ main = "tools/run-tests.py",
+ # TODO(victorgomes): Move this to PY3.
+ python_version = "PY2",
+ tags = [
+ # Disable sanitizers, as they don't work in general in V8.
+ "nosan",
+ ],
+)
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 17bab98e8f..3e48fb11bf 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -452,6 +452,12 @@ if (v8_enable_shared_ro_heap == "") {
v8_enable_pointer_compression_shared_cage
}
+# Check if it is a Chromium build and activate PAC/BTI if needed.
+if (build_with_chromium && v8_current_cpu == "arm64" &&
+ arm_control_flow_integrity == "standard") {
+ v8_control_flow_integrity = true
+}
+
assert(!v8_disable_write_barriers || v8_enable_single_generation,
"Disabling write barriers works only with single generation")
@@ -548,6 +554,10 @@ config("internal_config") {
if (is_component_build) {
defines += [ "BUILDING_V8_SHARED" ]
}
+
+ if (v8_current_cpu == "riscv64") {
+ libs = [ "atomic" ]
+ }
}
# Should be applied to all targets that write trace events.
@@ -616,6 +626,10 @@ config("external_config") {
if (is_component_build) {
defines += [ "USING_V8_SHARED" ]
}
+
+ if (current_cpu == "riscv64") {
+ libs = [ "atomic" ]
+ }
}
# This config should only be applied to code that needs to be explicitly
@@ -918,6 +932,9 @@ config("features") {
if (v8_allocation_site_tracking) {
defines += [ "V8_ALLOCATION_SITE_TRACKING" ]
}
+ if (v8_advanced_bigint_algorithms) {
+ defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
+ }
}
config("toolchain") {
@@ -1396,11 +1413,14 @@ action("postmortem-metadata") {
torque_files = [
"src/builtins/aggregate-error.tq",
"src/builtins/array-at.tq",
+ "src/builtins/array-concat.tq",
"src/builtins/array-copywithin.tq",
"src/builtins/array-every.tq",
"src/builtins/array-filter.tq",
"src/builtins/array-find.tq",
"src/builtins/array-findindex.tq",
+ "src/builtins/array-findlast.tq",
+ "src/builtins/array-findlastindex.tq",
"src/builtins/array-foreach.tq",
"src/builtins/array-from.tq",
"src/builtins/array-isarray.tq",
@@ -1502,6 +1522,8 @@ torque_files = [
"src/builtins/typed-array-filter.tq",
"src/builtins/typed-array-find.tq",
"src/builtins/typed-array-findindex.tq",
+ "src/builtins/typed-array-findlast.tq",
+ "src/builtins/typed-array-findlastindex.tq",
"src/builtins/typed-array-foreach.tq",
"src/builtins/typed-array-from.tq",
"src/builtins/typed-array-keys.tq",
@@ -2113,6 +2135,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-call-gen.cc",
"src/builtins/builtins-call-gen.h",
"src/builtins/builtins-collections-gen.cc",
+ "src/builtins/builtins-collections-gen.h",
"src/builtins/builtins-constructor-gen.cc",
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
@@ -2457,6 +2480,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/reglist.h",
"src/codegen/reloc-info.h",
"src/codegen/safepoint-table.h",
+ "src/codegen/script-details.h",
"src/codegen/signature.h",
"src/codegen/source-position-table.h",
"src/codegen/source-position.h",
@@ -2472,8 +2496,11 @@ v8_header_set("v8_internal_headers") {
"src/common/message-template.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
- "src/compiler-dispatcher/compiler-dispatcher.h",
+ "src/compiler-dispatcher/lazy-compile-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
+ "src/compiler/access-builder.h",
+ "src/compiler/access-info.h",
+ "src/compiler/add-type-assertions-reducer.h",
"src/compiler/all-nodes.h",
"src/compiler/allocation-builder-inl.h",
"src/compiler/allocation-builder.h",
@@ -2529,6 +2556,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/graph-visualizer.h",
"src/compiler/graph-zone-traits.h",
"src/compiler/graph.h",
+ "src/compiler/heap-refs.h",
"src/compiler/js-call-reducer.h",
"src/compiler/js-context-specialization.h",
"src/compiler/js-create-lowering.h",
@@ -2582,8 +2610,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/schedule.h",
"src/compiler/scheduler.h",
"src/compiler/select-lowering.h",
- "src/compiler/serializer-for-background-compilation.h",
- "src/compiler/serializer-hints.h",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.h",
@@ -2695,6 +2721,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
"src/heap/embedder-tracing.h",
+ "src/heap/factory-base-inl.h",
"src/heap/factory-base.h",
"src/heap/factory-inl.h",
"src/heap/factory.h",
@@ -2715,6 +2742,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.h",
"src/heap/large-spaces.h",
+ "src/heap/linear-allocation-area.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
@@ -2807,6 +2835,7 @@ v8_header_set("v8_internal_headers") {
"src/libsampler/sampler.h",
"src/logging/code-events.h",
"src/logging/counters-definitions.h",
+ "src/logging/counters-scopes.h",
"src/logging/counters.h",
"src/logging/local-logger.h",
"src/logging/log-inl.h",
@@ -2872,6 +2901,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/free-space-inl.h",
"src/objects/free-space.h",
"src/objects/function-kind.h",
+ "src/objects/function-syntax-kind.h",
"src/objects/hash-table-inl.h",
"src/objects/hash-table.h",
"src/objects/heap-number-inl.h",
@@ -2934,6 +2964,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/object-type.h",
"src/objects/objects-body-descriptors-inl.h",
"src/objects/objects-body-descriptors.h",
+ "src/objects/objects-definitions.h",
"src/objects/objects-inl.h",
"src/objects/objects.h",
"src/objects/oddball-inl.h",
@@ -2969,6 +3000,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/slots-atomic-inl.h",
"src/objects/slots-inl.h",
"src/objects/slots.h",
+ "src/objects/smi-inl.h",
+ "src/objects/smi.h",
"src/objects/source-text-module-inl.h",
"src/objects/source-text-module.h",
"src/objects/stack-frame-info-inl.h",
@@ -3149,6 +3182,7 @@ v8_header_set("v8_internal_headers") {
if (v8_enable_webassembly) {
sources += [
"src/asmjs/asm-js.h",
+ "src/asmjs/asm-names.h",
"src/asmjs/asm-parser.h",
"src/asmjs/asm-scanner.h",
"src/asmjs/asm-types.h",
@@ -3364,8 +3398,16 @@ v8_header_set("v8_internal_headers") {
if (v8_control_flow_integrity) {
sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ]
}
- if (v8_enable_webassembly && current_cpu == "arm64" && is_mac) {
- sources += [ "src/trap-handler/handler-inside-posix.h" ]
+ if (v8_enable_webassembly) {
+ # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux
+ # and Mac.
+ if ((current_cpu == "arm64" && is_mac) ||
+ (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
+ sources += [ "src/trap-handler/handler-inside-posix.h" ]
+ }
+ if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) {
+ sources += [ "src/trap-handler/trap-handler-simulator.h" ]
+ }
}
if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.h" ]
@@ -3449,6 +3491,8 @@ v8_header_set("v8_internal_headers") {
]
} else if (v8_current_cpu == "riscv64") {
sources += [ ### gcmole(arch:riscv64) ###
+ "src/baseline/riscv64/baseline-assembler-riscv64-inl.h",
+ "src/baseline/riscv64/baseline-compiler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64.h",
"src/codegen/riscv64/constants-riscv64.h",
@@ -3576,7 +3620,6 @@ v8_compiler_sources = [
"src/compiler/schedule.cc",
"src/compiler/scheduler.cc",
"src/compiler/select-lowering.cc",
- "src/compiler/serializer-for-background-compilation.cc",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-operator-reducer.cc",
"src/compiler/simplified-operator.cc",
@@ -3756,7 +3799,7 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/turbo-assembler.cc",
"src/codegen/unoptimized-compilation-info.cc",
"src/common/assert-scope.cc",
- "src/compiler-dispatcher/compiler-dispatcher.cc",
+ "src/compiler-dispatcher/lazy-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/date/date.cc",
"src/date/dateparser.cc",
@@ -4265,11 +4308,19 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/arm64/simulator-logic-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
]
- if (v8_enable_webassembly && current_cpu == "arm64" && is_mac) {
- sources += [
- "src/trap-handler/handler-inside-posix.cc",
- "src/trap-handler/handler-outside-posix.cc",
- ]
+ if (v8_enable_webassembly) {
+ # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux
+ # and Mac.
+ if ((current_cpu == "arm64" && is_mac) ||
+ (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
+ sources += [
+ "src/trap-handler/handler-inside-posix.cc",
+ "src/trap-handler/handler-outside-posix.cc",
+ ]
+ }
+ if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) {
+ sources += [ "src/trap-handler/handler-outside-simulator.cc" ]
+ }
}
if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.cc" ]
@@ -4712,10 +4763,12 @@ v8_component("v8_libbase") {
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/template-utils.h",
+ "src/base/threaded-list.h",
"src/base/timezone-cache.h",
"src/base/type-traits.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
+ "src/base/v8-fallthrough.h",
"src/base/vector.h",
"src/base/vlq-base64.cc",
"src/base/vlq-base64.h",
@@ -4927,6 +4980,10 @@ v8_component("v8_libplatform") {
sources += [ "src/libplatform/tracing/recorder-win.cc" ]
}
}
+
+ if (v8_current_cpu == "riscv64") {
+ libs = [ "atomic" ]
+ }
}
v8_source_set("fuzzer_support") {
@@ -4957,6 +5014,7 @@ v8_source_set("v8_bigint") {
"src/bigint/div-helpers.cc",
"src/bigint/div-helpers.h",
"src/bigint/div-schoolbook.cc",
+ "src/bigint/fromstring.cc",
"src/bigint/mul-karatsuba.cc",
"src/bigint/mul-schoolbook.cc",
"src/bigint/tostring.cc",
@@ -4967,11 +5025,10 @@ v8_source_set("v8_bigint") {
if (v8_advanced_bigint_algorithms) {
sources += [
+ "src/bigint/div-barrett.cc",
"src/bigint/mul-fft.cc",
"src/bigint/mul-toom.cc",
]
-
- defines = [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
}
configs = [ ":internal_config" ]
@@ -4983,6 +5040,7 @@ v8_source_set("v8_cppgc_shared") {
"src/heap/base/stack.h",
"src/heap/base/worklist.cc",
"src/heap/base/worklist.h",
+ "src/heap/cppgc/globals.h",
]
if (is_clang || !is_win) {
@@ -5017,7 +5075,10 @@ v8_source_set("v8_cppgc_shared") {
configs = [ ":internal_config" ]
- public_deps = [ ":v8_libbase" ]
+ public_deps = [
+ ":cppgc_headers",
+ ":v8_libbase",
+ ]
}
# This is split out to be a non-code containing target that the Chromium browser
@@ -5075,7 +5136,10 @@ v8_header_set("cppgc_headers") {
sources += [ "include/cppgc/internal/caged-heap-local-data.h" ]
}
- deps = [ ":v8_libplatform" ]
+ deps = [
+ ":v8_libbase",
+ ":v8_libplatform",
+ ]
public_deps = [ ":v8_config_headers" ]
}
@@ -5171,6 +5235,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/virtual-memory.cc",
"src/heap/cppgc/virtual-memory.h",
"src/heap/cppgc/visitor.cc",
+ "src/heap/cppgc/visitor.h",
"src/heap/cppgc/write-barrier.cc",
"src/heap/cppgc/write-barrier.h",
]
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index b1e297b106..439f45ca58 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -49,19 +49,19 @@ vars = {
'reclient_version': 're_client_version:0.33.0.3e223d5',
# GN CIPD package version.
- 'gn_version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8',
+ 'gn_version': 'git_revision:eea3906f0e2a8d3622080127d2005ff214d51383',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:8b8a9a6040ca6debd30694a71a99a1eac97d72fd',
+ 'luci_go': 'git_revision:1120f810b7ab7eb71bd618c4c57fe82a60d4f2fe',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
# and whatever else without interference from each other.
- 'android_sdk_build-tools_version': '8LZujEmLjSh0g3JciDA3cslSptxKs9HOa_iUPXkOeYQC',
+ 'android_sdk_build-tools_version': 'tRoD45SCi7UleQqSV7MrMQO1_e5P8ysphkCcj6z_cCQC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_emulator_version
# and whatever else without interference from each other.
- 'android_sdk_emulator_version': 'A4EvXZUIuQho0QRDJopMUpgyp6NA3aiDQjGKPUKbowMC',
+ 'android_sdk_emulator_version': 'gMHhUuoQRKfxr-MBn3fNNXZtkAVXtOwMwT7kfx8jkIgC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_extras_version
# and whatever else without interference from each other.
@@ -73,28 +73,28 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platform-tools_version
# and whatever else without interference from each other.
- 'android_sdk_platform-tools_version': '8tF0AOj7Dwlv4j7_nfkhxWB0jzrvWWYjEIpirt8FIWYC',
+ 'android_sdk_platform-tools_version': 'qi_k82nm6j9nz4dQosOoqXew4_TFAy8rcGOHDLptx1sC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
- 'android_sdk_platforms_version': 'YMUu9EHNZ__2Xcxl-KsaSf-dI5TMt_P62IseUVsxktMC',
+ 'android_sdk_platforms_version': 'lL3IGexKjYlwjO_1Ga-xwxgwbE_w-lmi2Zi1uOlWUIAC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_sources_version
# and whatever else without interference from each other.
- 'android_sdk_sources_version': '4gxhM8E62bvZpQs7Q3d0DinQaW0RLCIefhXrQBFkNy8C',
+ 'android_sdk_sources_version': 'n7svc8KYah-i4s8zwkVa85SI3_H0WFOniP0mpwNdFO0C',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_cmdline-tools_version': 'V__2Ycej-H2-6AcXX5A3gi7sIk74SuN44PBm2uC_N1sC',
+ 'android_sdk_cmdline-tools_version': 'ZT3JmI6GMG4YVcZ1OtECRVMOLLJAWAdPbi-OclubJLMC',
}
deps = {
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd5bb24e5d9802c8c917fcaa4375d5239a586c168',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '3da1e2fcf66acd5c7194497b4285ac163f32e239',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '2d999384c270a340f592cce0a0fb3f8f94c15290',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'bbf7f0ed65548c4df862d2a2748e3a9b908a3217',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '2500c1d8f3a20a66a7cbafe3f69079a2edb742dd',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '37dc929ecb351687006a61744b116cda601753d7',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
@@ -110,7 +110,7 @@ deps = {
'buildtools/mac': {
'packages': [
{
- 'package': 'gn/gn/mac-amd64',
+ 'package': 'gn/gn/mac-${{arch}}',
'version': Var('gn_version'),
}
],
@@ -120,9 +120,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '6803464b0f46df0a51862347d39e0791b59cf568',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '24e92c2beed59b76ddabe7ceb5ee4b40f09e0712',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a5feaf61658af4453e282142a76aeb6f9c045311',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'b825591df326b2725e6b88bdf74fdc88fefdf460',
'buildtools/win': {
'packages': [
{
@@ -148,14 +148,14 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ebb6c34fa5dd76a6bea01c54ed7b182596492176',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ab353c6e732b9e175d3ad6779e3acf3ea82d3761',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/aemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'm4sM10idq7LeFHXpoLKLBtaOZsQzuj63Usa3Cl9af1YC'
+ 'version': 'qWiGSH8A_xdaUVO-GsDJsJ5HCkIRwZqb-HDyxsLiuWwC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -176,13 +176,13 @@ deps = {
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'b291e88d8e3e6774d6d46151e11dc3189ddeeb09',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e98c753917587d320f4e7a24f1c7474535adac3f',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
'packages': [
{
- 'package': 'chromium/third_party/android_sdk/public/build-tools/30.0.1',
+ 'package': 'chromium/third_party/android_sdk/public/build-tools/31.0.0',
'version': Var('android_sdk_build-tools_version'),
},
{
@@ -202,11 +202,11 @@ deps = {
'version': Var('android_sdk_platform-tools_version'),
},
{
- 'package': 'chromium/third_party/android_sdk/public/platforms/android-30',
+ 'package': 'chromium/third_party/android_sdk/public/platforms/android-31',
'version': Var('android_sdk_platforms_version'),
},
{
- 'package': 'chromium/third_party/android_sdk/public/sources/android-29',
+ 'package': 'chromium/third_party/android_sdk/public/sources/android-30',
'version': Var('android_sdk_sources_version'),
},
{
@@ -218,7 +218,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '2814ff3716a8512518bee705a0f91425ce06b27b',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'abc7ba7d871fe3c25b0a1bec7fc84fb309034cb7',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -226,24 +226,24 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'a806594b95a39141fdbf1f359087a44ffb2deaaf',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '49a703f3d915b140c9f373107e1ba17f30e2487d',
'third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085',
'condition': 'checkout_fuchsia',
},
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'e451e50e9b8af453f076dec10bd6890847f1624e',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '4124223bf5303d1d65fe2c40f33e28372bbb986c',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '4ec4cd23f486bf70efcc5d2caa40f24368f752e3',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '47f819c3ca54fb602f432904443e00a0a1fe2f42',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'b9dfc58bf9b02ea0365509244aca13841322feb0',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '75e34bcccea0be165c31fdb278b3712c516c5876',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '4ae2535e8e894c3cd81d46aacdaf151b5df30709',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '9a8087bbbf43a355950fc1667575d1a753f8aaa4',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
- 'url': Var('chromium_url') + '/external/github.com/intel/ittapi' + '@' + 'b4ae0122ba749163096058b4f1bb065bf4a7de94',
+ 'url': Var('chromium_url') + '/external/github.com/intel/ittapi' + '@' + 'a3911fff01a775023a06af8754f9ec1e5977dd97',
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/jinja2':
@@ -251,7 +251,7 @@ deps = {
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/logdog/logdog':
- Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '794d09a24c10401953880c253d0c7e267234ab75',
+ Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '17ec234f823f7bff6ada6584fdbbee9d54b8fc58',
'third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '1b882ef6372b58bfd55a3285f37ed801be9137cd',
'third_party/perfetto':
@@ -283,9 +283,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'dfbc590f5855bc2765256a743cad0abc56330a30',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '563140dd9c24f84bf40919196e9e7666d351cc0d',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '9d0a403e85d25b5b0d3016a342d4b83b12941fd5',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '6a8e571efd68de48d226950d1e10cb8982e71496',
'tools/clang/dsymutil': {
'packages': [
{
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 6f0ac01720..f9b23f237f 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -27,8 +27,8 @@ per-file codereview.settings=file:INFRA_OWNERS
per-file AUTHORS=file:COMMON_OWNERS
per-file WATCHLISTS=file:COMMON_OWNERS
-per-file *-mips*=file:MIPS_OWNERS
-per-file *-mips64*=file:MIPS_OWNERS
-per-file *-ppc*=file:PPC_OWNERS
-per-file *-riscv64*=file:RISCV_OWNERS
-per-file *-s390*=file:S390_OWNERS
+per-file ...-mips*=file:MIPS_OWNERS
+per-file ...-mips64*=file:MIPS_OWNERS
+per-file ...-ppc*=file:PPC_OWNERS
+per-file ...-riscv64*=file:RISCV_OWNERS
+per-file ...-s390*=file:S390_OWNERS
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index f691d88e92..b8b7eac99a 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -33,6 +33,9 @@
{
'WATCHLIST_DEFINITIONS': {
+ 'api': {
+ 'filepath': 'include/',
+ },
'snapshot': {
'filepath': 'src/snapshot/',
},
@@ -52,7 +55,7 @@
'|test/unittests/interpreter/',
},
'baseline': {
- 'filepath': 'src/baseline/'
+ 'filepath': 'src/baseline/',
},
'feature_shipping_status': {
'filepath': 'src/flags/flag-definitions.h',
@@ -67,9 +70,6 @@
'filepath': 'src/codegen/code-stub-assembler\.(cc|h)$' \
'|src/builtins/.*-gen.(cc|h)$',
},
- 'ia32': {
- 'filepath': '/ia32/',
- },
'merges': {
'filepath': '.',
},
@@ -102,7 +102,7 @@
'filepath': 'BUILD.gn' \
'|BUILD.bazel' \
'|WORKSPACE' \
- '|bazel/'
+ '|bazel/',
},
},
@@ -153,17 +153,21 @@
'alph+watch@chromium.org',
'lpy+v8tracing@chromium.org',
'fmeawad@chromium.org',
+ 'cbruni+watch@chromium.org',
],
'ieee754': [
'rtoy+watch@chromium.org',
- 'hongchan+watch@chromium.org'
+ 'hongchan+watch@chromium.org',
],
'regexp': [
'jgruber+watch@chromium.org',
- 'pthier+watch@chromium.org'
+ 'pthier+watch@chromium.org',
+ ],
+ 'bazel': [
+ 'victorgomes+watch@chromium.org',
],
- 'bazel' : [
- 'victorgomes+watch@chromium.org'
+ 'api': [
+ 'cbruni+watch@chromium.org',
],
},
}
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index dff2f9b277..76391985c1 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -56,12 +56,12 @@
// static int send_count = 0;
// ++send_count;
// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
-// "ipc", "message", TRACE_ID_LOCAL(send_count));
+// "ipc", "message", TRACE_ID_WITH_SCOPE("message", send_count));
// Send(new MyMessage(send_count));
// [receive code]
// void OnMyMessage(send_count) {
// TRACE_NESTABLE_EVENT_ASYNC_END0(
-// "ipc", "message", TRACE_ID_LOCAL(send_count));
+// "ipc", "message", TRACE_ID_WITH_SCOPE("message", send_count));
// }
// The third parameter is a unique ID to match NESTABLE_ASYNC_BEGIN/ASYNC_END
// pairs. NESTABLE_ASYNC_BEGIN and ASYNC_END can occur on any thread of any
@@ -71,10 +71,12 @@
// class MyTracedClass {
// public:
// MyTracedClass() {
-// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("category", "MyTracedClass",
+// TRACE_ID_LOCAL(this));
// }
// ~MyTracedClass() {
-// TRACE_EVENT_NESTABLE_ASYNC_END0("category", "MyTracedClass", this);
+// TRACE_EVENT_NESTABLE_ASYNC_END0("category", "MyTracedClass",
+// TRACE_ID_LOCAL(this));
// }
// }
//
@@ -390,12 +392,15 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
arg2_name, arg2_val)
-// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
+// Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided.
// - |id| is used to match the _BEGIN event with the _END event.
// Events are considered to match if their category_group, name and id values
// all match. |id| must either be a pointer or an integer value up to 64 bits.
// If it's a pointer, the bits will be xored with a hash of the process ID so
// that the same pointer on two different processes will not collide.
+// - |timestamp| must be non-null or it crashes. Use DCHECK(timestamp) before
+// calling this to detect an invalid timestamp even when tracing is not
+// enabled, as the commit queue doesn't run all tests with tracing enabled.
#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
thread_id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -446,6 +451,10 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
arg2_name, arg2_val)
+// Adds a trace event with the given |name| and |timestamp|. |timestamp| must be
+// non-null or it crashes. Use DCHECK(timestamp) before calling this to detect
+// an invalid timestamp even when tracing is not enabled, as the commit queue
+// doesn't run all tests with tracing enabled.
#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
@@ -476,12 +485,15 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_COPY)
-// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
+// Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided.
// - |id| is used to match the _BEGIN event with the _END event.
// Events are considered to match if their category_group, name and id values
// all match. |id| must either be a pointer or an integer value up to 64 bits.
// If it's a pointer, the bits will be xored with a hash of the process ID so
// that the same pointer on two different processes will not collide.
+// - |timestamp| must be non-null or it crashes. Use DCHECK(timestamp) before
+// calling this to detect an invalid timestamp even when tracing is not
+// enabled, as the commit queue doesn't run all tests with tracing enabled.
#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
thread_id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -540,6 +552,9 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
static_cast<int>(value2_val))
// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided.
+// - |timestamp| must be non-null or it crashes. Use DCHECK(timestamp) before
+// calling this to detect an invalid timestamp even when tracing is not
+// enabled, as the commit queue doesn't run all tests with tracing enabled.
#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
@@ -925,6 +940,16 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
category_group, name, id, \
TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN1(category_group, name, id, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END0(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
category_group, name, id, \
@@ -934,6 +959,12 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP1( \
+ category_group, name, id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val)
#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
category_group, name, id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -1088,9 +1119,6 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 10))
#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 11))
#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 12))
-// TODO(eseckler): Remove once we have native support for typed proto events in
-// TRACE_EVENT macros.
-#define TRACE_EVENT_FLAG_TYPED_PROTO_ARGS (static_cast<unsigned int>(1 << 15))
#define TRACE_EVENT_FLAG_JAVA_STRING_LITERALS \
(static_cast<unsigned int>(1 << 16))
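
As a point of reference, a minimal sketch of how the newly added one-argument COPY variant might be invoked; the "net" category, the "Request" event name, and the request/url parameters are illustrative only:

    void OnRequestStart(void* request, const char* url) {
      // The COPY flag is assumed to copy the name and argument strings, so a
      // short-lived url buffer is safe to pass here.
      TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN1("net", "Request",
                                             TRACE_ID_LOCAL(request), "url", url);
    }

    void OnRequestEnd(void* request) {
      TRACE_EVENT_COPY_NESTABLE_ASYNC_END0("net", "Request",
                                           TRACE_ID_LOCAL(request));
    }
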
diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl
index fbd1830ecb..58fd53ed60 100644
--- a/deps/v8/bazel/defs.bzl
+++ b/deps/v8/bazel/defs.bzl
@@ -237,3 +237,62 @@ v8_mksnapshot = rule(
),
}
)
+
+def _quote(val):
+ if val[0] == '"' and val[-1] == '"':
+ fail("String", val, "already quoted")
+ return '"' + val + '"'
+
+def _kv_bool_pair(k, v):
+ return _quote(k) + ": " + v
+
+def _json(kv_pairs):
+ content = "{"
+ for (k, v) in kv_pairs[:-1]:
+ content += _kv_bool_pair(k, v) + ", "
+ (k, v) = kv_pairs[-1]
+ content += _kv_bool_pair(k, v)
+ content += "}\n"
+ return content
+
+# TODO(victorgomes): Create a rule (instead of a macro) that can
+# dynamically populate the build config.
+def v8_build_config(name):
+ cpu = _quote("x64")
+ content = _json([
+ ("current_cpu", cpu),
+ ("dcheck_always_on", "false"),
+ ("is_android", "false"),
+ ("is_asan", "false"),
+ ("is_cfi", "false"),
+ ("is_clang", "true"),
+ ("is_component_build", "false"),
+ ("is_debug", "false"),
+ ("is_full_debug", "false"),
+ ("is_gcov_coverage", "false"),
+ ("is_msan", "false"),
+ ("is_tsan", "false"),
+ ("is_ubsan_vptr", "false"),
+ ("target_cpu", cpu),
+ ("v8_current_cpu", cpu),
+ ("v8_enable_atomic_marking_state", "false"),
+ ("v8_enable_atomic_object_field_writes", "false"),
+ ("v8_enable_concurrent_marking", "false"),
+ ("v8_enable_i18n_support", "true"),
+ ("v8_enable_verify_predictable", "false"),
+ ("v8_enable_verify_csa", "false"),
+ ("v8_enable_lite_mode", "false"),
+ ("v8_enable_runtime_call_stats", "false"),
+ ("v8_enable_pointer_compression", "true"),
+ ("v8_enable_pointer_compression_shared_cage", "false"),
+ ("v8_enable_third_party_heap", "false"),
+ ("v8_enable_webassembly", "false"),
+ ("v8_control_flow_integrity", "false"),
+ ("v8_enable_single_generation", "false"),
+ ("v8_target_cpu", cpu),
+ ])
+ native.genrule(
+ name = name,
+ outs = [name + ".json"],
+ cmd = "echo '" + content + "' > \"$@\"",
+ )
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
index b06d9d7020..d75f1a9729 100644
--- a/deps/v8/include/cppgc/allocation.h
+++ b/deps/v8/include/cppgc/allocation.h
@@ -10,6 +10,7 @@
#include <cstdint>
#include <new>
#include <type_traits>
+#include <utility>
#include "cppgc/custom-space.h"
#include "cppgc/internal/api-constants.h"
diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h
index fe61e9acbc..0a9afdcd2b 100644
--- a/deps/v8/include/cppgc/cross-thread-persistent.h
+++ b/deps/v8/include/cppgc/cross-thread-persistent.h
@@ -13,12 +13,34 @@
#include "cppgc/visitor.h"
namespace cppgc {
-
namespace internal {
+// Wrapper around PersistentBase that allows accessing poisoned memory when
+// using ASAN. This is needed as the GC of the heap that owns the value
+// of a CTP may clear it (heap termination, weakness) while the object
+// holding the CTP may be poisoned as it may itself be deemed dead.
+class CrossThreadPersistentBase : public PersistentBase {
+ public:
+ CrossThreadPersistentBase() = default;
+ explicit CrossThreadPersistentBase(const void* raw) : PersistentBase(raw) {}
+
+ V8_CLANG_NO_SANITIZE("address") const void* GetValueFromGC() const {
+ return raw_;
+ }
+
+ V8_CLANG_NO_SANITIZE("address")
+ PersistentNode* GetNodeFromGC() const { return node_; }
+
+ V8_CLANG_NO_SANITIZE("address")
+ void ClearFromGC() const {
+ raw_ = nullptr;
+ node_ = nullptr;
+ }
+};
+
template <typename T, typename WeaknessPolicy, typename LocationPolicy,
typename CheckingPolicy>
-class BasicCrossThreadPersistent final : public PersistentBase,
+class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
public LocationPolicy,
private WeaknessPolicy,
private CheckingPolicy {
@@ -38,11 +60,11 @@ class BasicCrossThreadPersistent final : public PersistentBase,
BasicCrossThreadPersistent(
SentinelPointer s, const SourceLocation& loc = SourceLocation::Current())
- : PersistentBase(s), LocationPolicy(loc) {}
+ : CrossThreadPersistentBase(s), LocationPolicy(loc) {}
BasicCrossThreadPersistent(
T* raw, const SourceLocation& loc = SourceLocation::Current())
- : PersistentBase(raw), LocationPolicy(loc) {
+ : CrossThreadPersistentBase(raw), LocationPolicy(loc) {
if (!IsValid(raw)) return;
PersistentRegionLock guard;
CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw);
@@ -61,7 +83,7 @@ class BasicCrossThreadPersistent final : public PersistentBase,
BasicCrossThreadPersistent(
UnsafeCtorTag, T* raw,
const SourceLocation& loc = SourceLocation::Current())
- : PersistentBase(raw), LocationPolicy(loc) {
+ : CrossThreadPersistentBase(raw), LocationPolicy(loc) {
if (!IsValid(raw)) return;
CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw);
SetNode(region.AllocateNode(this, &Trace));
@@ -329,12 +351,19 @@ class BasicCrossThreadPersistent final : public PersistentBase,
}
void ClearFromGC() const {
- if (IsValid(GetValue())) {
- WeaknessPolicy::GetPersistentRegion(GetValue()).FreeNode(GetNode());
- PersistentBase::ClearFromGC();
+ if (IsValid(GetValueFromGC())) {
+ WeaknessPolicy::GetPersistentRegion(GetValueFromGC())
+ .FreeNode(GetNodeFromGC());
+ CrossThreadPersistentBase::ClearFromGC();
}
}
+ // See Get() for details.
+ V8_CLANG_NO_SANITIZE("cfi-unrelated-cast")
+ T* GetFromGC() const {
+ return static_cast<T*>(const_cast<void*>(GetValueFromGC()));
+ }
+
friend class cppgc::Visitor;
};
diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h
index 47caea1847..8e603d5d8a 100644
--- a/deps/v8/include/cppgc/heap-consistency.h
+++ b/deps/v8/include/cppgc/heap-consistency.h
@@ -69,6 +69,23 @@ class HeapConsistency final {
}
/**
+ * Gets the required write barrier type for a specific write.
+ * This version is meant to be used in conjunction with a marking write
+ * barrier which doesn't consider the slot.
+ *
+ * \param value The pointer to the object. May be an interior pointer to an
+ * interface of the actual object.
+ * \param params Parameters that may be used for actual write barrier calls.
+ * Only filled if return value indicates that a write barrier is needed. The
+ * contents of the `params` are an implementation detail.
+ * \returns whether a write barrier is needed and which barrier to invoke.
+ */
+ static V8_INLINE WriteBarrierType
+ GetWriteBarrierType(const void* value, WriteBarrierParams& params) {
+ return internal::WriteBarrier::GetWriteBarrierType(value, params);
+ }
+
+ /**
* Conservative Dijkstra-style write barrier that processes an object if it
* has not yet been processed.
*
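
A rough usage sketch for the slot-less overload introduced above, assuming the DijkstraWriteBarrier helper declared elsewhere in this header; the function name is made up:

    void WriteWithoutKnownSlot(const void* value) {
      cppgc::subtle::HeapConsistency::WriteBarrierParams params;
      const auto type =
          cppgc::subtle::HeapConsistency::GetWriteBarrierType(value, params);
      if (type != cppgc::subtle::HeapConsistency::WriteBarrierType::kNone) {
        // Marking-style barrier that only needs the value, not the slot.
        cppgc::subtle::HeapConsistency::DijkstraWriteBarrier(params, value);
      }
    }
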
diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h
index 5626b17820..b5dba476a4 100644
--- a/deps/v8/include/cppgc/internal/persistent-node.h
+++ b/deps/v8/include/cppgc/internal/persistent-node.h
@@ -75,7 +75,7 @@ class PersistentNode final {
TraceCallback trace_ = nullptr;
};
-class V8_EXPORT PersistentRegion final {
+class V8_EXPORT PersistentRegion {
using PersistentNodeSlots = std::array<PersistentNode, 256u>;
public:
@@ -116,6 +116,9 @@ class V8_EXPORT PersistentRegion final {
private:
void EnsureNodeSlots();
+ template <typename PersistentBaseClass>
+ void ClearAllUsedNodes();
+
std::vector<std::unique_ptr<PersistentNodeSlots>> nodes_;
PersistentNode* free_list_head_ = nullptr;
size_t nodes_in_use_ = 0;
@@ -135,7 +138,7 @@ class V8_EXPORT PersistentRegionLock final {
// Variant of PersistentRegion that checks whether the PersistentRegionLock is
// locked.
-class V8_EXPORT CrossThreadPersistentRegion final {
+class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegion {
public:
CrossThreadPersistentRegion() = default;
// Clears Persistent fields to avoid stale pointers after heap teardown.
@@ -147,12 +150,12 @@ class V8_EXPORT CrossThreadPersistentRegion final {
V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
PersistentRegionLock::AssertLocked();
- return persistent_region_.AllocateNode(owner, trace);
+ return PersistentRegion::AllocateNode(owner, trace);
}
V8_INLINE void FreeNode(PersistentNode* node) {
PersistentRegionLock::AssertLocked();
- persistent_region_.FreeNode(node);
+ PersistentRegion::FreeNode(node);
}
void Trace(Visitor*);
@@ -160,9 +163,6 @@ class V8_EXPORT CrossThreadPersistentRegion final {
size_t NodesInUse() const;
void ClearAllUsedNodes();
-
- private:
- PersistentRegion persistent_region_;
};
} // namespace internal
diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h
index c1b3b3e34d..28184dc9c8 100644
--- a/deps/v8/include/cppgc/internal/write-barrier.h
+++ b/deps/v8/include/cppgc/internal/write-barrier.h
@@ -11,6 +11,7 @@
#include "cppgc/heap-state.h"
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/atomic-entry-flag.h"
+#include "cppgc/platform.h"
#include "cppgc/sentinel-pointer.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -66,6 +67,8 @@ class V8_EXPORT WriteBarrier final {
template <typename HeapHandleCallback>
static V8_INLINE Type GetWriteBarrierType(const void* slot, Params& params,
HeapHandleCallback callback);
+ // Returns the required write barrier for a given `value`.
+ static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params);
template <typename HeapHandleCallback>
static V8_INLINE Type GetWriteBarrierTypeForExternallyReferencedObject(
@@ -147,9 +150,27 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
+ template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type Get(const void* value,
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ return GetNoSlot(value, params, callback);
+ }
+
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
- const void* value, WriteBarrier::Params& params, HeapHandleCallback) {
+ const void* value, WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ return GetNoSlot(value, params, callback);
+ }
+
+ private:
+ WriteBarrierTypeForCagedHeapPolicy() = delete;
+
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type GetNoSlot(const void* value,
+ WriteBarrier::Params& params,
+ HeapHandleCallback) {
if (!TryGetCagedHeap(value, value, params)) {
return WriteBarrier::Type::kNone;
}
@@ -159,14 +180,14 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
- private:
- WriteBarrierTypeForCagedHeapPolicy() = delete;
-
template <WriteBarrier::ValueMode value_mode>
struct ValueModeDispatch;
static V8_INLINE bool TryGetCagedHeap(const void* slot, const void* value,
WriteBarrier::Params& params) {
+ // TODO(chromium:1056170): Check if the null check can be folded in with
+ // the rest of the write barrier.
+ if (!value) return false;
params.start = reinterpret_cast<uintptr_t>(value) &
~(api_constants::kCagedHeapReservationAlignment - 1);
const uintptr_t slot_offset =
@@ -257,6 +278,15 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
+ template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type Get(const void* value,
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ // The slot will never be used in `Get()` below.
+ return Get<WriteBarrier::ValueMode::kValuePresent>(nullptr, value, params,
+ callback);
+ }
+
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
const void* value, WriteBarrier::Params& params,
@@ -331,6 +361,13 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
}
// static
+WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
+ const void* value, WriteBarrier::Params& params) {
+ return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(value, params,
+ []() {});
+}
+
+// static
template <typename HeapHandleCallback>
WriteBarrier::Type
WriteBarrier::GetWriteBarrierTypeForExternallyReferencedObject(
diff --git a/deps/v8/include/cppgc/liveness-broker.h b/deps/v8/include/cppgc/liveness-broker.h
index e449091280..c94eef0d4a 100644
--- a/deps/v8/include/cppgc/liveness-broker.h
+++ b/deps/v8/include/cppgc/liveness-broker.h
@@ -44,7 +44,10 @@ class V8_EXPORT LivenessBroker final {
public:
template <typename T>
bool IsHeapObjectAlive(const T* object) const {
- return object &&
+ // nullptr objects are considered alive to allow weakness to be used from
+ // the stack while running a conservative GC. Treating nullptr as dead would
+ // mean that e.g. custom collections could not be strongified on the stack.
+ return !object ||
IsHeapObjectAliveImpl(
TraceTrait<T>::GetTraceDescriptor(object).base_object_payload);
}
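
A brief sketch of why the nullptr-is-alive behaviour matters for custom weak callbacks; the Node type and next_ field are illustrative:

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

      void ClearDeadNext(const cppgc::LivenessBroker& broker) {
        // next_ may legitimately be null; with this change no extra null check
        // is needed before consulting the broker.
        if (!broker.IsHeapObjectAlive(next_.Get())) next_ = nullptr;
      }

     private:
      cppgc::Member<Node> next_;
    };
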
diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h
index d0bf414c69..38105b8e43 100644
--- a/deps/v8/include/cppgc/member.h
+++ b/deps/v8/include/cppgc/member.h
@@ -218,6 +218,8 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
void ClearFromGC() const { MemberBase::ClearFromGC(); }
+ T* GetFromGC() const { return Get(); }
+
friend class cppgc::Visitor;
template <typename U>
friend struct cppgc::TraceTrait;
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h
index 03b5e5b06b..b83a464576 100644
--- a/deps/v8/include/cppgc/persistent.h
+++ b/deps/v8/include/cppgc/persistent.h
@@ -41,7 +41,7 @@ class PersistentBase {
node_ = nullptr;
}
- private:
+ protected:
mutable const void* raw_ = nullptr;
mutable PersistentNode* node_ = nullptr;
@@ -259,6 +259,12 @@ class BasicPersistent final : public PersistentBase,
}
}
+ // See Get() for details.
+ V8_CLANG_NO_SANITIZE("cfi-unrelated-cast")
+ T* GetFromGC() const {
+ return static_cast<T*>(const_cast<void*>(GetValue()));
+ }
+
friend class cppgc::Visitor;
};
diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h
index 2d933d620d..3276a26b65 100644
--- a/deps/v8/include/cppgc/platform.h
+++ b/deps/v8/include/cppgc/platform.h
@@ -148,6 +148,7 @@ namespace internal {
V8_EXPORT void Abort();
} // namespace internal
+
} // namespace cppgc
#endif // INCLUDE_CPPGC_PLATFORM_H_
diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h
index 98de9957bd..57e2ce3963 100644
--- a/deps/v8/include/cppgc/visitor.h
+++ b/deps/v8/include/cppgc/visitor.h
@@ -12,6 +12,7 @@
#include "cppgc/internal/pointer-policies.h"
#include "cppgc/liveness-broker.h"
#include "cppgc/member.h"
+#include "cppgc/sentinel-pointer.h"
#include "cppgc/source-location.h"
#include "cppgc/trace-trait.h"
#include "cppgc/type-traits.h"
@@ -318,10 +319,10 @@ class V8_EXPORT Visitor {
template <typename PointerType>
static void HandleWeak(const LivenessBroker& info, const void* object) {
const PointerType* weak = static_cast<const PointerType*>(object);
+ auto* raw_ptr = weak->GetFromGC();
// Sentinel values are preserved for weak pointers.
- if (*weak == kSentinelPointer) return;
- const auto* raw = weak->Get();
- if (!info.IsHeapObjectAlive(raw)) {
+ if (raw_ptr == kSentinelPointer) return;
+ if (!info.IsHeapObjectAlive(raw_ptr)) {
weak->ClearFromGC();
}
}
@@ -335,11 +336,11 @@ class V8_EXPORT Visitor {
static_assert(internal::IsGarbageCollectedOrMixinType<PointeeType>::value,
"Persistent's pointee type must be GarbageCollected or "
"GarbageCollectedMixin");
- if (!p.Get()) {
+ auto* ptr = p.GetFromGC();
+ if (!ptr) {
return;
}
- VisitRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()),
- loc);
+ VisitRoot(ptr, TraceTrait<PointeeType>::GetTraceDescriptor(ptr), loc);
}
template <
@@ -354,7 +355,8 @@ class V8_EXPORT Visitor {
"GarbageCollectedMixin");
static_assert(!internal::IsAllocatedOnCompactableSpace<PointeeType>::value,
"Weak references to compactable objects are not allowed");
- VisitWeakRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()),
+ auto* ptr = p.GetFromGC();
+ VisitWeakRoot(ptr, TraceTrait<PointeeType>::GetTraceDescriptor(ptr),
&HandleWeak<WeakPersistent>, &p, loc);
}
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 63baa3da13..ebf9eb7fe8 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -1672,6 +1672,8 @@ domain Runtime
parameters
RemoteObject object
object hints
+ # Identifier of the context where the call was made.
+ experimental optional ExecutionContextId executionContextId
# This domain is deprecated.
deprecated domain Schema
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 1848800b48..8c9d02769e 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -225,8 +225,9 @@
#include <tuple>
#include <type_traits>
-#include "v8.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -298,10 +299,36 @@ class CTypeInfo {
Flags flags_;
};
+struct FastApiTypedArrayBase {
+ public:
+ // Returns the length in number of elements.
+ size_t V8_EXPORT length() const { return length_; }
+ // Checks whether the given index is within the bounds of the collection.
+ void V8_EXPORT ValidateIndex(size_t index) const;
+
+ protected:
+ size_t length_ = 0;
+};
+
template <typename T>
-struct FastApiTypedArray {
- T* data; // should include the typed array offset applied
- size_t length; // length in number of elements
+struct FastApiTypedArray : public FastApiTypedArrayBase {
+ public:
+ V8_INLINE T get(size_t index) const {
+#ifdef DEBUG
+ ValidateIndex(index);
+#endif // DEBUG
+ T tmp;
+ memcpy(&tmp, reinterpret_cast<T*>(data_) + index, sizeof(T));
+ return tmp;
+ }
+
+ private:
+ // This pointer should include the typed array offset applied.
+ // It's not guaranteed to be aligned to sizeof(T); it's only
+ // guaranteed to be 4-byte aligned, so for 8-byte types we need to
+ // provide a special implementation for reading from it, which hides
+ // the possibly unaligned read in the `get` method.
+ void* data_;
};
// Any TypedArray. It uses kTypedArrayBit with base type void
@@ -437,7 +464,7 @@ class V8_EXPORT CFunction {
};
};
-struct ApiObject {
+struct V8_DEPRECATE_SOON("Use v8::Local<v8::Value> instead.") ApiObject {
uintptr_t address;
};
@@ -578,7 +605,7 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS)
#define SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA(T, Enum) \
template <> \
- struct TypeInfoHelper<FastApiTypedArray<T>> { \
+ struct TypeInfoHelper<const FastApiTypedArray<T>&> { \
static constexpr CTypeInfo::Flags Flags() { \
return CTypeInfo::Flags::kNone; \
} \
@@ -770,6 +797,10 @@ CFunction CFunction::ArgUnwrap<R (*)(Args...)>::Make(R (*func)(Args...)) {
using CFunctionBuilder = internal::CFunctionBuilder;
+static constexpr CTypeInfo kTypeInfoInt32 = CTypeInfo(CTypeInfo::Type::kInt32);
+static constexpr CTypeInfo kTypeInfoFloat64 =
+ CTypeInfo(CTypeInfo::Type::kFloat64);
+
/**
* Copies the contents of this JavaScript array to a C++ buffer with
* a given max_length. A CTypeInfo is passed as an argument,
@@ -783,8 +814,22 @@ using CFunctionBuilder = internal::CFunctionBuilder;
* returns true on success. `type_info` will be used for conversions.
*/
template <const CTypeInfo* type_info, typename T>
-bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
- uint32_t max_length);
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryCopyAndConvertArrayToCppBuffer(
+ Local<Array> src, T* dst, uint32_t max_length);
+
+template <>
+inline bool V8_WARN_UNUSED_RESULT
+TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBufferInt32(src, dst, max_length);
+}
+
+template <>
+inline bool V8_WARN_UNUSED_RESULT
+TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>(
+ Local<Array> src, double* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBufferFloat64(src, dst, max_length);
+}
} // namespace v8
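
A hedged sketch of a fast-path callback consuming the new read-only typed-array view; the function name and receiver handling are illustrative, and the callback would still be registered through CFunctionBuilder and a FunctionTemplate as usual. The TryCopyAndConvertArrayToCppBuffer specializations above delegate to the CopyAndConvertArrayToCppBuffer{Int32,Float64} entry points declared in v8-internal.h.

    double SumDoublesFast(v8::Local<v8::Object> /*receiver*/,
                          const v8::FastApiTypedArray<double>& values) {
      double sum = 0.0;
      for (size_t i = 0; i < values.length(); ++i) {
        // get() hides a memcpy-based read, so 8-byte elements that are only
        // 4-byte aligned are still safe to access.
        sum += values.get(i);
      }
      return sum;
    }
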
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index 0c19104a37..e6621ccd75 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -194,9 +194,6 @@ class V8_EXPORT V8InspectorClient {
v8::Local<v8::Context>, v8::Local<v8::Value>) {
return nullptr;
}
- virtual bool formatAccessorsAsProperties(v8::Local<v8::Value>) {
- return false;
- }
virtual bool isInspectableHeapObject(v8::Local<v8::Object>) { return true; }
virtual v8::Local<v8::Context> ensureDefaultContextInGroup(
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 1826dd6fca..0222ab2f7e 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -15,9 +15,12 @@
namespace v8 {
+class Array;
class Context;
class Data;
class Isolate;
+template <typename T>
+class Local;
namespace internal {
@@ -185,6 +188,8 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
// language mode is strict.
V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+V8_EXPORT bool CanHaveInternalField(int instance_type);
+
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@@ -263,8 +268,9 @@ class Internals {
static const int kOddballType = 0x43;
static const int kForeignType = 0x46;
static const int kJSSpecialApiObjectType = 0x410;
- static const int kJSApiObjectType = 0x420;
static const int kJSObjectType = 0x421;
+ static const int kFirstJSApiObjectType = 0x422;
+ static const int kLastJSApiObjectType = 0x80A;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@@ -505,6 +511,15 @@ V8_INLINE void PerformCastCheck(T* data) {
class BackingStoreBase {};
} // namespace internal
+
+V8_EXPORT bool CopyAndConvertArrayToCppBufferInt32(Local<Array> src,
+ int32_t* dst,
+ uint32_t max_length);
+
+V8_EXPORT bool CopyAndConvertArrayToCppBufferFloat64(Local<Array> src,
+ double* dst,
+ uint32_t max_length);
+
} // namespace v8
#endif // INCLUDE_V8_INTERNAL_H_
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 30a4182357..845d32f360 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 3
-#define V8_BUILD_NUMBER 345
-#define V8_PATCH_LEVEL 19
+#define V8_MINOR_VERSION 4
+#define V8_BUILD_NUMBER 146
+#define V8_PATCH_LEVEL 18
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 1cc78f63b8..78c454e334 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -128,6 +128,7 @@ template<typename T> class PropertyCallbackInfo;
template<typename T> class ReturnValue;
namespace internal {
+class BackgroundDeserializeTask;
class BasicTracedReferenceExtractor;
class ExternalString;
class FunctionCallbackArguments;
@@ -1440,9 +1441,7 @@ class ScriptOriginOptions {
*/
class ScriptOrigin {
public:
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATE_SOON("Use constructor with primitive C++ types")
-#endif
+ V8_DEPRECATED("Use constructor with primitive C++ types")
V8_INLINE explicit ScriptOrigin(
Local<Value> resource_name, Local<Integer> resource_line_offset,
Local<Integer> resource_column_offset,
@@ -1453,9 +1452,7 @@ class ScriptOrigin {
Local<Boolean> is_wasm = Local<Boolean>(),
Local<Boolean> is_module = Local<Boolean>(),
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATE_SOON("Use constructor that takes an isolate")
-#endif
+ V8_DEPRECATED("Use constructor that takes an isolate")
V8_INLINE explicit ScriptOrigin(
Local<Value> resource_name, int resource_line_offset = 0,
int resource_column_offset = 0,
@@ -1474,11 +1471,11 @@ class ScriptOrigin {
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
V8_INLINE Local<Value> ResourceName() const;
-  V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+  V8_DEPRECATED("Use getter with primitive C++ types.")
V8_INLINE Local<Integer> ResourceLineOffset() const;
-  V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+  V8_DEPRECATED("Use getter with primitive C++ types.")
V8_INLINE Local<Integer> ResourceColumnOffset() const;
-  V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+  V8_DEPRECATED("Use getter with primitive C++ types.")
V8_INLINE Local<Integer> ScriptID() const;
V8_INLINE int LineOffset() const;
V8_INLINE int ColumnOffset() const;
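The two hunks above move ScriptOrigin callers onto the constructor that takes the isolate and primitive C++ offsets. A minimal sketch of that non-deprecated constructor in use (not part of the diff; assumes the v8 9.4 headers, and the helper name is illustrative):

#include "v8.h"

// Hypothetical helper: compiles a script using the isolate-taking,
// primitive-typed ScriptOrigin constructor instead of the deprecated
// Local<Integer>-based overloads.
v8::MaybeLocal<v8::Script> CompileWithOrigin(v8::Isolate* isolate,
                                             v8::Local<v8::Context> context,
                                             v8::Local<v8::String> code) {
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8Literal(isolate, "example.js");
  v8::ScriptOrigin origin(isolate, name,
                          /*resource_line_offset=*/0,
                          /*resource_column_offset=*/0);
  v8::ScriptCompiler::Source source(code, origin);
  return v8::ScriptCompiler::Compile(context, &source);
}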
@@ -1630,14 +1627,14 @@ class V8_EXPORT Module : public Data {
/**
* Returns the number of modules requested by this module.
*/
- V8_DEPRECATE_SOON("Use Module::GetModuleRequests() and FixedArray::Length().")
+ V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().")
int GetModuleRequestsLength() const;
/**
* Returns the ith module specifier in this module.
* i must be < GetModuleRequestsLength() and >= 0.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().")
Local<String> GetModuleRequest(int i) const;
@@ -1645,7 +1642,7 @@ class V8_EXPORT Module : public Data {
* Returns the source location (line number and column number) of the ith
* module specifier's first occurrence in this module.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and "
"Module::SourceOffsetToLocation().")
Location GetModuleRequestLocation(int i) const;
@@ -1666,7 +1663,7 @@ class V8_EXPORT Module : public Data {
*/
int GetIdentityHash() const;
- using ResolveCallback =
+ using ResolveCallback V8_DEPRECATED("Use ResolveModuleCallback") =
MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
Local<Module> referrer);
using ResolveModuleCallback = MaybeLocal<Module> (*)(
@@ -1680,7 +1677,7 @@ class V8_EXPORT Module : public Data {
* instantiation. (In the case where the callback throws an exception, that
* exception is propagated.)
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use the version of InstantiateModule that takes a ResolveModuleCallback "
"parameter")
V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
@@ -1771,13 +1768,6 @@ class V8_EXPORT Module : public Data {
*/
V8_WARN_UNUSED_RESULT Maybe<bool> SetSyntheticModuleExport(
Isolate* isolate, Local<String> export_name, Local<Value> export_value);
- V8_DEPRECATED(
- "Use the preceding SetSyntheticModuleExport with an Isolate parameter, "
- "instead of the one that follows. The former will throw a runtime "
- "error if called for an export that doesn't exist (as per spec); "
- "the latter will crash with a failed CHECK().")
- void SetSyntheticModuleExport(Local<String> export_name,
- Local<Value> export_value);
V8_INLINE static Module* Cast(Data* data);
@@ -1818,6 +1808,8 @@ enum class ScriptType { kClassic, kModule };
*/
class V8_EXPORT ScriptCompiler {
public:
+ class ConsumeCodeCacheTask;
+
/**
* Compilation data that the embedder can cache and pass back to speed up
* future compilations. The data is produced if the CompilerOptions passed to
@@ -1861,12 +1853,15 @@ class V8_EXPORT ScriptCompiler {
*/
class Source {
public:
- // Source takes ownership of CachedData.
+    // Source takes ownership of both CachedData and ConsumeCodeCacheTask.
V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
- CachedData* cached_data = nullptr);
- V8_INLINE explicit Source(Local<String> source_string,
- CachedData* cached_data = nullptr);
- V8_INLINE ~Source();
+ CachedData* cached_data = nullptr,
+ ConsumeCodeCacheTask* consume_cache_task = nullptr);
+    // Source takes ownership of both CachedData and ConsumeCodeCacheTask.
+ V8_INLINE explicit Source(
+ Local<String> source_string, CachedData* cached_data = nullptr,
+ ConsumeCodeCacheTask* consume_cache_task = nullptr);
+ V8_INLINE ~Source() = default;
// Ownership of the CachedData or its buffers is *not* transferred to the
// caller. The CachedData object is alive as long as the Source object is
@@ -1875,10 +1870,6 @@ class V8_EXPORT ScriptCompiler {
V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
- // Prevent copying.
- Source(const Source&) = delete;
- Source& operator=(const Source&) = delete;
-
private:
friend class ScriptCompiler;
@@ -1895,7 +1886,8 @@ class V8_EXPORT ScriptCompiler {
// Cached data from previous compilation (if a kConsume*Cache flag is
// set), or hold newly generated cache data (kProduce*Cache flags) are
// set when calling a compile method.
- CachedData* cached_data;
+ std::unique_ptr<CachedData> cached_data;
+ std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
};
/**
@@ -1957,12 +1949,6 @@ class V8_EXPORT ScriptCompiler {
public:
enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 };
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATED(
- "This class takes ownership of source_stream, so use the constructor "
- "taking a unique_ptr to make these semantics clearer")
-#endif
- StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
~StreamedSource();
@@ -1994,6 +1980,26 @@ class V8_EXPORT ScriptCompiler {
internal::ScriptStreamingData* data_;
};
+ /**
+ * A task which the embedder must run on a background thread to
+ * consume a V8 code cache. Returned by
+   * ScriptCompiler::StartConsumingCodeCache.
+ */
+ class V8_EXPORT ConsumeCodeCacheTask final {
+ public:
+ ~ConsumeCodeCacheTask();
+
+ void Run();
+
+ private:
+ friend class ScriptCompiler;
+
+ explicit ConsumeCodeCacheTask(
+ std::unique_ptr<internal::BackgroundDeserializeTask> impl);
+
+ std::unique_ptr<internal::BackgroundDeserializeTask> impl_;
+ };
+
enum CompileOptions {
kNoCompileOptions = 0,
kConsumeCodeCache,
@@ -2067,14 +2073,13 @@ class V8_EXPORT ScriptCompiler {
 * This API allows starting the streaming with as little data as possible, and
* the remaining data (for example, the ScriptOrigin) is passed to Compile.
*/
- V8_DEPRECATED("Use ScriptCompiler::StartStreaming instead.")
- static ScriptStreamingTask* StartStreamingScript(
- Isolate* isolate, StreamedSource* source,
- CompileOptions options = kNoCompileOptions);
static ScriptStreamingTask* StartStreaming(
Isolate* isolate, StreamedSource* source,
ScriptType type = ScriptType::kClassic);
+ static ConsumeCodeCacheTask* StartConsumingCodeCache(
+ Isolate* isolate, std::unique_ptr<CachedData> source);
+
/**
* Compiles a streamed script (bound to current context).
*
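The new ConsumeCodeCacheTask and StartConsumingCodeCache let an embedder deserialize a code cache off the main thread before compiling. A sketch of one possible flow, assuming the API added above (variable and function names are illustrative, error handling is elided, and Run() would normally execute on a worker thread rather than inline):

#include <memory>
#include "v8.h"

v8::MaybeLocal<v8::Script> CompileWithCacheTask(
    v8::Isolate* isolate, v8::Local<v8::Context> context,
    v8::Local<v8::String> code, const v8::ScriptOrigin& origin,
    const uint8_t* cache_bytes, int cache_length) {
  using SC = v8::ScriptCompiler;
  // Start background deserialization of the cache. May return nullptr,
  // e.g. when concurrent cache deserialization is disabled.
  SC::ConsumeCodeCacheTask* task = SC::StartConsumingCodeCache(
      isolate,
      std::make_unique<SC::CachedData>(cache_bytes, cache_length,
                                       SC::CachedData::BufferNotOwned));
  if (task) {
    // In a real embedder this runs on a background thread while the main
    // thread does other work; it runs inline here only for simplicity.
    task->Run();
  }
  // Source takes ownership of both the CachedData and the finished task.
  SC::Source source(code, origin,
                    new SC::CachedData(cache_bytes, cache_length,
                                       SC::CachedData::BufferNotOwned),
                    task);
  return SC::Compile(context, &source, SC::kConsumeCodeCache);
}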
@@ -4309,11 +4314,13 @@ class V8_EXPORT Object : public Value {
/**
* Returns the context in which the object was created.
*/
+  // TODO(chromium:1166077): Mark as deprecated once users are updated.
V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
Local<Context> CreationContext();
MaybeLocal<Context> GetCreationContext();
/** Same as above, but works for Persistents */
+  // TODO(chromium:1166077): Mark as deprecated once users are updated.
V8_DEPRECATE_SOON(
"Use MaybeLocal<Context> GetCreationContext(const "
"PersistentBase<Object>& object)")
@@ -6524,9 +6531,9 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Signature> signature = Local<Signature>(), int length = 0,
ConstructorBehavior behavior = ConstructorBehavior::kAllow,
SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const CFunction* c_function = nullptr, uint8_t instance_type = 0,
- uint8_t allowed_receiver_range_start = 0,
- uint8_t allowed_receiver_range_end = 0);
+ const CFunction* c_function = nullptr, uint16_t instance_type = 0,
+ uint16_t allowed_receiver_instance_type_range_start = 0,
+ uint16_t allowed_receiver_instance_type_range_end = 0);
/** Creates a function template for multiple overloaded fast API calls.*/
static Local<FunctionTemplate> NewWithCFunctionOverloads(
@@ -7246,7 +7253,9 @@ using MessageCallback = void (*)(Local<Message> message, Local<Value> data);
// --- Tracing ---
-using LogEventCallback = void (*)(const char* name, int event);
+enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 };
+using LogEventCallback = void (*)(const char* name,
+ int /* LogEventStatus */ status);
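The typedef above keeps the parameter a plain int for source compatibility, so a callback casts it back to the new LogEventStatus values. A hedged sketch (SetEventLogger is the existing registration hook; the output format is illustrative):

#include <cstdio>
#include "v8.h"

void OnLogEvent(const char* name, int status) {
  // Cast the raw int back to the enum introduced in this change.
  switch (static_cast<v8::LogEventStatus>(status)) {
    case v8::kStart: std::printf("start %s\n", name); break;
    case v8::kEnd:   std::printf("end   %s\n", name); break;
    case v8::kStamp: std::printf("stamp %s\n", name); break;
  }
}
// Registered via isolate->SetEventLogger(OnLogEvent);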
/**
* Create new error objects by calling the corresponding error object
@@ -7322,7 +7331,8 @@ using CallCompletedCallback = void (*)(Isolate*);
* fails (e.g. due to stack overflow), the embedder must propagate
* that exception by returning an empty MaybeLocal.
*/
-using HostImportModuleDynamicallyCallback =
+using HostImportModuleDynamicallyCallback V8_DEPRECATED(
+ "Use HostImportModuleDynamicallyWithImportAssertionsCallback instead") =
MaybeLocal<Promise> (*)(Local<Context> context,
Local<ScriptOrModule> referrer,
Local<String> specifier);
@@ -8369,11 +8379,6 @@ class V8_EXPORT Isolate {
*/
int embedder_wrapper_type_index = -1;
int embedder_wrapper_object_index = -1;
-
- V8_DEPRECATED(
- "Setting this has no effect. Embedders should ignore import assertions "
- "that they do not use.")
- std::vector<std::string> supported_import_assertions;
};
/**
@@ -8712,7 +8717,7 @@ class V8_EXPORT Isolate {
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use the version of SetHostImportModuleDynamicallyCallback that takes a "
"HostImportModuleDynamicallyWithImportAssertionsCallback instead")
void SetHostImportModuleDynamicallyCallback(
@@ -8885,10 +8890,6 @@ class V8_EXPORT Isolate {
std::unique_ptr<MeasureMemoryDelegate> delegate,
MeasureMemoryExecution execution = MeasureMemoryExecution::kDefault);
- V8_DEPRECATED("Use the version with a delegate")
- MaybeLocal<Promise> MeasureMemory(Local<Context> context,
- MeasureMemoryMode mode);
-
/**
* Get a call stack sample from the isolate.
* \param state Execution state.
@@ -9566,13 +9567,6 @@ class V8_EXPORT Isolate {
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
- V8_DEPRECATED(
- "Use Isolate::SetModifyCodeGenerationFromStringsCallback with "
- "ModifyCodeGenerationFromStringsCallback2 instead. See "
- "http://crbug.com/1096017 and TC39 Dynamic Code Brand Checks proposal "
- "at https://github.com/tc39/proposal-dynamic-code-brand-checks.")
- void SetModifyCodeGenerationFromStringsCallback(
- ModifyCodeGenerationFromStringsCallback callback);
void SetModifyCodeGenerationFromStringsCallback(
ModifyCodeGenerationFromStringsCallback2 callback);
@@ -9920,30 +9914,6 @@ class V8_EXPORT V8 {
*/
static void ShutdownPlatform();
-#if V8_OS_POSIX
- /**
- * Give the V8 signal handler a chance to handle a fault.
- *
- * This function determines whether a memory access violation can be recovered
- * by V8. If so, it will return true and modify context to return to a code
- * fragment that can recover from the fault. Otherwise, TryHandleSignal will
- * return false.
- *
- * The parameters to this function correspond to those passed to a Linux
- * signal handler.
- *
- * \param signal_number The signal number.
- *
- * \param info A pointer to the siginfo_t structure provided to the signal
- * handler.
- *
- * \param context The third argument passed to the Linux signal handler, which
- * points to a ucontext_t structure.
- */
- V8_DEPRECATED("Use TryHandleWebAssemblyTrapPosix")
- static bool TryHandleSignal(int signal_number, void* info, void* context);
-#endif // V8_OS_POSIX
-
/**
* Activate trap-based bounds checking for WebAssembly.
*
@@ -9971,15 +9941,6 @@ class V8_EXPORT V8 {
*/
static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
- /**
- * Notifies V8 that the process is cross-origin-isolated, which enables
- * defining the SharedArrayBuffer function on the global object of Contexts.
- */
- V8_DEPRECATED(
- "Use the command line argument --enable-sharedarraybuffer-per-context "
- "together with SetSharedArrayBufferConstructorEnabledCallback")
- static void SetIsCrossOriginIsolated();
-
private:
V8();
@@ -11590,7 +11551,8 @@ int ScriptOrigin::ScriptId() const { return script_id_; }
Local<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
- CachedData* data)
+ CachedData* data,
+ ConsumeCodeCacheTask* consume_cache_task)
: source_string(string),
resource_name(origin.ResourceName()),
resource_line_offset(origin.LineOffset()),
@@ -11598,21 +11560,18 @@ ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
resource_options(origin.Options()),
source_map_url(origin.SourceMapUrl()),
host_defined_options(origin.HostDefinedOptions()),
- cached_data(data) {}
-
-ScriptCompiler::Source::Source(Local<String> string,
- CachedData* data)
- : source_string(string), cached_data(data) {}
-
-
-ScriptCompiler::Source::~Source() {
- delete cached_data;
-}
+ cached_data(data),
+ consume_cache_task(consume_cache_task) {}
+ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
+ ConsumeCodeCacheTask* consume_cache_task)
+ : source_string(string),
+ cached_data(data),
+ consume_cache_task(consume_cache_task) {}
const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
const {
- return cached_data;
+ return cached_data.get();
}
const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
@@ -11665,10 +11624,8 @@ Local<Value> Object::GetInternalField(int index) {
A obj = *reinterpret_cast<A*>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
- auto instance_type = I::GetInstanceType(obj);
- if (instance_type == I::kJSObjectType ||
- instance_type == I::kJSApiObjectType ||
- instance_type == I::kJSSpecialApiObjectType) {
+ int instance_type = I::GetInstanceType(obj);
+ if (v8::internal::CanHaveInternalField(instance_type)) {
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
A value = I::ReadRawField<A>(obj, offset);
#ifdef V8_COMPRESS_POINTERS
@@ -11694,9 +11651,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
- if (V8_LIKELY(instance_type == I::kJSObjectType ||
- instance_type == I::kJSApiObjectType ||
- instance_type == I::kJSSpecialApiObjectType)) {
+ if (v8::internal::CanHaveInternalField(instance_type)) {
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
#ifdef V8_HEAP_SANDBOX
offset += I::kEmbedderDataSlotRawPayloadOffset;
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index c1bb691f87..b010b65dfd 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -86,51 +86,80 @@ path. Add it with -I<path> to the command line
# define V8_OS_ANDROID 1
# define V8_OS_LINUX 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "android"
+
#elif defined(__APPLE__)
# define V8_OS_BSD 1
# define V8_OS_MACOSX 1
# define V8_OS_POSIX 1
# if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
# define V8_OS_IOS 1
+# define V8_OS_STRING "ios"
+# else
+# define V8_OS_STRING "macos"
# endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+
#elif defined(__CYGWIN__)
# define V8_OS_CYGWIN 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "cygwin"
+
#elif defined(__linux__)
# define V8_OS_LINUX 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "linux"
+
#elif defined(__sun)
# define V8_OS_POSIX 1
# define V8_OS_SOLARIS 1
+# define V8_OS_STRING "sun"
+
#elif defined(STARBOARD)
# define V8_OS_STARBOARD 1
+# define V8_OS_STRING "starboard"
+
#elif defined(_AIX)
-#define V8_OS_POSIX 1
-#define V8_OS_AIX 1
+# define V8_OS_POSIX 1
+# define V8_OS_AIX 1
+# define V8_OS_STRING "aix"
+
#elif defined(__FreeBSD__)
# define V8_OS_BSD 1
# define V8_OS_FREEBSD 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "freebsd"
+
#elif defined(__Fuchsia__)
# define V8_OS_FUCHSIA 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "fuchsia"
+
#elif defined(__DragonFly__)
# define V8_OS_BSD 1
# define V8_OS_DRAGONFLYBSD 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "dragonflybsd"
+
#elif defined(__NetBSD__)
# define V8_OS_BSD 1
# define V8_OS_NETBSD 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "netbsd"
+
#elif defined(__OpenBSD__)
# define V8_OS_BSD 1
# define V8_OS_OPENBSD 1
# define V8_OS_POSIX 1
+# define V8_OS_STRING "openbsd"
+
#elif defined(__QNXNTO__)
# define V8_OS_POSIX 1
# define V8_OS_QNX 1
+# define V8_OS_STRING "qnx"
+
#elif defined(_WIN32)
# define V8_OS_WIN 1
+# define V8_OS_STRING "windows"
#endif
// -----------------------------------------------------------------------------
@@ -195,6 +224,22 @@ path. Add it with -I<path> to the command line
#endif // V8_HAVE_TARGET_OS
+#if defined(V8_TARGET_OS_ANDROID)
+# define V8_TARGET_OS_STRING "android"
+#elif defined(V8_TARGET_OS_FUCHSIA)
+# define V8_TARGET_OS_STRING "fuchsia"
+#elif defined(V8_TARGET_OS_IOS)
+# define V8_TARGET_OS_STRING "ios"
+#elif defined(V8_TARGET_OS_LINUX)
+# define V8_TARGET_OS_STRING "linux"
+#elif defined(V8_TARGET_OS_MACOSX)
+# define V8_TARGET_OS_STRING "macos"
+#elif defined(V8_TARGET_OS_WINDOWS)
+# define V8_TARGET_OS_STRING "windows"
+#else
+# define V8_TARGET_OS_STRING "unknown"
+#endif
+
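Both new macros expand to string literals, so they can be embedded directly in diagnostics or version strings at compile time. A small sketch, assuming a platform that hits one of the V8_OS_STRING branches above:

#include <cstdio>
#include "v8config.h"

void PrintPlatform() {
  // Host OS (where V8 was built) and target OS (where the code will run).
  std::printf("host OS: %s, target OS: %s\n", V8_OS_STRING,
              V8_TARGET_OS_STRING);
}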
// -----------------------------------------------------------------------------
// C library detection
//
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index aaeda39f1c..236dc1e847 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -62,7 +62,9 @@
'V8 Linux - verify csa': 'release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'release_x64',
+ 'V8 Linux64 - builder (goma cache silo)': 'release_x64',
'V8 Linux64 - builder (reclient)': 'release_x64_reclient',
+ 'V8 Linux64 - builder (reclient compare)': 'release_x64_reclient',
'V8 Linux64 - debug builder': 'debug_x64',
'V8 Linux64 - dict tracking - debug - builder': 'debug_x64_dict_tracking_trybot',
'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space',
@@ -104,6 +106,7 @@
# FYI.
'V8 iOS - sim': 'release_x64_ios_simulator',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
+ 'V8 Linux64 - disable runtime call stats': 'release_x64_disable_runtime_call_stats',
'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation',
'V8 Linux64 - pointer compression': 'release_x64_pointer_compression',
'V8 Linux64 - pointer compression without dchecks':
@@ -215,6 +218,7 @@
'release_simulate_arm64_pointer_compression',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot',
+ 'v8_linux64_disable_runtime_call_stats_rel': 'release_x64_disable_runtime_call_stats',
'v8_linux64_external_code_space_dbg_ng': 'debug_x64_external_code_space',
'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
@@ -254,7 +258,10 @@
'v8_mac_arm64_rel_ng': 'release_arm64',
'v8_mac_arm64_dbg_ng': 'debug_arm64',
'v8_mac_arm64_full_dbg_ng': 'full_debug_arm64',
+ 'v8_mac_arm64_compile_dbg': 'debug_arm64',
'v8_mac_arm64_compile_rel': 'release_arm64',
+ 'v8_mac_arm64_sim_compile_dbg': 'debug_simulate_arm64',
+ 'v8_mac_arm64_sim_compile_rel': 'release_simulate_arm64',
'v8_mac_arm64_sim_rel_ng': 'release_simulate_arm64_trybot',
'v8_mac_arm64_sim_dbg_ng': 'debug_simulate_arm64',
'v8_mac_arm64_sim_nodcheck_rel_ng': 'release_simulate_arm64',
@@ -483,6 +490,8 @@
'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'],
'release_x64_correctness_fuzzer' : [
'release_bot', 'x64', 'v8_correctness_fuzzer'],
+ 'release_x64_disable_runtime_call_stats': [
+ 'release_bot', 'x64', 'v8_disable_runtime_call_stats'],
'release_x64_fuchsia': [
'release_bot', 'x64', 'fuchsia'],
'release_x64_fuchsia_trybot': [
@@ -779,7 +788,7 @@
},
'release': {
- 'gn_args': 'is_debug=false',
+ 'gn_args': 'is_debug=false dcheck_always_on=false',
},
'release_bot': {
@@ -876,6 +885,10 @@
'gn_args': 'v8_control_flow_integrity=true',
},
+ 'v8_disable_runtime_call_stats': {
+ 'gn_args': 'v8_enable_runtime_call_stats=false',
+ },
+
'v8_enable_heap_sandbox': {
'gn_args': 'v8_enable_heap_sandbox=true',
},
diff --git a/deps/v8/infra/playground/OWNERS b/deps/v8/infra/playground/OWNERS
new file mode 100644
index 0000000000..8082f8328b
--- /dev/null
+++ b/deps/v8/infra/playground/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+almuthanna@chromium.org
+liviurau@chromium.org
+tmrts@chromium.org \ No newline at end of file
diff --git a/deps/v8/infra/playground/README.md b/deps/v8/infra/playground/README.md
new file mode 100644
index 0000000000..0e26001058
--- /dev/null
+++ b/deps/v8/infra/playground/README.md
@@ -0,0 +1 @@
+This directory's purpose is to test OWNERS enforcement. \ No newline at end of file
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index d40e4ed9e7..f37c66ba90 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -1517,8 +1517,8 @@
'priority': 35,
},
'tests': [
- {'name': 'd8testing', 'shards': 2},
- {'name': 'd8testing', 'variant': 'extra', 'shards': 2},
+ {'name': 'v8testing', 'shards': 2},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
],
},
'V8 Mac - arm64 - sim - debug': {
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 0a5e3b8480..b3fcddf2f4 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -58,6 +58,7 @@ include_rules = [
"+src/trap-handler/handler-inside-posix.h",
"+src/trap-handler/handler-inside-win.h",
"+src/trap-handler/trap-handler.h",
+ "+src/trap-handler/trap-handler-simulator.h",
"+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
"-include/libplatform",
diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h
index f6825e5922..786f849be6 100644
--- a/deps/v8/src/api/api-arguments-inl.h
+++ b/deps/v8/src/api/api-arguments-inl.h
@@ -76,7 +76,6 @@ inline JSReceiver FunctionCallbackArguments::holder() {
CALLBACK_INFO, RECEIVER, Debug::k##ACCESSOR_KIND)) { \
return RETURN_VALUE(); \
} \
- VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(values_);
@@ -85,7 +84,6 @@ inline JSReceiver FunctionCallbackArguments::holder() {
if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects) { \
return RETURN_VALUE(); \
} \
- VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(values_);
@@ -149,7 +147,6 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo handler) {
Debug::kNotAccessor)) {
return Handle<Object>();
}
- VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(values_, argv_, argc_);
f(info);
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index 9bd266395e..c5c774800b 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -9,6 +9,7 @@
#include "src/api/api.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/microtask-queue.h"
+#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/foreign-inl.h"
@@ -279,20 +280,32 @@ bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
i::DisallowGarbageCollection no_gc;
i::JSArray obj = *reinterpret_cast<i::JSArray*>(*src);
+ if (obj.IterationHasObservableEffects()) {
+ // The array has a custom iterator.
+ return false;
+ }
i::FixedArrayBase elements = obj.elements();
- if (obj.HasSmiElements()) {
- CopySmiElementsToTypedBuffer(dst, length, i::FixedArray::cast(elements));
- return true;
- } else if (obj.HasDoubleElements()) {
- CopyDoubleElementsToTypedBuffer(dst, length,
- i::FixedDoubleArray::cast(elements));
- return true;
- } else {
- return false;
+ switch (obj.GetElementsKind()) {
+ case i::PACKED_SMI_ELEMENTS:
+ CopySmiElementsToTypedBuffer(dst, length, i::FixedArray::cast(elements));
+ return true;
+ case i::PACKED_DOUBLE_ELEMENTS:
+ CopyDoubleElementsToTypedBuffer(dst, length,
+ i::FixedDoubleArray::cast(elements));
+ return true;
+ default:
+ return false;
}
}
+template <const CTypeInfo* type_info, typename T>
+inline bool V8_EXPORT TryCopyAndConvertArrayToCppBuffer(Local<Array> src,
+ T* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<type_info, T>(src, dst, max_length);
+}
+
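TryCopyAndConvertArrayToCppBuffer forwards to the element-kind dispatch above, and the typed wrappers declared earlier in v8-internal.h expose the same conversion to embedders. A hedged sketch using the Int32 wrapper (a trivial pass-through; the wrapper name comes from the declaration added in this diff):

#include <cstdint>
#include "v8.h"  // pulls in v8-internal.h, which declares the helper

// Returns false when the array has observable iteration side effects or an
// unsupported elements kind, in which case the buffer contents are unusable.
bool ReadInt32Args(v8::Local<v8::Array> array, int32_t* out,
                   uint32_t capacity) {
  return v8::CopyAndConvertArrayToCppBufferInt32(array, out, capacity);
}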
namespace internal {
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index 985f5956a8..c64107f3b8 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -528,12 +528,14 @@ MaybeHandle<JSFunction> InstantiateFunction(
Handle<HeapObject>::cast(parent_prototype));
}
}
- InstanceType function_type =
- (!data->needs_access_check() &&
- data->GetNamedPropertyHandler().IsUndefined(isolate) &&
- data->GetIndexedPropertyHandler().IsUndefined(isolate))
- ? JS_API_OBJECT_TYPE
- : JS_SPECIAL_API_OBJECT_TYPE;
+ InstanceType function_type = JS_SPECIAL_API_OBJECT_TYPE;
+ if (!data->needs_access_check() &&
+ data->GetNamedPropertyHandler().IsUndefined(isolate) &&
+ data->GetIndexedPropertyHandler().IsUndefined(isolate)) {
+ function_type = FLAG_embedder_instance_types && data->HasInstanceType()
+ ? static_cast<InstanceType>(data->InstanceType())
+ : JS_API_OBJECT_TYPE;
+ }
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
isolate, native_context, data, prototype, function_type, maybe_name);
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 84295e5cde..a8af304a53 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -30,10 +30,11 @@
#include "src/builtins/builtins-utils.h"
#include "src/codegen/compiler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/script-details.h"
#include "src/common/assert-scope.h"
#include "src/common/external-pointer.h"
#include "src/common/globals.h"
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/date/date.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -57,7 +58,7 @@
#include "src/init/v8.h"
#include "src/json/json-parser.h"
#include "src/json/json-stringifier.h"
-#include "src/logging/counters.h"
+#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/logging/tracing-flags.h"
@@ -130,9 +131,11 @@
#endif
#if V8_OS_WIN
-#include <versionhelpers.h>
#include <windows.h>
+// This has to come after windows.h.
+#include <versionhelpers.h>
+
#include "include/v8-wasm-trap-handler-win.h"
#include "src/trap-handler/handler-inside-win.h"
#if defined(V8_OS_WIN64)
@@ -1223,8 +1226,9 @@ static Local<FunctionTemplate> FunctionTemplateNew(
v8::Local<Private> cached_property_name = v8::Local<Private>(),
SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
const MemorySpan<const CFunction>& c_function_overloads = {},
- uint8_t instance_type = 0, uint8_t allowed_receiver_range_start = 0,
- uint8_t allowed_receiver_range_end = 0) {
+ uint8_t instance_type = 0,
+ uint8_t allowed_receiver_instance_type_range_start = 0,
+ uint8_t allowed_receiver_instance_type_range_end = 0) {
i::Handle<i::Struct> struct_obj = isolate->factory()->NewStruct(
i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
i::Handle<i::FunctionTemplateInfo> obj =
@@ -1247,8 +1251,10 @@ static Local<FunctionTemplate> FunctionTemplateNew(
: *Utils::OpenHandle(*cached_property_name));
if (behavior == ConstructorBehavior::kThrow) raw.set_remove_prototype(true);
raw.SetInstanceType(instance_type);
- raw.set_allowed_receiver_range_start(allowed_receiver_range_start);
- raw.set_allowed_receiver_range_end(allowed_receiver_range_end);
+ raw.set_allowed_receiver_instance_type_range_start(
+ allowed_receiver_instance_type_range_start);
+ raw.set_allowed_receiver_instance_type_range_end(
+ allowed_receiver_instance_type_range_end);
}
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type,
@@ -1261,8 +1267,8 @@ Local<FunctionTemplate> FunctionTemplate::New(
Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
v8::Local<Signature> signature, int length, ConstructorBehavior behavior,
SideEffectType side_effect_type, const CFunction* c_function,
- uint8_t instance_type, uint8_t allowed_receiver_range_start,
- uint8_t allowed_receiver_range_end) {
+ uint16_t instance_type, uint16_t allowed_receiver_instance_type_range_start,
+ uint16_t allowed_receiver_instance_type_range_end) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
@@ -1273,7 +1279,8 @@ Local<FunctionTemplate> FunctionTemplate::New(
Local<Private>(), side_effect_type,
c_function ? MemorySpan<const CFunction>{c_function, 1}
: MemorySpan<const CFunction>{},
- instance_type, allowed_receiver_range_start, allowed_receiver_range_end);
+ instance_type, allowed_receiver_instance_type_range_start,
+ allowed_receiver_instance_type_range_end);
}
Local<FunctionTemplate> FunctionTemplate::NewWithCFunctionOverloads(
@@ -1874,10 +1881,6 @@ bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
-ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
- Encoding encoding)
- : StreamedSource(std::unique_ptr<ExternalSourceStream>(stream), encoding) {}
-
ScriptCompiler::StreamedSource::StreamedSource(
std::unique_ptr<ExternalSourceStream> stream, Encoding encoding)
: impl_(new i::ScriptStreamingData(std::move(stream), encoding)) {}
@@ -1964,10 +1967,11 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
ENTER_V8(isolate, context, Script, Run, MaybeLocal<Value>(),
InternalEscapableScope);
- i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
+ i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ i::NestedTimedHistogramScope execute_timer(
+ isolate->counters()->execute_precise());
i::AggregatingHistogramTimerScope histogram_timer(
isolate->counters()->compile_lazy());
- i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
// TODO(crbug.com/1193459): remove once ablation study is completed
@@ -2102,14 +2106,15 @@ Local<FixedArray> ModuleRequest::GetImportAssertions() const {
Module::Status Module::GetStatus() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
switch (self->status()) {
- case i::Module::kUninstantiated:
- case i::Module::kPreInstantiating:
+ case i::Module::kUnlinked:
+ case i::Module::kPreLinking:
return kUninstantiated;
- case i::Module::kInstantiating:
+ case i::Module::kLinking:
return kInstantiating;
- case i::Module::kInstantiated:
+ case i::Module::kLinked:
return kInstantiated;
case i::Module::kEvaluating:
+ case i::Module::kEvaluatingAsync:
return kEvaluating;
case i::Module::kEvaluated:
return kEvaluated;
@@ -2290,13 +2295,14 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
ENTER_V8(isolate, context, Module, Evaluate, MaybeLocal<Value>(),
InternalEscapableScope);
- i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
- i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ i::NestedTimedHistogramScope execute_timer(
+ isolate->counters()->execute_precise());
+ i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::Handle<i::Module> self = Utils::OpenHandle(this);
- Utils::ApiCheck(self->status() >= i::Module::kInstantiated,
- "Module::Evaluate", "Expected instantiated module");
+ Utils::ApiCheck(self->status() >= i::Module::kLinked, "Module::Evaluate",
+ "Expected instantiated module");
Local<Value> result;
has_pending_exception = !ToLocal(i::Module::Evaluate(isolate, self), &result);
@@ -2345,31 +2351,17 @@ Maybe<bool> Module::SetSyntheticModuleExport(Isolate* isolate,
return Just(true);
}
-void Module::SetSyntheticModuleExport(Local<String> export_name,
- Local<v8::Value> export_value) {
- i::Handle<i::String> i_export_name = Utils::OpenHandle(*export_name);
- i::Handle<i::Object> i_export_value = Utils::OpenHandle(*export_value);
- i::Handle<i::Module> self = Utils::OpenHandle(this);
- ASSERT_NO_SCRIPT_NO_EXCEPTION(self->GetIsolate());
- Utils::ApiCheck(self->IsSyntheticModule(),
- "v8::Module::SetSyntheticModuleExport",
- "v8::Module::SetSyntheticModuleExport must only be called on "
- "a SyntheticModule");
- i::SyntheticModule::SetExportStrict(self->GetIsolate(),
- i::Handle<i::SyntheticModule>::cast(self),
- i_export_name, i_export_value);
-}
-
namespace {
-i::Compiler::ScriptDetails GetScriptDetails(
- i::Isolate* isolate, Local<Value> resource_name, int resource_line_offset,
- int resource_column_offset, Local<Value> source_map_url,
- Local<PrimitiveArray> host_defined_options) {
- i::Compiler::ScriptDetails script_details;
- if (!resource_name.IsEmpty()) {
- script_details.name_obj = Utils::OpenHandle(*(resource_name));
- }
+i::ScriptDetails GetScriptDetails(i::Isolate* isolate,
+ Local<Value> resource_name,
+ int resource_line_offset,
+ int resource_column_offset,
+ Local<Value> source_map_url,
+ Local<PrimitiveArray> host_defined_options,
+ ScriptOriginOptions origin_options) {
+ i::ScriptDetails script_details(Utils::OpenHandle(*(resource_name), true),
+ origin_options);
script_details.line_offset = resource_line_offset;
script_details.column_offset = resource_column_offset;
script_details.host_defined_options =
@@ -2393,29 +2385,44 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
CompileUnbound, MaybeLocal<UnboundScript>(),
InternalEscapableScope);
- i::ScriptData* script_data = nullptr;
+ i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
+
+ std::unique_ptr<i::AlignedCachedData> cached_data;
if (options == kConsumeCodeCache) {
- DCHECK(source->cached_data);
- // ScriptData takes care of pointer-aligning the data.
- script_data = new i::ScriptData(source->cached_data->data,
- source->cached_data->length);
+ if (source->consume_cache_task) {
+ // If there's a cache consume task, finish it
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ source->consume_cache_task->impl_->Finish(isolate, str,
+ source->resource_options);
+ i::Handle<i::SharedFunctionInfo> result;
+ if (maybe_function_info.ToHandle(&result)) {
+ RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
+ }
+ // If the above failed, then we must have rejected the cache. Continue
+ // with normal compilation, disabling the code cache consumption.
+ source->cached_data->rejected = true;
+ options = kNoCompileOptions;
+ } else {
+ DCHECK(source->cached_data);
+ // AlignedCachedData takes care of pointer-aligning the data.
+ cached_data.reset(new i::AlignedCachedData(source->cached_data->data,
+ source->cached_data->length));
+ }
}
- i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
- i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ i::ScriptDetails script_details = GetScriptDetails(
isolate, source->resource_name, source->resource_line_offset,
source->resource_column_offset, source->source_map_url,
- source->host_defined_options);
+ source->host_defined_options, source->resource_options);
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, script_details, source->resource_options, nullptr,
- script_data, options, no_cache_reason, i::NOT_NATIVES_CODE);
+ isolate, str, script_details, nullptr, cached_data.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
if (options == kConsumeCodeCache) {
- source->cached_data->rejected = script_data->rejected();
+ source->cached_data->rejected = cached_data->rejected();
}
- delete script_data;
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
@@ -2532,30 +2539,28 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
extension);
}
- i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ i::ScriptDetails script_details = GetScriptDetails(
isolate, source->resource_name, source->resource_line_offset,
source->resource_column_offset, source->source_map_url,
- source->host_defined_options);
+ source->host_defined_options, source->resource_options);
- i::ScriptData* script_data = nullptr;
+ std::unique_ptr<i::AlignedCachedData> cached_data;
if (options == kConsumeCodeCache) {
DCHECK(source->cached_data);
// ScriptData takes care of pointer-aligning the data.
- script_data = new i::ScriptData(source->cached_data->data,
- source->cached_data->length);
+ cached_data.reset(new i::AlignedCachedData(source->cached_data->data,
+ source->cached_data->length));
}
i::Handle<i::JSFunction> scoped_result;
has_pending_exception =
!i::Compiler::GetWrappedFunction(
Utils::OpenHandle(*source->source_string), arguments_list, context,
- script_details, source->resource_options, script_data, options,
- no_cache_reason)
+ script_details, cached_data.get(), options, no_cache_reason)
.ToHandle(&scoped_result);
if (options == kConsumeCodeCache) {
- source->cached_data->rejected = script_data->rejected();
+ source->cached_data->rejected = cached_data->rejected();
}
- delete script_data;
RETURN_ON_FAILED_EXECUTION(Function);
result = handle_scope.Escape(Utils::CallableToLocal(scoped_result));
}
@@ -2574,14 +2579,6 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
-ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
- Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
- // We don't support other compile options on streaming background compiles.
- // TODO(rmcilroy): remove CompileOptions from the API.
- CHECK(options == ScriptCompiler::kNoCompileOptions);
- return StartStreaming(v8_isolate, source);
-}
-
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming(
Isolate* v8_isolate, StreamedSource* source, v8::ScriptType type) {
if (!i::FLAG_script_streaming) return nullptr;
@@ -2594,18 +2591,36 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming(
return new ScriptCompiler::ScriptStreamingTask(data);
}
+ScriptCompiler::ConsumeCodeCacheTask::ConsumeCodeCacheTask(
+ std::unique_ptr<i::BackgroundDeserializeTask> impl)
+ : impl_(std::move(impl)) {}
+
+ScriptCompiler::ConsumeCodeCacheTask::~ConsumeCodeCacheTask() = default;
+
+void ScriptCompiler::ConsumeCodeCacheTask::Run() { impl_->Run(); }
+
+ScriptCompiler::ConsumeCodeCacheTask* ScriptCompiler::StartConsumingCodeCache(
+ Isolate* v8_isolate, std::unique_ptr<CachedData> cached_data) {
+ if (!i::FLAG_concurrent_cache_deserialization) return nullptr;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
+ return new ScriptCompiler::ConsumeCodeCacheTask(
+ std::make_unique<i::BackgroundDeserializeTask>(isolate,
+ std::move(cached_data)));
+}
+
namespace {
i::MaybeHandle<i::SharedFunctionInfo> CompileStreamedSource(
i::Isolate* isolate, ScriptCompiler::StreamedSource* v8_source,
Local<String> full_source_string, const ScriptOrigin& origin) {
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
- i::Compiler::ScriptDetails script_details =
+ i::ScriptDetails script_details =
GetScriptDetails(isolate, origin.ResourceName(), origin.LineOffset(),
origin.ColumnOffset(), origin.SourceMapUrl(),
- origin.HostDefinedOptions());
+ origin.HostDefinedOptions(), origin.Options());
i::ScriptStreamingData* data = v8_source->impl();
return i::Compiler::GetSharedFunctionInfoForStreamedScript(
- isolate, str, script_details, origin.Options(), data);
+ isolate, str, script_details, data);
}
} // namespace
@@ -3676,6 +3691,13 @@ bool i::ShouldThrowOnError(i::Isolate* isolate) {
i::ShouldThrow::kThrowOnError;
}
+bool i::CanHaveInternalField(int instance_type) {
+ return instance_type == i::Internals::kJSObjectType ||
+ instance_type == i::Internals::kJSSpecialApiObjectType ||
+ v8::internal::InstanceTypeChecker::IsJSApiObject(
+ static_cast<v8::internal::InstanceType>(instance_type));
+}
+
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
Utils::ApiCheck(isolate != nullptr && !isolate->IsDead(),
@@ -4947,6 +4969,8 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
ENTER_V8(isolate, context, Object, CallAsFunction, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ i::NestedTimedHistogramScope execute_timer(
+ isolate->counters()->execute_precise());
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
@@ -4965,6 +4989,8 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
ENTER_V8(isolate, context, Object, CallAsConstructor, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ i::NestedTimedHistogramScope execute_timer(
+ isolate->counters()->execute_precise());
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -5002,6 +5028,8 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal<Object>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ i::NestedTimedHistogramScope execute_timer(
+ isolate->counters()->execute_precise());
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
bool should_set_has_no_side_effect =
@@ -5051,6 +5079,8 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
ENTER_V8(isolate, context, Function, Call, MaybeLocal<Value>(),
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ i::NestedTimedHistogramScope execute_timer(
+ isolate->counters()->execute_precise());
auto self = Utils::OpenHandle(this);
Utils::ApiCheck(!self.is_null(), "v8::Function::Call",
"Function to be called is a null pointer");
@@ -5859,29 +5889,17 @@ bool v8::V8::Initialize(const int build_config) {
#if V8_OS_LINUX || V8_OS_MACOSX
bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info,
void* context) {
- // When the target code runs on the V8 arm simulator, the trap handler does
- // not behave as expected: the instruction pointer points inside the simulator
- // code rather than the wasm code, so the trap handler cannot find the landing
- // pad and lets the process crash. Therefore, only enable trap handlers if
- // the host and target arch are the same.
-#if V8_ENABLE_WEBASSEMBLY && \
- ((V8_TARGET_ARCH_X64 && !V8_OS_ANDROID) || \
- (V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64 && V8_OS_MACOSX))
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
return i::trap_handler::TryHandleSignal(sig_code, info, context);
#else
return false;
#endif
}
-
-bool V8::TryHandleSignal(int signum, void* info, void* context) {
- return TryHandleWebAssemblyTrapPosix(
- signum, reinterpret_cast<siginfo_t*>(info), context);
-}
#endif
#if V8_OS_WIN
bool TryHandleWebAssemblyTrapWindows(EXCEPTION_POINTERS* exception) {
-#if V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_X64
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
return i::trap_handler::TryHandleWasmTrap(exception);
#else
return false;
@@ -5984,8 +6002,6 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
-void V8::SetIsCrossOriginIsolated() {}
-
template <typename ObjectType>
struct InvokeBootstrapper;
@@ -8684,11 +8700,6 @@ bool Isolate::GetHeapCodeAndMetadataStatistics(
return true;
}
-v8::MaybeLocal<v8::Promise> Isolate::MeasureMemory(
- v8::Local<v8::Context> context, MeasureMemoryMode mode) {
- return v8::MaybeLocal<v8::Promise>();
-}
-
bool Isolate::MeasureMemory(std::unique_ptr<MeasureMemoryDelegate> delegate,
MeasureMemoryExecution execution) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8888,7 +8899,7 @@ bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
void Isolate::LowMemoryNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
{
- i::HistogramTimerScope idle_notification_scope(
+ i::NestedTimedHistogramScope idle_notification_scope(
isolate->counters()->gc_low_memory_notification());
TRACE_EVENT0("v8", "V8.GCLowMemoryNotification");
isolate->heap()->CollectAllAvailableGarbage(
@@ -9631,7 +9642,7 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)
- ->set_detailed_source_positions_for_profiling(true);
+ ->SetDetailedSourcePositionsForProfiling(true);
}
uintptr_t CodeEvent::GetCodeStartAddress() {
@@ -10034,6 +10045,10 @@ const CTypeInfo& CFunctionInfo::ArgumentInfo(unsigned int index) const {
return arg_info_[index];
}
+void FastApiTypedArrayBase::ValidateIndex(size_t index) const {
+ DCHECK_LT(index, length_);
+}
+
RegisterState::RegisterState()
: pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
RegisterState::~RegisterState() = default;
@@ -10234,7 +10249,6 @@ void InvokeAccessorGetterCallback(
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback);
Address getter_address = reinterpret_cast<Address>(getter);
- VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, getter_address);
getter(property, info);
}
@@ -10244,7 +10258,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback);
Address callback_address = reinterpret_cast<Address>(callback);
- VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, callback_address);
callback(info);
}
@@ -10322,6 +10335,7 @@ bool ConvertDouble(double d) {
#undef CALLBACK_SETTER
} // namespace internal
+
} // namespace v8
#undef TRACE_BS
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 30fb983d28..888157dc61 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -316,6 +316,43 @@ inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
}
}
+inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src,
+ size_t bytes) {
+ // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there
+ // is no overlap, or {dst} lies before {src}.
+ // This single check checks for both:
+ if (reinterpret_cast<uintptr_t>(dst) - reinterpret_cast<uintptr_t>(src) >=
+ bytes) {
+ Relaxed_Memcpy(dst, src, bytes);
+ return;
+ }
+
+ // Otherwise copy backwards.
+ dst += bytes;
+ src += bytes;
+ constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
+ while (bytes > 0 &&
+ !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
+ Relaxed_Store(--dst, Relaxed_Load(--src));
+ --bytes;
+ }
+ if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
+ IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
+ while (bytes >= kAtomicWordSize) {
+ dst -= kAtomicWordSize;
+ src -= kAtomicWordSize;
+ bytes -= kAtomicWordSize;
+ Relaxed_Store(
+ reinterpret_cast<volatile AtomicWord*>(dst),
+ Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
+ }
+ }
+ while (bytes > 0) {
+ Relaxed_Store(--dst, Relaxed_Load(--src));
+ --bytes;
+ }
+}
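Like memmove, the new Relaxed_Memmove picks the copy direction so that overlapping source and destination ranges stay correct. A small sketch against the internal v8::base header (the helper name is illustrative):

#include <cstddef>
#include "src/base/atomicops.h"

// Shift the contents of an atomic byte buffer left by |by| bytes. The source
// and destination ranges overlap, which Relaxed_Memcpy would not allow.
void ShiftLeft(volatile v8::base::Atomic8* buffer, size_t size, size_t by) {
  if (by >= size) return;
  v8::base::Relaxed_Memmove(buffer, buffer + by, size - by);
}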
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index f790dfaab4..5c31addd39 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -194,7 +194,9 @@ inline size_t RoundUpToPowerOfTwo(size_t value) {
if (sizeof(size_t) == sizeof(uint64_t)) {
return RoundUpToPowerOfTwo64(value);
} else {
- return RoundUpToPowerOfTwo32(value);
+ // Without windows.h included this line triggers a truncation warning on
+ // 64-bit builds. Presumably windows.h disables the relevant warning.
+ return RoundUpToPowerOfTwo32(static_cast<uint32_t>(value));
}
}
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index e6c1a61bcb..fa7b10324d 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -14,9 +14,9 @@ BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
commit_page_size_(page_allocator->CommitPageSize()),
page_allocator_(page_allocator),
region_allocator_(start, size, allocate_page_size_) {
- CHECK_NOT_NULL(page_allocator);
- CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
- CHECK(IsAligned(allocate_page_size_, commit_page_size_));
+ DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
+ DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
}
BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
@@ -29,11 +29,11 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
size_t alignment,
PageAllocator::Permission access) {
MutexGuard guard(&mutex_);
- CHECK(IsAligned(alignment, region_allocator_.page_size()));
+ DCHECK(IsAligned(alignment, region_allocator_.page_size()));
  // Region allocator does not support alignments bigger than its own
// allocation alignment.
- CHECK_LE(alignment, allocate_page_size_);
+ DCHECK_LE(alignment, allocate_page_size_);
// TODO(ishell): Consider using randomized version here.
Address address = region_allocator_.AllocateRegion(size);
@@ -47,13 +47,18 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
PageAllocator::Permission access) {
- CHECK(IsAligned(address, allocate_page_size_));
- CHECK(IsAligned(size, allocate_page_size_));
- CHECK(region_allocator_.contains(address, size));
+ DCHECK(IsAligned(address, allocate_page_size_));
+ DCHECK(IsAligned(size, allocate_page_size_));
- if (!region_allocator_.AllocateRegionAt(address, size)) {
- return false;
+ {
+ MutexGuard guard(&mutex_);
+ DCHECK(region_allocator_.contains(address, size));
+
+ if (!region_allocator_.AllocateRegionAt(address, size)) {
+ return false;
+ }
}
+
CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
access));
return true;
@@ -62,16 +67,20 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
size_t size) {
Address address = reinterpret_cast<Address>(ptr);
- CHECK(IsAligned(address, allocate_page_size_));
- CHECK(IsAligned(size, commit_page_size_));
- CHECK(region_allocator_.contains(address, size));
-
- // Region allocator requires page size rather than commit size so just over-
- // allocate there since any extra space couldn't be used anyway.
- size_t region_size = RoundUp(size, allocate_page_size_);
- if (!region_allocator_.AllocateRegionAt(
- address, region_size, RegionAllocator::RegionState::kExcluded)) {
- return false;
+ DCHECK(IsAligned(address, allocate_page_size_));
+ DCHECK(IsAligned(size, commit_page_size_));
+
+ {
+ MutexGuard guard(&mutex_);
+ DCHECK(region_allocator_.contains(address, size));
+
+ // Region allocator requires page size rather than commit size so just over-
+ // allocate there since any extra space couldn't be used anyway.
+ size_t region_size = RoundUp(size, allocate_page_size_);
+ if (!region_allocator_.AllocateRegionAt(
+ address, region_size, RegionAllocator::RegionState::kExcluded)) {
+ return false;
+ }
}
CHECK(page_allocator_->SetPermissions(ptr, size,
@@ -93,7 +102,7 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
size_t new_size) {
Address address = reinterpret_cast<Address>(raw_address);
- CHECK(IsAligned(address, allocate_page_size_));
+ DCHECK(IsAligned(address, allocate_page_size_));
DCHECK_LT(new_size, size);
DCHECK(IsAligned(size - new_size, commit_page_size_));
@@ -107,7 +116,7 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// There must be an allocated region at given |address| of a size not
// smaller than |size|.
MutexGuard guard(&mutex_);
- CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
+ DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
}
#endif
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 260747201a..d7a0c9f3cf 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -211,6 +211,13 @@
#error Unknown target architecture endianness
#endif
+// pthread_jit_write_protect is only available on arm64 Mac.
+#if defined(V8_OS_MACOSX) && !defined(V8_OS_IOS) && defined(V8_HOST_ARCH_ARM64)
+#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 1
+#else
+#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 0
+#endif
+
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK true
#else
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 2defe61928..1438c88337 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -45,7 +45,7 @@ void* PageAllocator::GetRandomMmapAddr() {
void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) {
-#if !(V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT))
+#if !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
// kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular
// kNoAccess on other platforms, so code doesn't have to handle both enum
// values.
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 04ea29181b..5ab66d39a4 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -9,6 +9,10 @@
#include "src/base/platform/time.h"
+#if V8_OS_WIN
+#include <windows.h>
+#endif
+
namespace v8 {
namespace base {
@@ -119,22 +123,25 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
#elif V8_OS_WIN
ConditionVariable::ConditionVariable() {
- InitializeConditionVariable(&native_handle_);
+ InitializeConditionVariable(V8ToWindowsType(&native_handle_));
}
ConditionVariable::~ConditionVariable() {}
-void ConditionVariable::NotifyOne() { WakeConditionVariable(&native_handle_); }
+void ConditionVariable::NotifyOne() {
+ WakeConditionVariable(V8ToWindowsType(&native_handle_));
+}
void ConditionVariable::NotifyAll() {
- WakeAllConditionVariable(&native_handle_);
+ WakeAllConditionVariable(V8ToWindowsType(&native_handle_));
}
void ConditionVariable::Wait(Mutex* mutex) {
mutex->AssertHeldAndUnmark();
- SleepConditionVariableSRW(&native_handle_, &mutex->native_handle(), INFINITE,
+ SleepConditionVariableSRW(V8ToWindowsType(&native_handle_),
+ V8ToWindowsType(&mutex->native_handle()), INFINITE,
0);
mutex->AssertUnheldAndMark();
}
@@ -144,7 +151,8 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
int64_t msec = rel_time.InMilliseconds();
mutex->AssertHeldAndUnmark();
BOOL result = SleepConditionVariableSRW(
- &native_handle_, &mutex->native_handle(), static_cast<DWORD>(msec), 0);
+ V8ToWindowsType(&native_handle_),
+ V8ToWindowsType(&mutex->native_handle()), static_cast<DWORD>(msec), 0);
#ifdef DEBUG
if (!result) {
// On failure, we only expect the CV to timeout. Any other error value means
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 79e653a32a..3ca6ba8d1b 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -69,7 +69,7 @@ class V8_BASE_EXPORT ConditionVariable final {
#if V8_OS_POSIX
using NativeHandle = pthread_cond_t;
#elif V8_OS_WIN
- using NativeHandle = CONDITION_VARIABLE;
+ using NativeHandle = V8_CONDITION_VARIABLE;
#elif V8_OS_STARBOARD
using NativeHandle = SbConditionVariable;
#endif
diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h
index 3406831cbe..2947c31237 100644
--- a/deps/v8/src/base/platform/elapsed-timer.h
+++ b/deps/v8/src/base/platform/elapsed-timer.h
@@ -13,16 +13,17 @@ namespace base {
class ElapsedTimer final {
public:
-#ifdef DEBUG
- ElapsedTimer() : started_(false) {}
-#endif
+ ElapsedTimer() : start_ticks_() {}
// Starts this timer. Once started a timer can be checked with
// |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
// This method must not be called on an already started timer.
- void Start() {
+ void Start() { Start(Now()); }
+
+ void Start(TimeTicks now) {
+ DCHECK(!now.IsNull());
DCHECK(!IsStarted());
- start_ticks_ = Now();
+ set_start_ticks(now);
#ifdef DEBUG
started_ = true;
#endif
@@ -33,7 +34,7 @@ class ElapsedTimer final {
// started before.
void Stop() {
DCHECK(IsStarted());
- start_ticks_ = TimeTicks();
+ set_start_ticks(TimeTicks());
#ifdef DEBUG
started_ = false;
#endif
@@ -42,31 +43,65 @@ class ElapsedTimer final {
// Returns |true| if this timer was started previously.
bool IsStarted() const {
- DCHECK(started_ || start_ticks_.IsNull());
- DCHECK(!started_ || !start_ticks_.IsNull());
+ DCHECK(!paused_);
+ DCHECK_NE(started_, start_ticks_.IsNull());
return !start_ticks_.IsNull();
}
+#if DEBUG
+ bool IsPaused() const { return paused_; }
+#endif
+
// Restarts the timer and returns the time elapsed since the previous start.
// This method is equivalent to obtaining the elapsed time with |Elapsed()|
// and then starting the timer again, but does so in one single operation,
// avoiding the need to obtain the clock value twice. It may only be called
// on a previously started timer.
- TimeDelta Restart() {
+ TimeDelta Restart() { return Restart(Now()); }
+
+ TimeDelta Restart(TimeTicks now) {
+ DCHECK(!now.IsNull());
DCHECK(IsStarted());
- TimeTicks ticks = Now();
- TimeDelta elapsed = ticks - start_ticks_;
+ TimeDelta elapsed = now - start_ticks_;
DCHECK_GE(elapsed.InMicroseconds(), 0);
- start_ticks_ = ticks;
+ set_start_ticks(now);
DCHECK(IsStarted());
return elapsed;
}
+ void Pause() { Pause(Now()); }
+
+ void Pause(TimeTicks now) {
+ TimeDelta elapsed = Elapsed(now);
+ DCHECK(IsStarted());
+#ifdef DEBUG
+ paused_ = true;
+#endif
+ set_paused_elapsed(elapsed);
+ }
+
+ void Resume() { Resume(Now()); }
+
+ void Resume(TimeTicks now) {
+ DCHECK(!now.IsNull());
+ DCHECK(started_);
+ DCHECK(paused_);
+ TimeDelta elapsed = paused_elapsed();
+#ifdef DEBUG
+ paused_ = false;
+#endif
+ set_start_ticks(now - elapsed);
+ DCHECK(IsStarted());
+ }
+
// Returns the time elapsed since the previous start. This method may only
// be called on a previously started timer.
- TimeDelta Elapsed() const {
+ TimeDelta Elapsed() const { return Elapsed(Now()); }
+
+ TimeDelta Elapsed(TimeTicks now) const {
+ DCHECK(!now.IsNull());
DCHECK(IsStarted());
- TimeDelta elapsed = Now() - start_ticks_;
+ TimeDelta elapsed = now - start_ticks();
DCHECK_GE(elapsed.InMicroseconds(), 0);
return elapsed;
}
@@ -86,9 +121,35 @@ class ElapsedTimer final {
return now;
}
- TimeTicks start_ticks_;
+ TimeDelta paused_elapsed() {
+ // Check started_ (not paused_elapsed_) since paused_elapsed_ can be 0.
+ DCHECK(paused_);
+ DCHECK(started_);
+ return paused_elapsed_;
+ }
+
+ void set_paused_elapsed(TimeDelta delta) {
+ DCHECK(paused_);
+ DCHECK(started_);
+ paused_elapsed_ = delta;
+ }
+
+ TimeTicks start_ticks() const {
+ DCHECK(!paused_);
+ return start_ticks_;
+ }
+ void set_start_ticks(TimeTicks start_ticks) {
+ DCHECK(!paused_);
+ start_ticks_ = start_ticks;
+ }
+
+ union {
+ TimeTicks start_ticks_;
+ TimeDelta paused_elapsed_;
+ };
#ifdef DEBUG
- bool started_;
+ bool started_ = false;
+ bool paused_ = false;
#endif
};
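The reworked ElapsedTimer keeps either start_ticks_ or paused_elapsed_ live in a union, so Pause() freezes the elapsed delta and Resume() rebases start_ticks_ to now minus that delta. A minimal usage sketch, assuming only the API shown above; the work being measured is illustrative:

#include "src/base/platform/elapsed-timer.h"

void MeasureWithPause() {
  v8::base::ElapsedTimer timer;
  timer.Start();                   // timer may now be checked or restarted
  // ... timed work ...
  timer.Pause();                   // elapsed time is captured and frozen
  // ... untimed work, not counted ...
  timer.Resume();                  // start_ticks_ becomes now - frozen elapsed
  v8::base::TimeDelta elapsed = timer.Elapsed();  // timed work only
  timer.Stop();
  static_cast<void>(elapsed);
}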
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index 5a347246a9..7bf60996ee 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -10,6 +10,10 @@
#include <unordered_set>
#endif // DEBUG
+#if V8_OS_WIN
+#include <windows.h>
+#endif
+
namespace v8 {
namespace base {
@@ -218,6 +222,37 @@ bool RecursiveMutex::TryLock() {
return true;
}
+#if V8_OS_MACOSX
+
+SharedMutex::SharedMutex() { InitializeNativeHandle(&native_handle_); }
+
+SharedMutex::~SharedMutex() { DestroyNativeHandle(&native_handle_); }
+
+void SharedMutex::LockShared() { LockExclusive(); }
+
+void SharedMutex::LockExclusive() {
+ DCHECK(TryHoldSharedMutex(this));
+ LockNativeHandle(&native_handle_);
+}
+
+void SharedMutex::UnlockShared() { UnlockExclusive(); }
+
+void SharedMutex::UnlockExclusive() {
+ DCHECK(TryReleaseSharedMutex(this));
+ UnlockNativeHandle(&native_handle_);
+}
+
+bool SharedMutex::TryLockShared() { return TryLockExclusive(); }
+
+bool SharedMutex::TryLockExclusive() {
+ DCHECK(SharedMutexNotHeld(this));
+ if (!TryLockNativeHandle(&native_handle_)) return false;
+ DCHECK(TryHoldSharedMutex(this));
+ return true;
+}
+
+#else // !V8_OS_MACOSX
+
SharedMutex::SharedMutex() { pthread_rwlock_init(&native_handle_, nullptr); }
SharedMutex::~SharedMutex() {
@@ -266,6 +301,8 @@ bool SharedMutex::TryLockExclusive() {
return result;
}
+#endif // !V8_OS_MACOSX
+
#elif V8_OS_WIN
Mutex::Mutex() : native_handle_(SRWLOCK_INIT) {
@@ -281,19 +318,19 @@ Mutex::~Mutex() {
void Mutex::Lock() {
- AcquireSRWLockExclusive(&native_handle_);
+ AcquireSRWLockExclusive(V8ToWindowsType(&native_handle_));
AssertUnheldAndMark();
}
void Mutex::Unlock() {
AssertHeldAndUnmark();
- ReleaseSRWLockExclusive(&native_handle_);
+ ReleaseSRWLockExclusive(V8ToWindowsType(&native_handle_));
}
bool Mutex::TryLock() {
- if (!TryAcquireSRWLockExclusive(&native_handle_)) {
+ if (!TryAcquireSRWLockExclusive(V8ToWindowsType(&native_handle_))) {
return false;
}
AssertUnheldAndMark();
@@ -302,7 +339,7 @@ bool Mutex::TryLock() {
RecursiveMutex::RecursiveMutex() {
- InitializeCriticalSection(&native_handle_);
+ InitializeCriticalSection(V8ToWindowsType(&native_handle_));
#ifdef DEBUG
level_ = 0;
#endif
@@ -310,13 +347,13 @@ RecursiveMutex::RecursiveMutex() {
RecursiveMutex::~RecursiveMutex() {
- DeleteCriticalSection(&native_handle_);
+ DeleteCriticalSection(V8ToWindowsType(&native_handle_));
DCHECK_EQ(0, level_);
}
void RecursiveMutex::Lock() {
- EnterCriticalSection(&native_handle_);
+ EnterCriticalSection(V8ToWindowsType(&native_handle_));
#ifdef DEBUG
DCHECK_LE(0, level_);
level_++;
@@ -329,12 +366,12 @@ void RecursiveMutex::Unlock() {
DCHECK_LT(0, level_);
level_--;
#endif
- LeaveCriticalSection(&native_handle_);
+ LeaveCriticalSection(V8ToWindowsType(&native_handle_));
}
bool RecursiveMutex::TryLock() {
- if (!TryEnterCriticalSection(&native_handle_)) {
+ if (!TryEnterCriticalSection(V8ToWindowsType(&native_handle_))) {
return false;
}
#ifdef DEBUG
@@ -350,34 +387,34 @@ SharedMutex::~SharedMutex() {}
void SharedMutex::LockShared() {
DCHECK(TryHoldSharedMutex(this));
- AcquireSRWLockShared(&native_handle_);
+ AcquireSRWLockShared(V8ToWindowsType(&native_handle_));
}
void SharedMutex::LockExclusive() {
DCHECK(TryHoldSharedMutex(this));
- AcquireSRWLockExclusive(&native_handle_);
+ AcquireSRWLockExclusive(V8ToWindowsType(&native_handle_));
}
void SharedMutex::UnlockShared() {
DCHECK(TryReleaseSharedMutex(this));
- ReleaseSRWLockShared(&native_handle_);
+ ReleaseSRWLockShared(V8ToWindowsType(&native_handle_));
}
void SharedMutex::UnlockExclusive() {
DCHECK(TryReleaseSharedMutex(this));
- ReleaseSRWLockExclusive(&native_handle_);
+ ReleaseSRWLockExclusive(V8ToWindowsType(&native_handle_));
}
bool SharedMutex::TryLockShared() {
DCHECK(SharedMutexNotHeld(this));
- bool result = TryAcquireSRWLockShared(&native_handle_);
+ bool result = TryAcquireSRWLockShared(V8ToWindowsType(&native_handle_));
if (result) DCHECK(TryHoldSharedMutex(this));
return result;
}
bool SharedMutex::TryLockExclusive() {
DCHECK(SharedMutexNotHeld(this));
- bool result = TryAcquireSRWLockExclusive(&native_handle_);
+ bool result = TryAcquireSRWLockExclusive(V8ToWindowsType(&native_handle_));
if (result) DCHECK(TryHoldSharedMutex(this));
return result;
}
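On macOS, SharedMutex now degrades shared operations to exclusive ones because pthread_rwlock_t misbehaves when the process receives signals (crbug.com/v8/11399, see the mutex.h hunk below). Callers are unaffected since the interface is unchanged; a minimal usage sketch with hypothetical reader/writer functions:

#include "src/base/platform/mutex.h"

v8::base::SharedMutex shared_mutex;

void ReadSharedState() {
  shared_mutex.LockShared();    // on macOS this currently takes the lock exclusively
  // ... read-only access to shared state ...
  shared_mutex.UnlockShared();
}

void WriteSharedState() {
  shared_mutex.LockExclusive();
  // ... mutate shared state ...
  shared_mutex.UnlockExclusive();
}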
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 328c593a30..5fefa25ab6 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -66,7 +66,7 @@ class V8_BASE_EXPORT Mutex final {
#if V8_OS_POSIX
using NativeHandle = pthread_mutex_t;
#elif V8_OS_WIN
- using NativeHandle = SRWLOCK;
+ using NativeHandle = V8_SRWLOCK;
#elif V8_OS_STARBOARD
using NativeHandle = SbMutex;
#endif
@@ -171,7 +171,7 @@ class V8_BASE_EXPORT RecursiveMutex final {
#if V8_OS_POSIX
using NativeHandle = pthread_mutex_t;
#elif V8_OS_WIN
- using NativeHandle = CRITICAL_SECTION;
+ using NativeHandle = V8_CRITICAL_SECTION;
#elif V8_OS_STARBOARD
using NativeHandle = starboard::RecursiveMutex;
#endif
@@ -265,10 +265,15 @@ class V8_BASE_EXPORT SharedMutex final {
private:
// The implementation-defined native handle type.
-#if V8_OS_POSIX
+#if V8_OS_MACOSX
+ // pthread_rwlock_t is broken on MacOS when signals are being sent to the
+ // process (see https://crbug.com/v8/11399). Until Apple fixes that in the OS,
+ // we have to fall back to a non-shared mutex.
+ using NativeHandle = pthread_mutex_t;
+#elif V8_OS_POSIX
using NativeHandle = pthread_rwlock_t;
#elif V8_OS_WIN
- using NativeHandle = SRWLOCK;
+ using NativeHandle = V8_SRWLOCK;
#elif V8_OS_STARBOARD
using NativeHandle = starboard::RWLock;
#endif
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 9538d81671..bd0000c4a1 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -127,8 +127,10 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
// static
bool OS::DiscardSystemPages(void* address, size_t size) {
- // TODO(hpayer): Does Fuchsia have madvise?
- return true;
+ uint64_t address_int = reinterpret_cast<uint64_t>(address);
+ zx_status_t status = zx::vmar::root_self()->op_range(
+ ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0);
+ return status == ZX_OK;
}
// static
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index ea6ba0585f..179a17cc0f 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -153,7 +153,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
flags |= MAP_LAZY;
#endif // V8_OS_QNX
}
-#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT)
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
flags |= MAP_JIT;
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 9fbb257076..79c1aa06ce 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -15,9 +15,15 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
-#include <limits>
+#include <windows.h>
-#include "src/base/win32-headers.h"
+// This has to come after windows.h.
+#include <VersionHelpers.h>
+#include <dbghelp.h> // For SymLoadModule64 et al.
+#include <mmsystem.h> // For timeGetTime().
+#include <tlhelp32.h> // For Module32First et al.
+
+#include <limits>
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
@@ -26,13 +32,34 @@
#include "src/base/platform/time.h"
#include "src/base/timezone-cache.h"
#include "src/base/utils/random-number-generator.h"
-
-#include <VersionHelpers.h>
+#include "src/base/win32-headers.h"
#if defined(_MSC_VER)
#include <crtdbg.h>
#endif // defined(_MSC_VER)
+// Check that type sizes and alignments match.
+STATIC_ASSERT(sizeof(V8_CONDITION_VARIABLE) == sizeof(CONDITION_VARIABLE));
+STATIC_ASSERT(alignof(V8_CONDITION_VARIABLE) == alignof(CONDITION_VARIABLE));
+STATIC_ASSERT(sizeof(V8_SRWLOCK) == sizeof(SRWLOCK));
+STATIC_ASSERT(alignof(V8_SRWLOCK) == alignof(SRWLOCK));
+STATIC_ASSERT(sizeof(V8_CRITICAL_SECTION) == sizeof(CRITICAL_SECTION));
+STATIC_ASSERT(alignof(V8_CRITICAL_SECTION) == alignof(CRITICAL_SECTION));
+
+// Check that CRITICAL_SECTION offsets match.
+STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, DebugInfo) ==
+ offsetof(CRITICAL_SECTION, DebugInfo));
+STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, LockCount) ==
+ offsetof(CRITICAL_SECTION, LockCount));
+STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, RecursionCount) ==
+ offsetof(CRITICAL_SECTION, RecursionCount));
+STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, OwningThread) ==
+ offsetof(CRITICAL_SECTION, OwningThread));
+STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, LockSemaphore) ==
+ offsetof(CRITICAL_SECTION, LockSemaphore));
+STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, SpinCount) ==
+ offsetof(CRITICAL_SECTION, SpinCount));
+
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
#ifdef __MINGW32__
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 7beefbe572..6ad724db08 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -40,6 +40,17 @@
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
+#ifndef V8_NO_FAST_TLS
+#if V8_CC_MSVC && V8_HOST_ARCH_IA32
+// __readfsdword is supposed to be declared in intrin.h but it is missing from
+// some versions of that file. See https://bugs.llvm.org/show_bug.cgi?id=51188
+// And, intrin.h is a very expensive header that we want to avoid here, and
+// the cheaper intrin0.h is not available for all build configurations. That is
+// why we declare this intrinsic.
+unsigned long __readfsdword(unsigned long); // NOLINT(runtime/int)
+#endif // V8_CC_MSVC && V8_HOST_ARCH_IA32
+#endif // V8_NO_FAST_TLS
+
namespace v8 {
namespace base {
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 0cd04634ba..2fc748da87 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -6,6 +6,8 @@
#if V8_OS_MACOSX
#include <dispatch/dispatch.h>
+#elif V8_OS_WIN
+#include <windows.h>
#endif
#include <errno.h>
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index c399c52cb2..9979f33fce 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -19,6 +19,11 @@
#include <ostream>
#if V8_OS_WIN
+#include <windows.h>
+
+// This has to come after windows.h.
+#include <mmsystem.h> // For timeGetTime().
+
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
@@ -69,19 +74,22 @@ int64_t ComputeThreadTicks() {
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
-// On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
-// resolution of 10ms. thread_cputime API provides the time in ns
#if defined(V8_OS_AIX)
- thread_cputime_t tc;
+ // On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
+ // resolution of 10ms. thread_cputime API provides the time in ns.
if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
#if defined(__PASE__) // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi
return 0;
-#endif
+#else
+ thread_cputime_t tc;
if (thread_cputime(-1, &tc) != 0) {
UNREACHABLE();
}
+ return (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond)
+ + (tc.utime / v8::base::Time::kNanosecondsPerMicrosecond);
+#endif // defined(__PASE__)
}
-#endif
+#endif // defined(V8_OS_AIX)
struct timespec ts;
if (clock_gettime(clk_id, &ts) != 0) {
UNREACHABLE();
@@ -94,15 +102,7 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) {
1;
CHECK_GT(kSecondsLimit, ts.tv_sec);
int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond;
-#if defined(V8_OS_AIX)
- if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
- result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
- } else {
- result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
- }
-#else
result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
-#endif
return result;
#else // Monotonic clock not supported.
return 0;
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 01035185e0..6f69e2aa9c 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -23,6 +23,8 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#if V8_OS_WIN
+#include <windows.h>
+
#include "src/base/win32-headers.h"
#endif
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index 82555463c0..e4e845d86d 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -5,6 +5,12 @@
#ifndef V8_BASE_WIN32_HEADERS_H_
#define V8_BASE_WIN32_HEADERS_H_
+// This file contains defines and typedefs that allow popular Windows types to
+// be used without the overhead of including windows.h.
+// This file no longer includes windows.h, but it still sets the defines that
+// tell windows.h to omit some includes, so that the V8 source files that do
+// include windows.h still get the minimal version.
+
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
#define WIN32_LEAN_AND_MEAN
@@ -33,9 +39,6 @@
#define _WIN32_WINNT 0x0600
#endif
-#include <windows.h>
-
-#include <mmsystem.h> // For timeGetTime().
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
#ifdef __MINGW32__
@@ -45,40 +48,81 @@
#define _WIN32_WINNT 0x501
#endif // __MINGW32__
#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
-#include <dbghelp.h> // For SymLoadModule64 and al.
#include <errno.h> // For STRUNCATE
-#include <versionhelpers.h> // For IsWindows8OrGreater().
#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
#include <limits.h> // For INT_MAX and al.
-#include <tlhelp32.h> // For Module32First and al.
-
-// These additional WIN32 includes have to be right here as the #undef's below
-// makes it impossible to have them elsewhere.
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#ifndef __MINGW32__
-#include <wspiapi.h>
-#endif // __MINGW32__
#include <process.h> // For _beginthreadex().
#include <stdlib.h>
-#undef VOID
-#undef DELETE
-#undef IN
-#undef THIS
-#undef CONST
-#undef NAN
-#undef UNKNOWN
-#undef NONE
-#undef ANY
-#undef IGNORE
-#undef STRICT
-#undef GetObject
-#undef CreateSemaphore
-#undef Yield
-#undef RotateRight32
-#undef RotateLeft32
-#undef RotateRight64
-#undef RotateLeft64
+// typedef and define the most commonly used Windows integer types.
+
+typedef int BOOL; // NOLINT(runtime/int)
+typedef unsigned long DWORD; // NOLINT(runtime/int)
+typedef long LONG; // NOLINT(runtime/int)
+typedef void* LPVOID;
+typedef void* PVOID;
+typedef void* HANDLE;
+
+#define WINAPI __stdcall
+
+#if defined(_WIN64)
+typedef unsigned __int64 ULONG_PTR, *PULONG_PTR;
+#else
+typedef __w64 unsigned long ULONG_PTR, *PULONG_PTR; // NOLINT(runtime/int)
+#endif
+
+typedef struct _RTL_SRWLOCK SRWLOCK;
+typedef struct _RTL_CONDITION_VARIABLE CONDITION_VARIABLE;
+typedef struct _RTL_CRITICAL_SECTION CRITICAL_SECTION;
+typedef struct _RTL_CRITICAL_SECTION_DEBUG* PRTL_CRITICAL_SECTION_DEBUG;
+
+// Declare V8 versions of some Windows structures. These are needed when we
+// need a concrete type but don't want to pull in windows.h. We can't define
+// the real Windows types here, so we declare our own types and cast to the
+// Windows types in a few places. The sizes must match the Windows types,
+// which is verified with static asserts in platform-win32.cc.
+// V8ToWindowsType functions are provided for pointer conversions.
+
+struct V8_SRWLOCK {
+ PVOID Ptr;
+};
+
+struct V8_CONDITION_VARIABLE {
+ PVOID Ptr;
+};
+
+struct V8_CRITICAL_SECTION {
+ PRTL_CRITICAL_SECTION_DEBUG DebugInfo;
+ LONG LockCount;
+ LONG RecursionCount;
+ HANDLE OwningThread;
+ HANDLE LockSemaphore;
+ ULONG_PTR SpinCount;
+};
+
+inline SRWLOCK* V8ToWindowsType(V8_SRWLOCK* p) {
+ return reinterpret_cast<SRWLOCK*>(p);
+}
+
+inline const SRWLOCK* V8ToWindowsType(const V8_SRWLOCK* p) {
+ return reinterpret_cast<const SRWLOCK*>(p);
+}
+
+inline CONDITION_VARIABLE* V8ToWindowsType(V8_CONDITION_VARIABLE* p) {
+ return reinterpret_cast<CONDITION_VARIABLE*>(p);
+}
+
+inline const CONDITION_VARIABLE* V8ToWindowsType(
+ const V8_CONDITION_VARIABLE* p) {
+ return reinterpret_cast<const CONDITION_VARIABLE*>(p);
+}
+
+inline CRITICAL_SECTION* V8ToWindowsType(V8_CRITICAL_SECTION* p) {
+ return reinterpret_cast<CRITICAL_SECTION*>(p);
+}
+
+inline const CRITICAL_SECTION* V8ToWindowsType(const V8_CRITICAL_SECTION* p) {
+ return reinterpret_cast<const CRITICAL_SECTION*>(p);
+}
#endif // V8_BASE_WIN32_HEADERS_H_
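The V8_* mirror structs above only pay off because translation units that really talk to the Win32 API still include <windows.h> and convert at the call boundary, with the layout checked by the STATIC_ASSERTs in platform-win32.cc. A minimal sketch of that pattern, assuming a .cc file that includes both headers (the function name ExampleWait is illustrative):

#include <windows.h>

#include "src/base/win32-headers.h"

void ExampleWait(V8_CONDITION_VARIABLE* cv, V8_SRWLOCK* lock) {
  // Safe because platform-win32.cc statically asserts that the V8_* types
  // match the real Windows types in size and alignment.
  SleepConditionVariableSRW(V8ToWindowsType(cv), V8ToWindowsType(lock),
                            INFINITE, 0);
}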
diff --git a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h
index 0fc2389c35..86a62b658b 100644
--- a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h
@@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() {
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
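The prologue fix above converts max_call_args_ from a slot count into bytes before adding it to the bytecode frame size; the same correction is applied to every architecture below. As an illustrative calculation (values assumed, not from the patch): on a 64-bit target kSystemPointerSize is 8, so with bytecode_->frame_size() == 128 and max_call_args_ == 5 the prologue now reserves 128 + 5 * 8 = 168 bytes, whereas the old expression under-reserved 128 + 5 = 133.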
diff --git a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
index 333c0a2a34..59cffa47b3 100644
--- a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
@@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() {
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h
index 416fdbf98b..83c102176f 100644
--- a/deps/v8/src/baseline/baseline-assembler-inl.h
+++ b/deps/v8/src/baseline/baseline-assembler-inl.h
@@ -5,16 +5,15 @@
#ifndef V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
#define V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
+#include "src/baseline/baseline-assembler.h"
+
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#if ENABLE_SPARKPLUG
#include <type_traits>
#include <unordered_map>
-#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/feedback-cell.h"
@@ -142,6 +141,6 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
} // namespace internal
} // namespace v8
-#endif
+#endif // ENABLE_SPARKPLUG
#endif // V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
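ENABLE_SPARKPLUG replaces the hand-maintained architecture list in this and the following baseline files; the macro is picked up via src/flags/flags.h. A hedged sketch of what that centralized guard amounts to, reconstructed from the list it replaces (the actual definition lives in the flags headers, not here):

#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 ||   \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 ||                        \
    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
#define ENABLE_SPARKPLUG true
#else
#define ENABLE_SPARKPLUG false
#endif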
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index 8cafa8d94a..e1063ff2b2 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -7,9 +7,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index 83286c0aa7..f30812c85a 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -4,14 +4,13 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#include "src/base/bits.h"
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include <algorithm>
#include <type_traits>
+#include "src/base/bits.h"
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-assembler.h"
#include "src/baseline/baseline-compiler.h"
@@ -242,8 +241,10 @@ namespace {
// than pre-allocating a large enough buffer.
#ifdef V8_TARGET_ARCH_IA32
const int kAverageBytecodeToInstructionRatio = 5;
+const int kMinimumEstimatedInstructionSize = 200;
#else
const int kAverageBytecodeToInstructionRatio = 7;
+const int kMinimumEstimatedInstructionSize = 300;
#endif
std::unique_ptr<AssemblerBuffer> AllocateBuffer(
Isolate* isolate, Handle<BytecodeArray> bytecodes,
@@ -259,9 +260,6 @@ std::unique_ptr<AssemblerBuffer> AllocateBuffer(
if (code_location == BaselineCompiler::kOnHeap &&
Code::SizeFor(estimated_size) <
heap->MaxRegularHeapObjectSize(AllocationType::kCode)) {
- // TODO(victorgomes): We're currently underestimating the size of the
- // buffer, since we don't know how big the reloc info will be. We could
- // use a separate zone vector for the RelocInfo.
return NewOnHeapAssemblerBuffer(isolate, estimated_size);
}
return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
@@ -271,7 +269,7 @@ std::unique_ptr<AssemblerBuffer> AllocateBuffer(
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode, CodeLocation code_location)
- : isolate_(isolate),
+ : local_isolate_(isolate->AsLocalIsolate()),
stats_(isolate->counters()->runtime_call_stats()),
shared_function_info_(shared_function_info),
bytecode_(bytecode),
@@ -329,7 +327,8 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
}
int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {
- return bytecode.length() * kAverageBytecodeToInstructionRatio;
+ return bytecode.length() * kAverageBytecodeToInstructionRatio +
+ kMinimumEstimatedInstructionSize;
}
interpreter::Register BaselineCompiler::RegisterOperand(int operand_index) {
@@ -354,7 +353,7 @@ void BaselineCompiler::StoreRegisterPair(int operand_index, Register val0,
template <typename Type>
Handle<Type> BaselineCompiler::Constant(int operand_index) {
return Handle<Type>::cast(
- iterator().GetConstantForIndexOperand(operand_index, isolate_));
+ iterator().GetConstantForIndexOperand(operand_index, local_isolate_));
}
Smi BaselineCompiler::ConstantSmi(int operand_index) {
return iterator().GetConstantAtIndexAsSmi(operand_index);
@@ -559,7 +558,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
if (weight < 0) {
SaveAccumulatorScope accumulator_scope(&basm_);
- CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode,
+ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode,
__ FunctionOperand());
}
}
@@ -1871,7 +1870,7 @@ void BaselineCompiler::VisitJumpLoop() {
Register osr_level = scratch;
__ LoadRegister(osr_level, interpreter::Register::bytecode_array());
__ LoadByteField(osr_level, osr_level,
- BytecodeArray::kOsrNestingLevelOffset);
+ BytecodeArray::kOsrLoopNestingLevelOffset);
int loop_depth = iterator().GetImmediateOperand(1);
__ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth,
&osr_not_armed);
@@ -2057,7 +2056,7 @@ void BaselineCompiler::VisitSetPendingMessage() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register pending_message = scratch_scope.AcquireScratch();
__ Move(pending_message,
- ExternalReference::address_of_pending_message_obj(isolate_));
+ ExternalReference::address_of_pending_message(local_isolate_));
Register tmp = scratch_scope.AcquireScratch();
__ Move(tmp, kInterpreterAccumulatorRegister);
__ Move(kInterpreterAccumulatorRegister, MemOperand(pending_message, 0));
@@ -2252,4 +2251,4 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
} // namespace internal
} // namespace v8
-#endif
+#endif // ENABLE_SPARKPLUG
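The EstimateInstructionSize change adds a fixed floor on top of the per-bytecode ratio. As an illustrative calculation (sizes assumed): on a non-IA32 target the ratio is 7 and the floor 300, so a 50-byte bytecode array is now estimated at 50 * 7 + 300 = 650 bytes rather than 350, which keeps tiny functions, whose fixed prologue and epilogue dominate, from under-sizing the assembler buffer.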
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index 68478804ea..d8cd9ac5c6 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -7,9 +7,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
@@ -160,7 +159,7 @@ class BaselineCompiler {
const interpreter::BytecodeArrayIterator& iterator() { return iterator_; }
- Isolate* isolate_;
+ LocalIsolate* local_isolate_;
RuntimeCallStats* stats_;
Handle<SharedFunctionInfo> shared_function_info_;
Handle<BytecodeArray> bytecode_;
@@ -197,6 +196,6 @@ class BaselineCompiler {
} // namespace internal
} // namespace v8
-#endif
+#endif // ENABLE_SPARKPLUG
#endif // V8_BASELINE_BASELINE_COMPILER_H_
diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc
index be6e4ec3b3..cec0805aec 100644
--- a/deps/v8/src/baseline/baseline.cc
+++ b/deps/v8/src/baseline/baseline.cc
@@ -9,9 +9,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
diff --git a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
index 6ce19ec60d..f68d2c21fb 100644
--- a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
+++ b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
@@ -17,7 +17,8 @@ namespace baseline {
void BaselineCompiler::Prologue() {
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 2e41e554da..31bc96861b 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, for the first one add a
// couple of extra registers.
- wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+ wrapped_scope_.Include(t4.bit() | t5.bit() | t6.bit() | t7.bit());
}
assembler_->scratch_register_scope_ = this;
}
diff --git a/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h
index 6897d9b48c..3e8bb98e14 100644
--- a/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h
@@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
diff --git a/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h
index 5971e17aa4..f919635674 100644
--- a/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h
@@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
index deb5aba0ef..fc73105b8e 100644
--- a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() {
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
diff --git a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
index cc7956fadc..b4742f3955 100644
--- a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
@@ -18,7 +18,8 @@ namespace baseline {
void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
- int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
@@ -33,7 +34,8 @@ void BaselineCompiler::PrologueFillFrame() {
bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) {
__ masm()->Cmp(kInterpreterAccumulatorRegister,
- isolate_->factory()->undefined_value());
+ handle(ReadOnlyRoots(local_isolate_).undefined_value(),
+ local_isolate_));
__ masm()->Assert(equal, AbortReason::kUnexpectedValue);
}
int register_count = bytecode_->register_count();
diff --git a/deps/v8/src/bigint/bigint-internal.cc b/deps/v8/src/bigint/bigint-internal.cc
index 828a450e8a..2d74f3572c 100644
--- a/deps/v8/src/bigint/bigint-internal.cc
+++ b/deps/v8/src/bigint/bigint-internal.cc
@@ -7,6 +7,15 @@
namespace v8 {
namespace bigint {
+// Used for checking consistency between library and public header.
+#if DEBUG
+#if V8_ADVANCED_BIGINT_ALGORITHMS
+bool kAdvancedAlgorithmsEnabledInLibrary = true;
+#else
+bool kAdvancedAlgorithmsEnabledInLibrary = false;
+#endif // V8_ADVANCED_BIGINT_ALGORITHMS
+#endif // DEBUG
+
ProcessorImpl::ProcessorImpl(Platform* platform) : platform_(platform) {}
ProcessorImpl::~ProcessorImpl() { delete platform_; }
@@ -58,7 +67,16 @@ void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) {
if (B.len() < kBurnikelThreshold) {
return DivideSchoolbook(Q, RWDigits(nullptr, 0), A, B);
}
+#if !V8_ADVANCED_BIGINT_ALGORITHMS
return DivideBurnikelZiegler(Q, RWDigits(nullptr, 0), A, B);
+#else
+ if (B.len() < kBarrettThreshold || A.len() == B.len()) {
+ DivideBurnikelZiegler(Q, RWDigits(nullptr, 0), A, B);
+ } else {
+ ScratchDigits R(B.len());
+ DivideBarrett(Q, R, A, B);
+ }
+#endif
}
void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) {
@@ -84,7 +102,15 @@ void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) {
}
int q_len = DivideResultLength(A, B);
ScratchDigits Q(q_len);
+#if !V8_ADVANCED_BIGINT_ALGORITHMS
return DivideBurnikelZiegler(Q, R, A, B);
+#else
+ if (B.len() < kBarrettThreshold || A.len() == B.len()) {
+ DivideBurnikelZiegler(Q, R, A, B);
+ } else {
+ DivideBarrett(Q, R, A, B);
+ }
+#endif
}
Status Processor::Multiply(RWDigits Z, Digits X, Digits Y) {
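With V8_ADVANCED_BIGINT_ALGORITHMS, Divide() and Modulo() now pick between three algorithms by operand size. A condensed restatement of that dispatch (not the library's code, just the decision ladder it implements, using the thresholds from bigint-internal.h and bigint.h):

enum class DivAlgorithm { kSchoolbook, kBurnikelZiegler, kBarrett };

DivAlgorithm ChooseDivision(int a_len, int b_len) {
  if (b_len < 57 /* kBurnikelThreshold */) return DivAlgorithm::kSchoolbook;
  if (b_len < 13310 /* kBarrettThreshold */ || a_len == b_len)
    return DivAlgorithm::kBurnikelZiegler;
  return DivAlgorithm::kBarrett;  // only reachable with advanced algorithms
}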
diff --git a/deps/v8/src/bigint/bigint-internal.h b/deps/v8/src/bigint/bigint-internal.h
index 41ef9526e5..4c214153bf 100644
--- a/deps/v8/src/bigint/bigint-internal.h
+++ b/deps/v8/src/bigint/bigint-internal.h
@@ -18,6 +18,10 @@ constexpr int kFftThreshold = 1500;
constexpr int kFftInnerThreshold = 200;
constexpr int kBurnikelThreshold = 57;
+constexpr int kNewtonInversionThreshold = 50;
+// kBarrettThreshold is defined in bigint.h.
+
+constexpr int kToStringFastThreshold = 43;
class ProcessorImpl : public Processor {
public:
@@ -47,11 +51,24 @@ class ProcessorImpl : public Processor {
void Toom3Main(RWDigits Z, Digits X, Digits Y);
void MultiplyFFT(RWDigits Z, Digits X, Digits Y);
+
+ void DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B);
+ void DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B, Digits I,
+ RWDigits scratch);
+
+ void Invert(RWDigits Z, Digits V, RWDigits scratch);
+ void InvertBasecase(RWDigits Z, Digits V, RWDigits scratch);
+ void InvertNewton(RWDigits Z, Digits V, RWDigits scratch);
#endif // V8_ADVANCED_BIGINT_ALGORITHMS
// {out_length} initially contains the allocated capacity of {out}, and
// upon return will be set to the actual length of the result string.
void ToString(char* out, int* out_length, Digits X, int radix, bool sign);
+ void ToStringImpl(char* out, int* out_length, Digits X, int radix, bool sign,
+ bool use_fast_algorithm);
+
+ void FromString(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringClassic(RWDigits Z, FromStringAccumulator* accumulator);
bool should_terminate() { return status_ == Status::kInterrupted; }
@@ -78,6 +95,20 @@ class ProcessorImpl : public Processor {
Platform* platform_;
};
+// These constants are primarily needed for Barrett division in div-barrett.cc,
+// and they're also needed by fast to-string conversion in tostring.cc.
+constexpr int DivideBarrettScratchSpace(int n) { return n + 2; }
+// Local values S and W need "n plus a few" digits; U needs 2*n "plus a few".
+// In all tested cases the "few" were either 2 or 3, so give 5 to be safe.
+// S and W are not live at the same time.
+constexpr int kInvertNewtonExtraSpace = 5;
+constexpr int InvertNewtonScratchSpace(int n) {
+ return 3 * n + 2 * kInvertNewtonExtraSpace;
+}
+constexpr int InvertScratchSpace(int n) {
+ return n < kNewtonInversionThreshold ? 2 * n : InvertNewtonScratchSpace(n);
+}
+
#define CHECK(cond) \
if (!(cond)) { \
std::cerr << __FILE__ << ":" << __LINE__ << ": "; \
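As an illustrative reading of the scratch-space helpers above: DivideBarrettScratchSpace(n) is n + 2 digits, InvertNewtonScratchSpace(n) is 3n + 2 * 5 = 3n + 10 digits, and InvertScratchSpace(n) is 2n digits below kNewtonInversionThreshold (50) and InvertNewtonScratchSpace(n) at or above it. So inverting a 100-digit divisor needs 3 * 100 + 10 = 310 scratch digits, while a 40-digit one needs only 80.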
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
index 6d3790808c..218bf4616c 100644
--- a/deps/v8/src/bigint/bigint.h
+++ b/deps/v8/src/bigint/bigint.h
@@ -10,6 +10,7 @@
#include <algorithm>
#include <cstring>
#include <iostream>
+#include <vector>
namespace v8 {
namespace bigint {
@@ -23,6 +24,8 @@ namespace bigint {
std::cerr << "Assertion failed: " #cond "\n"; \
abort(); \
}
+
+extern bool kAdvancedAlgorithmsEnabledInLibrary;
#else
#define BIGINT_H_DCHECK(cond) (void(0))
#endif
@@ -233,6 +236,8 @@ bool SubtractSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
enum class Status { kOk, kInterrupted };
+class FromStringAccumulator;
+
class Processor {
public:
// Takes ownership of {platform}.
@@ -256,6 +261,8 @@ class Processor {
// {out_length} initially contains the allocated capacity of {out}, and
// upon return will be set to the actual length of the result string.
Status ToString(char* out, int* out_length, Digits X, int radix, bool sign);
+
+ Status FromString(RWDigits Z, FromStringAccumulator* accumulator);
};
inline int AddResultLength(int x_length, int y_length) {
@@ -274,8 +281,19 @@ inline int SubtractSignedResultLength(int x_length, int y_length,
inline int MultiplyResultLength(Digits X, Digits Y) {
return X.len() + Y.len();
}
+constexpr int kBarrettThreshold = 13310;
inline int DivideResultLength(Digits A, Digits B) {
- return A.len() - B.len() + 1;
+#if V8_ADVANCED_BIGINT_ALGORITHMS
+ BIGINT_H_DCHECK(kAdvancedAlgorithmsEnabledInLibrary);
+ // The Barrett division algorithm needs one extra digit for temporary use.
+ int kBarrettExtraScratch = B.len() >= kBarrettThreshold ? 1 : 0;
+#else
+ // If this fails, set -DV8_ADVANCED_BIGINT_ALGORITHMS in any compilation unit
+ // that #includes this header.
+ BIGINT_H_DCHECK(!kAdvancedAlgorithmsEnabledInLibrary);
+ constexpr int kBarrettExtraScratch = 0;
+#endif
+ return A.len() - B.len() + 1 + kBarrettExtraScratch;
}
inline int ModuloResultLength(Digits B) { return B.len(); }
@@ -283,9 +301,207 @@ int ToStringResultLength(Digits X, int radix, bool sign);
// In DEBUG builds, the result of {ToString} will be initialized to this value.
constexpr char kStringZapValue = '?';
+// Support for parsing BigInts from Strings, using an Accumulator object
+// for intermediate state.
+
+class ProcessorImpl;
+
+#if !defined(DEBUG) && (defined(__GNUC__) || defined(__clang__))
+// Clang supports this since 3.9, GCC since 4.x.
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#elif !defined(DEBUG) && defined(_MSC_VER)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+static constexpr int kStackParts = 8;
+
+// A container object for all metadata required for parsing a BigInt from
+// a string.
+// Aggressively optimized not to waste instructions for small cases, while
+// also scaling transparently to huge cases.
+// Defined here in the header so that it can be inlined.
+class FromStringAccumulator {
+ public:
+ enum class Result { kOk, kMaxSizeExceeded };
+
+ // Step 1: Create a FromStringAccumulator instance. For best performance,
+ // stack allocation is recommended.
+ // {max_digits} is only used for refusing to grow beyond a given size
+ // (see "Step 2" below). It does not cause pre-allocation, so feel free to
+ // specify a large maximum.
+ // TODO(jkummerow): The limit applies to the number of intermediate chunks,
+ // whereas the final result will be slightly smaller (depending on {radix}).
+ // So for sufficiently large N, setting max_digits=N here will not actually
+ // allow parsing BigInts with N digits. We can fix that if/when anyone cares.
+ explicit FromStringAccumulator(int max_digits)
+ : max_digits_(std::max(max_digits - kStackParts, kStackParts)) {}
+
+ // Step 2: Call this method to read all characters.
+ // {Char} should be a character type, such as uint8_t or uint16_t.
+ // {end} should be one past the last character (i.e. {start == end} would
+ // indicate an empty string).
+ // Returns the current position when an invalid character is encountered.
+ template <class Char>
+ ALWAYS_INLINE const Char* Parse(const Char* start, const Char* end,
+ digit_t radix);
+
+ // Step 3: Check if a result is available, and determine its required
+ // allocation size.
+ Result result() { return result_; }
+ int ResultLength() {
+ return std::max(stack_parts_used_, static_cast<int>(heap_parts_.size()));
+ }
+
+ // Step 4: Use BigIntProcessor::FromString() to retrieve the result into an
+ // {RWDigits} struct allocated for the size returned by step 3.
+
+ private:
+ friend class ProcessorImpl;
+
+ ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part,
+ bool is_last = false);
+
+ digit_t stack_parts_[kStackParts];
+ std::vector<digit_t> heap_parts_;
+ digit_t max_multiplier_{0};
+ digit_t last_multiplier_;
+ const int max_digits_;
+ Result result_{Result::kOk};
+ int stack_parts_used_{0};
+ bool inline_everything_{false};
+};
+
+// The rest of this file is the inlineable implementation of
+// FromStringAccumulator methods.
+
+#if defined(__GNUC__) || defined(__clang__)
+// Clang supports this since 3.9, GCC since 5.x.
+#define HAVE_BUILTIN_MUL_OVERFLOW 1
+#else
+#define HAVE_BUILTIN_MUL_OVERFLOW 0
+#endif
+
+// Numerical value of the first 127 ASCII characters, using 255 as sentinel
+// for "invalid".
+static constexpr uint8_t kCharValue[] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, // 0..7
+ 255, 255, 255, 255, 255, 255, 255, 255, // 8..15
+ 255, 255, 255, 255, 255, 255, 255, 255, // 16..23
+ 255, 255, 255, 255, 255, 255, 255, 255, // 24..31
+ 255, 255, 255, 255, 255, 255, 255, 255, // 32..39
+ 255, 255, 255, 255, 255, 255, 255, 255, // 40..47
+ 0, 1, 2, 3, 4, 5, 6, 7, // 48..55 '0' == 48
+ 8, 9, 255, 255, 255, 255, 255, 255, // 56..63 '9' == 57
+ 255, 10, 11, 12, 13, 14, 15, 16, // 64..71 'A' == 65
+ 17, 18, 19, 20, 21, 22, 23, 24, // 72..79
+ 25, 26, 27, 28, 29, 30, 31, 32, // 80..87
+ 33, 34, 35, 255, 255, 255, 255, 255, // 88..95 'Z' == 90
+ 255, 10, 11, 12, 13, 14, 15, 16, // 96..103 'a' == 97
+ 17, 18, 19, 20, 21, 22, 23, 24, // 104..111
+ 25, 26, 27, 28, 29, 30, 31, 32, // 112..119
+ 33, 34, 35, 255, 255, 255, 255, 255, // 120..127 'z' == 122
+};
+template <class Char>
+const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
+ digit_t radix) {
+ BIGINT_H_DCHECK(2 <= radix && radix <= 36);
+ const Char* current = start;
+#if !HAVE_BUILTIN_MUL_OVERFLOW
+ const digit_t kMaxMultiplier = (~digit_t{0}) / radix;
+#endif
+#if HAVE_TWODIGIT_T // The inlined path requires twodigit_t availability.
+ // The max supported radix is 36, and Math.log2(36) == 5.169..., so we
+ // need at most 5.17 bits per char.
+ static constexpr int kInlineThreshold = kStackParts * kDigitBits * 100 / 517;
+ inline_everything_ = (end - start) <= kInlineThreshold;
+#endif
+ bool done = false;
+ do {
+ digit_t multiplier = 1;
+ digit_t part = 0;
+ while (true) {
+ digit_t d;
+ uint32_t c = *current;
+ if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
+ done = true;
+ break;
+ }
+
+#if HAVE_BUILTIN_MUL_OVERFLOW
+ digit_t new_multiplier;
+ if (__builtin_mul_overflow(multiplier, radix, &new_multiplier)) break;
+ multiplier = new_multiplier;
+#else
+ if (multiplier > kMaxMultiplier) break;
+ multiplier *= radix;
+#endif
+ part = part * radix + d;
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+ if (!AddPart(multiplier, part, done)) return current;
+ } while (!done);
+ return current;
+}
+
+bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
+ bool is_last) {
+#if HAVE_TWODIGIT_T
+ if (inline_everything_) {
+ // Inlined version of {MultiplySingle}.
+ digit_t carry = part;
+ digit_t high = 0;
+ for (int i = 0; i < stack_parts_used_; i++) {
+ twodigit_t result = twodigit_t{stack_parts_[i]} * multiplier;
+ digit_t new_high = result >> bigint::kDigitBits;
+ digit_t low = static_cast<digit_t>(result);
+ result = twodigit_t{low} + high + carry;
+ carry = result >> bigint::kDigitBits;
+ stack_parts_[i] = static_cast<digit_t>(result);
+ high = new_high;
+ }
+ stack_parts_[stack_parts_used_++] = carry + high;
+ return true;
+ }
+#else
+ BIGINT_H_DCHECK(!inline_everything_);
+#endif
+ if (is_last) {
+ last_multiplier_ = multiplier;
+ } else {
+ BIGINT_H_DCHECK(max_multiplier_ == 0 || max_multiplier_ == multiplier);
+ max_multiplier_ = multiplier;
+ }
+ if (stack_parts_used_ < kStackParts) {
+ stack_parts_[stack_parts_used_++] = part;
+ return true;
+ }
+ if (heap_parts_.size() == 0) {
+ // Initialize heap storage. Copy the stack parts to make things easier later.
+ heap_parts_.reserve(kStackParts * 2);
+ for (int i = 0; i < kStackParts; i++) {
+ heap_parts_.push_back(stack_parts_[i]);
+ }
+ }
+ if (static_cast<int>(heap_parts_.size()) >= max_digits_ && !is_last) {
+ result_ = Result::kMaxSizeExceeded;
+ return false;
+ }
+ heap_parts_.push_back(part);
+ return true;
+}
+
} // namespace bigint
} // namespace v8
#undef BIGINT_H_DCHECK
+#undef ALWAYS_INLINE
+#undef HAVE_BUILTIN_MUL_OVERFLOW
#endif // V8_BIGINT_BIGINT_H_
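The Step 1-4 protocol documented on FromStringAccumulator above can be exercised end to end roughly as follows. This is a minimal sketch, not V8's embedding code: kMaxDigits, the radix, and the std::vector-backed storage are assumptions made for the example.

#include <vector>

#include "src/bigint/bigint.h"

bool ParseDecimalBigInt(v8::bigint::Processor* processor, const uint8_t* chars,
                        int length) {
  using namespace v8::bigint;
  constexpr int kMaxDigits = 1 << 20;          // arbitrary limit for this sketch
  FromStringAccumulator accumulator(kMaxDigits);            // Step 1
  const uint8_t* end = chars + length;
  const uint8_t* pos = accumulator.Parse(chars, end, 10);   // Step 2, radix 10
  if (pos != end) return false;                // invalid character encountered
  if (accumulator.result() != FromStringAccumulator::Result::kOk) return false;
  std::vector<digit_t> storage(accumulator.ResultLength()); // Step 3
  RWDigits Z(storage.data(), static_cast<int>(storage.size()));
  return processor->FromString(Z, &accumulator) == Status::kOk;  // Step 4
}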
diff --git a/deps/v8/src/bigint/div-barrett.cc b/deps/v8/src/bigint/div-barrett.cc
new file mode 100644
index 0000000000..39f09d0ac1
--- /dev/null
+++ b/deps/v8/src/bigint/div-barrett.cc
@@ -0,0 +1,366 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Barrett division, finding the inverse with Newton's method.
+// Reference: "Fast Division of Large Integers" by Karl Hasselström,
+// found at https://treskal.com/s/masters-thesis.pdf
+
+// Many thanks to Karl Wiberg, k@w5.se, for both writing up an
+// understandable theoretical description of the algorithm and privately
+// providing a demo implementation, on which the implementation in this file is
+// based.
+
+#include <algorithm>
+
+#include "src/bigint/bigint-internal.h"
+#include "src/bigint/digit-arithmetic.h"
+#include "src/bigint/div-helpers.h"
+#include "src/bigint/vector-arithmetic.h"
+
+namespace v8 {
+namespace bigint {
+
+namespace {
+
+void DcheckIntegerPartRange(Digits X, digit_t min, digit_t max) {
+#if DEBUG
+ digit_t integer_part = X.msd();
+ DCHECK(integer_part >= min);
+ DCHECK(integer_part <= max);
+#else
+ USE(X);
+ USE(min);
+ USE(max);
+#endif
+}
+
+} // namespace
+
+// Z := (the fractional part of) 1/V, via naive division.
+// See comments at {Invert} and {InvertNewton} below for details.
+void ProcessorImpl::InvertBasecase(RWDigits Z, Digits V, RWDigits scratch) {
+ DCHECK(Z.len() > V.len());
+ DCHECK(V.len() > 0); // NOLINT(readability/check)
+ DCHECK(scratch.len() >= 2 * V.len());
+ int n = V.len();
+ RWDigits X(scratch, 0, 2 * n);
+ digit_t borrow = 0;
+ int i = 0;
+ for (; i < n; i++) X[i] = 0;
+ for (; i < 2 * n; i++) X[i] = digit_sub2(0, V[i - n], borrow, &borrow);
+ DCHECK(borrow == 1); // NOLINT(readability/check)
+ RWDigits R(nullptr, 0); // We don't need the remainder.
+ if (n < kBurnikelThreshold) {
+ DivideSchoolbook(Z, R, X, V);
+ } else {
+ DivideBurnikelZiegler(Z, R, X, V);
+ }
+}
+
+// This is Algorithm 4.2 from the paper.
+// Computes the inverse of V, shifted by kDigitBits * 2 * V.len, accurate to
+// V.len+1 digits. The V.len low digits of the result digits will be written
+// to Z, plus there is an implicit top digit with value 1.
+// Needs InvertNewtonScratchSpace(V.len) of scratch space.
+// The result is either correct or off by one (about half the time it is
+// correct, half the time it is one too much, and in the corner case where V is
+// minimal and the implicit top digit would have to be 2 it is one too little).
+// Barrett's division algorithm can handle that, so we don't care.
+void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) {
+ const int vn = V.len();
+ DCHECK(Z.len() >= vn);
+ DCHECK(scratch.len() >= InvertNewtonScratchSpace(vn));
+ const int kSOffset = 0;
+ const int kWOffset = 0; // S and W can share their scratch space.
+ const int kUOffset = vn + kInvertNewtonExtraSpace;
+
+ // The base case won't work otherwise.
+ DCHECK(V.len() >= 3); // NOLINT(readability/check)
+
+ constexpr int kBasecasePrecision = kNewtonInversionThreshold - 1;
+ // V must have more digits than the basecase.
+ DCHECK(V.len() > kBasecasePrecision);
+ DCHECK(IsBitNormalized(V));
+
+ // Step (1): Setup.
+ // Calculate precision required at each step.
+ // {k} is the number of fraction bits for the current iteration.
+ int k = vn * kDigitBits;
+ int target_fraction_bits[8 * sizeof(vn)]; // "k_i" in the paper.
+ int iteration = -1; // "i" in the paper, except inverted to run downwards.
+ while (k > kBasecasePrecision * kDigitBits) {
+ iteration++;
+ target_fraction_bits[iteration] = k;
+ k = DIV_CEIL(k, 2);
+ }
+ // At this point, k <= kBasecasePrecision*kDigitBits is the number of
+ // fraction bits to use in the base case. {iteration} is the highest index
+ // in use for f[].
+
+ // Step (2): Initial approximation.
+ int initial_digits = DIV_CEIL(k + 1, kDigitBits);
+ Digits top_part_of_v(V, vn - initial_digits, initial_digits);
+ InvertBasecase(Z, top_part_of_v, scratch);
+ Z[initial_digits] = Z[initial_digits] + 1; // Implicit top digit.
+ // From now on, we'll keep Z.len updated to the part that's already computed.
+ Z.set_len(initial_digits + 1);
+
+ // Step (3): Precision doubling loop.
+ while (true) {
+ DcheckIntegerPartRange(Z, 1, 2);
+
+ // (3b): S = Z^2
+ RWDigits S(scratch, kSOffset, 2 * Z.len());
+ Multiply(S, Z, Z);
+ if (should_terminate()) return;
+ S.TrimOne(); // Top digit of S is unused.
+ DcheckIntegerPartRange(S, 1, 4);
+
+ // (3c): T = V, truncated so that at least 2k+3 fraction bits remain.
+ int fraction_digits = DIV_CEIL(2 * k + 3, kDigitBits);
+ int t_len = std::min(V.len(), fraction_digits);
+ Digits T(V, V.len() - t_len, t_len);
+
+ // (3d): U = T * S, truncated so that at least 2k+1 fraction bits remain
+ // (U has one integer digit, which might be zero).
+ fraction_digits = DIV_CEIL(2 * k + 1, kDigitBits);
+ RWDigits U(scratch, kUOffset, S.len() + T.len());
+ DCHECK(U.len() > fraction_digits);
+ Multiply(U, S, T);
+ if (should_terminate()) return;
+ U = U + (U.len() - (1 + fraction_digits));
+ DcheckIntegerPartRange(U, 0, 3);
+
+ // (3e): W = 2 * Z, padded with "0" fraction bits so that it has the
+ // same number of fraction bits as U.
+ DCHECK(U.len() >= Z.len());
+ RWDigits W(scratch, kWOffset, U.len());
+ int padding_digits = U.len() - Z.len();
+ for (int i = 0; i < padding_digits; i++) W[i] = 0;
+ LeftShift(W + padding_digits, Z, 1);
+ DcheckIntegerPartRange(W, 2, 4);
+
+ // (3f): Z = W - U.
+ // This check is '<=' instead of '<' because U's top digit is its
+ // integer part, and we want vn fraction digits.
+ if (U.len() <= vn) {
+ // Normal subtraction.
+ // This is not the last iteration.
+ DCHECK(iteration > 0); // NOLINT(readability/check)
+ Z.set_len(U.len());
+ digit_t borrow = SubtractAndReturnBorrow(Z, W, U);
+ DCHECK(borrow == 0); // NOLINT(readability/check)
+ USE(borrow);
+ DcheckIntegerPartRange(Z, 1, 2);
+ } else {
+ // Truncate some least significant digits so that we get vn
+ // fraction digits, and compute the integer digit separately.
+ // This is the last iteration.
+ DCHECK(iteration == 0); // NOLINT(readability/check)
+ Z.set_len(vn);
+ Digits W_part(W, W.len() - vn - 1, vn);
+ Digits U_part(U, U.len() - vn - 1, vn);
+ digit_t borrow = SubtractAndReturnBorrow(Z, W_part, U_part);
+ digit_t integer_part = W.msd() - U.msd() - borrow;
+ DCHECK(integer_part == 1 || integer_part == 2);
+ if (integer_part == 2) {
+ // This is the rare case where the correct result would be 2.0, but
+ // since we can't express that by returning only the fractional part
+ // with an implicit 1-digit, we have to return [1.]9999... instead.
+ for (int i = 0; i < Z.len(); i++) Z[i] = ~digit_t{0};
+ }
+ break;
+ }
+ // (3g, 3h): Update local variables and loop.
+ k = target_fraction_bits[iteration];
+ iteration--;
+ }
+}
+
+// Computes the inverse of V, shifted by kDigitBits * 2 * V.len, accurate to
+// V.len+1 digits. The V.len low digits of the result digits will be written
+// to Z, plus there is an implicit top digit with value 1.
+// (Corner case: if V is minimal, the implicit digit should be 2; in that case
+// we return one less than the correct answer. DivideBarrett can handle that.)
+// Needs InvertScratchSpace(V.len) digits of scratch space.
+void ProcessorImpl::Invert(RWDigits Z, Digits V, RWDigits scratch) {
+ DCHECK(Z.len() > V.len());
+ DCHECK(V.len() >= 1); // NOLINT(readability/check)
+ DCHECK(IsBitNormalized(V));
+ DCHECK(scratch.len() >= InvertScratchSpace(V.len()));
+
+ int vn = V.len();
+ if (vn >= kNewtonInversionThreshold) {
+ return InvertNewton(Z, V, scratch);
+ }
+ if (vn == 1) {
+ digit_t d = V[0];
+ digit_t dummy_remainder;
+ Z[0] = digit_div(~d, ~digit_t{0}, d, &dummy_remainder);
+ Z[1] = 0;
+ } else {
+ InvertBasecase(Z, V, scratch);
+ if (Z[vn] == 1) {
+ for (int i = 0; i < vn; i++) Z[i] = ~digit_t{0};
+ Z[vn] = 0;
+ }
+ }
+}
+
+// This is algorithm 3.5 from the paper.
+// Computes Q(uotient) and R(emainder) for A/B using I, which is a
+// precomputed approximation of 1/B (e.g. with Invert() above).
+// Needs DivideBarrettScratchSpace(A.len) scratch space.
+void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B,
+ Digits I, RWDigits scratch) {
+ DCHECK(Q.len() > A.len() - B.len());
+ DCHECK(R.len() >= B.len());
+ DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' !
+ DCHECK(A.len() <= 2 * B.len());
+ DCHECK(B.len() > 0); // NOLINT(readability/check)
+ DCHECK(IsBitNormalized(B));
+ DCHECK(I.len() == A.len() - B.len());
+ DCHECK(scratch.len() >= DivideBarrettScratchSpace(A.len()));
+
+ int orig_q_len = Q.len();
+
+ // (1): A1 = A with B.len fewer digits.
+ Digits A1 = A + B.len();
+ DCHECK(A1.len() == I.len());
+
+ // (2): Q = A1*I with I.len fewer digits.
+ // {I} has an implicit high digit with value 1, so we add {A1} to the high
+ // part of the multiplication result.
+ RWDigits K(scratch, 0, 2 * I.len());
+ Multiply(K, A1, I);
+ if (should_terminate()) return;
+ Q.set_len(I.len() + 1);
+ Add(Q, K + I.len(), A1);
+ // K is no longer used, can re-use {scratch} for P.
+
+ // (3): R = A - B*Q (approximate remainder).
+ RWDigits P(scratch, 0, A.len() + 1);
+ Multiply(P, B, Q);
+ if (should_terminate()) return;
+ digit_t borrow = SubtractAndReturnBorrow(R, A, Digits(P, 0, B.len()));
+ // R may be allocated wider than B; zero out any extra digits if so.
+ for (int i = B.len(); i < R.len(); i++) R[i] = 0;
+ digit_t r_high = A[B.len()] - P[B.len()] - borrow;
+
+ // Adjust R and Q so that they become the correct remainder and quotient.
+ // The number of iterations is guaranteed to be at most some very small
+ // constant, unless the caller gave us a bad approximate quotient.
+ if (r_high >> (kDigitBits - 1) == 1) {
+ // (5b): R < 0, so R += B
+ digit_t q_sub = 0;
+ do {
+ r_high += AddAndReturnCarry(R, R, B);
+ q_sub++;
+ DCHECK(q_sub <= 5); // NOLINT(readability/check)
+ } while (r_high != 0);
+ Subtract(Q, q_sub);
+ } else {
+ digit_t q_add = 0;
+ while (r_high != 0 || GreaterThanOrEqual(R, B)) {
+ // (5c): R >= B, so R -= B
+ r_high -= SubtractAndReturnBorrow(R, R, B);
+ q_add++;
+ DCHECK(q_add <= 5); // NOLINT(readability/check)
+ }
+ Add(Q, q_add);
+ }
+ // (5a): Return.
+ int final_q_len = Q.len();
+ Q.set_len(orig_q_len);
+ for (int i = final_q_len; i < orig_q_len; i++) Q[i] = 0;
+}
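
The estimate-then-correct structure is easier to see on machine integers. A minimal sketch, assuming 32-bit digits, a two-digit dividend, and a one-digit bit-normalized divisor so everything fits in uint64_t; it mirrors only the shape of the routine above, not its multi-digit bookkeeping:

#include <cassert>
#include <cstdint>

// Toy Barrett division: estimate the quotient from the top dividend digit
// and the precomputed inverse, then fix it up with a few corrections.
uint64_t ToyBarrettDivide(uint64_t A, uint32_t d) {
  const uint64_t B = uint64_t{1} << 32;
  assert(d >> 31);        // divisor is bit-normalized
  assert((A >> 32) < d);  // quotient fits in a single digit
  // Inverse digit z such that B + z == floor((B*B - 1) / d), cf. Invert().
  uint64_t z = ((B - 1 - d) * B + (B - 1)) / d;
  uint64_t a1 = A >> 32;  // top digit of the dividend ("A1")
  // Estimate q ~= a1 * (B + z) / B: multiply by the inverse, drop a digit,
  // and add a1 for the implicit top digit of 1.
  uint64_t q = a1 + ((a1 * z) >> 32);
  // This estimate never overshoots, so only the upward correction path
  // (the "R >= B, so R -= B" loop above) is needed, a handful of times.
  uint64_t r = A - q * d;
  int steps = 0;
  while (r >= d) {
    r -= d;
    q++;
    steps++;
    assert(steps <= 5);
  }
  assert(q == A / d && r == A % d);
  return q;
}
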
+
+// Computes Q(uotient) and R(emainder) for A/B, using Barrett division.
+void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) {
+ DCHECK(Q.len() > A.len() - B.len());
+ DCHECK(R.len() >= B.len());
+ DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' !
+ DCHECK(B.len() > 0); // NOLINT(readability/check)
+
+ // Normalize B, and shift A by the same amount.
+ ShiftedDigits b_normalized(B);
+ ShiftedDigits a_normalized(A, b_normalized.shift());
+ // Keep the code below more concise.
+ B = b_normalized;
+ A = a_normalized;
+
+ // The core DivideBarrett function above only supports A having at most
+ // twice as many digits as B. We generalize this to arbitrary inputs
+ // similar to Burnikel-Ziegler division by performing a t-by-1 division
+ // of B-sized chunks. It's easy to special-case the situation where we
+ // don't need to bother.
+ int barrett_dividend_length = A.len() <= 2 * B.len() ? A.len() : 2 * B.len();
+ int i_len = barrett_dividend_length - B.len();
+ ScratchDigits I(i_len + 1); // +1 is for temporary use by Invert().
+ int scratch_len =
+ std::max(InvertScratchSpace(i_len),
+ DivideBarrettScratchSpace(barrett_dividend_length));
+ ScratchDigits scratch(scratch_len);
+ Invert(I, Digits(B, B.len() - i_len, i_len), scratch);
+ if (should_terminate()) return;
+ I.TrimOne();
+ DCHECK(I.len() == i_len);
+ if (A.len() > 2 * B.len()) {
+ // This follows the variable names and algorithmic steps of
+ // DivideBurnikelZiegler().
+ int n = B.len(); // Chunk length.
+ // (5): {t} is the number of B-sized chunks of A.
+ int t = DIV_CEIL(A.len(), n);
+ DCHECK(t >= 3); // NOLINT(readability/check)
+ // (6)/(7): Z is used for the current 2-chunk block to be divided by B,
+ // initialized to the two topmost chunks of A.
+ int z_len = n * 2;
+ ScratchDigits Z(z_len);
+ PutAt(Z, A + n * (t - 2), z_len);
+ // (8): For i from t-2 downto 0 do
+ int qi_len = n + 1;
+ ScratchDigits Qi(qi_len);
+ ScratchDigits Ri(n);
+ // First iteration unrolled and specialized.
+ {
+ int i = t - 2;
+ DivideBarrett(Qi, Ri, Z, B, I, scratch);
+ if (should_terminate()) return;
+ RWDigits target = Q + n * i;
+ // In the first iteration, all qi_len = n + 1 digits may be used.
+ int to_copy = std::min(qi_len, target.len());
+ for (int j = 0; j < to_copy; j++) target[j] = Qi[j];
+ for (int j = to_copy; j < target.len(); j++) target[j] = 0;
+#if DEBUG
+ for (int j = to_copy; j < Qi.len(); j++) {
+ DCHECK(Qi[j] == 0); // NOLINT(readability/check)
+ }
+#endif
+ }
+ // Now loop over any remaining iterations.
+ for (int i = t - 3; i >= 0; i--) {
+ // (8b): If i > 0, set Z_(i-1) = [Ri, A_(i-1)].
+ // (De-duped with unrolled first iteration, hence reading A_(i).)
+ PutAt(Z + n, Ri, n);
+ PutAt(Z, A + n * i, n);
+ // (8a): Compute Qi, Ri such that Zi = B*Qi + Ri.
+ DivideBarrett(Qi, Ri, Z, B, I, scratch);
+ DCHECK(Qi[qi_len - 1] == 0); // NOLINT(readability/check)
+ if (should_terminate()) return;
+ // (9): Return Q = [Q_(t-2), ..., Q_0]...
+ PutAt(Q + n * i, Qi, n);
+ }
+ Ri.Normalize();
+ DCHECK(Ri.len() <= R.len());
+ // (9): ...and R = R_0 * 2^(-leading_zeros).
+ RightShift(R, Ri, b_normalized.shift());
+ } else {
+ DivideBarrett(Q, R, A, B, I, scratch);
+ if (should_terminate()) return;
+ RightShift(R, R, b_normalized.shift());
+ }
+}
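
The chunked t-by-1 scheme above is ordinary long division with machine digits replaced by B-sized chunks. A minimal sketch, assuming 32-bit chunks and a single-chunk divisor; in the real code each chunk is B.len() digits wide and the per-chunk division is DivideBarrett with the shared inverse I:

#include <cstdint>
#include <vector>

// Long division over chunks, most significant first: each step divides the
// two-chunk value [remainder, chunk] and carries the new remainder along.
std::vector<uint32_t> DivideByOneChunk(const std::vector<uint32_t>& a_msb_first,
                                       uint32_t d, uint32_t* remainder) {
  std::vector<uint32_t> quotient(a_msb_first.size());
  uint64_t rem = 0;
  for (size_t i = 0; i < a_msb_first.size(); i++) {
    uint64_t z_i = (rem << 32) | a_msb_first[i];  // Z_i = [R_(i+1), A_i]
    quotient[i] = static_cast<uint32_t>(z_i / d);  // Q_i
    rem = z_i % d;                                 // R_i
  }
  *remainder = static_cast<uint32_t>(rem);
  return quotient;
}
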
+
+} // namespace bigint
+} // namespace v8
diff --git a/deps/v8/src/bigint/fromstring.cc b/deps/v8/src/bigint/fromstring.cc
new file mode 100644
index 0000000000..0307745cad
--- /dev/null
+++ b/deps/v8/src/bigint/fromstring.cc
@@ -0,0 +1,72 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bigint/bigint-internal.h"
+#include "src/bigint/vector-arithmetic.h"
+
+namespace v8 {
+namespace bigint {
+
+// The classic algorithm: for every part, multiply the accumulator with
+// the appropriate multiplier, and add the part. O(n²) overall.
+void ProcessorImpl::FromStringClassic(RWDigits Z,
+ FromStringAccumulator* accumulator) {
+ // We always have at least one part to process.
+ DCHECK(accumulator->stack_parts_used_ > 0); // NOLINT(readability/check)
+ Z[0] = accumulator->stack_parts_[0];
+ RWDigits already_set(Z, 0, 1);
+ for (int i = 1; i < Z.len(); i++) Z[i] = 0;
+
+ // The {FromStringAccumulator} uses stack-allocated storage for the first
+ // few parts; if heap storage is used at all then all parts are copied there.
+ int num_stack_parts = accumulator->stack_parts_used_;
+ if (num_stack_parts == 1) return;
+ const std::vector<digit_t>& heap_parts = accumulator->heap_parts_;
+ int num_heap_parts = static_cast<int>(heap_parts.size());
+ // All multipliers are the same, except possibly for the last.
+ const digit_t max_multiplier = accumulator->max_multiplier_;
+
+ if (num_heap_parts == 0) {
+ for (int i = 1; i < num_stack_parts - 1; i++) {
+ MultiplySingle(Z, already_set, max_multiplier);
+ Add(Z, accumulator->stack_parts_[i]);
+ already_set.set_len(already_set.len() + 1);
+ }
+ MultiplySingle(Z, already_set, accumulator->last_multiplier_);
+ Add(Z, accumulator->stack_parts_[num_stack_parts - 1]);
+ return;
+ }
+ // Parts are stored on the heap.
+ for (int i = 1; i < num_heap_parts - 1; i++) {
+ MultiplySingle(Z, already_set, max_multiplier);
+ if (should_terminate()) return;
+ Add(Z, accumulator->heap_parts_[i]);
+ already_set.set_len(already_set.len() + 1);
+ }
+ MultiplySingle(Z, already_set, accumulator->last_multiplier_);
+ Add(Z, accumulator->heap_parts_.back());
+}
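
The accumulation above is the multi-precision version of a familiar scalar loop. A minimal sketch, assuming decimal input split into parts of up to four characters and a result small enough for uint64_t (the part size and multipliers are illustrative; the real accumulator sizes parts to a machine digit):

#include <algorithm>
#include <cstdint>
#include <string>

// Classic O(n^2) accumulation over parts: acc = acc * multiplier + part.
// Every part uses the same multiplier (10^4 here) except possibly the last,
// which gets a smaller one when the length is not a multiple of four,
// mirroring max_multiplier_ and last_multiplier_ above.
uint64_t ParseDecimalClassic(const std::string& s) {
  const size_t kPartChars = 4;
  uint64_t acc = 0;
  for (size_t i = 0; i < s.size();) {
    size_t n = std::min(kPartChars, s.size() - i);
    uint64_t part = std::stoull(s.substr(i, n));
    uint64_t multiplier = 1;
    for (size_t j = 0; j < n; j++) multiplier *= 10;
    acc = acc * multiplier + part;
    i += n;
  }
  return acc;  // e.g. ParseDecimalClassic("1234567890123") == 1234567890123
}
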
+
+void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
+ if (accumulator->inline_everything_) {
+ int i = 0;
+ for (; i < accumulator->stack_parts_used_; i++) {
+ Z[i] = accumulator->stack_parts_[i];
+ }
+ for (; i < Z.len(); i++) Z[i] = 0;
+ } else if (accumulator->stack_parts_used_ == 0) {
+ for (int i = 0; i < Z.len(); i++) Z[i] = 0;
+ } else {
+ FromStringClassic(Z, accumulator);
+ }
+}
+
+Status Processor::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
+ ProcessorImpl* impl = static_cast<ProcessorImpl*>(this);
+ impl->FromString(Z, accumulator);
+ return impl->get_and_clear_status();
+}
+
+} // namespace bigint
+} // namespace v8
diff --git a/deps/v8/src/bigint/mul-karatsuba.cc b/deps/v8/src/bigint/mul-karatsuba.cc
index 2a141f213c..d4b5a58383 100644
--- a/deps/v8/src/bigint/mul-karatsuba.cc
+++ b/deps/v8/src/bigint/mul-karatsuba.cc
@@ -201,5 +201,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y,
USE(overflow);
}
+#undef MAYBE_TERMINATE
+
} // namespace bigint
} // namespace v8
diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc
index b426c864cd..51fb75957a 100644
--- a/deps/v8/src/bigint/tostring.cc
+++ b/deps/v8/src/bigint/tostring.cc
@@ -106,6 +106,16 @@ char* DivideByMagic(RWDigits rest, Digits input, char* output) {
return output;
}
+class RecursionLevel;
+
+// The classic algorithm must check for interrupt requests if no faster
+// algorithm is available.
+#if V8_ADVANCED_BIGINT_ALGORITHMS
+#define MAYBE_INTERRUPT(code) ((void)0)
+#else
+#define MAYBE_INTERRUPT(code) code
+#endif
+
class ToStringFormatter {
public:
ToStringFormatter(Digits X, int radix, bool sign, char* out,
@@ -142,16 +152,16 @@ class ToStringFormatter {
if (radix_ == 10) {
// Faster but costs binary size, so we optimize the most common case.
out_ = DivideByMagic<10>(rest, dividend, out_);
- processor_->AddWorkEstimate(rest.len() * 2);
+ MAYBE_INTERRUPT(processor_->AddWorkEstimate(rest.len() * 2));
} else {
digit_t chunk;
processor_->DivideSingle(rest, &chunk, dividend, chunk_divisor_);
out_ = BasecaseMiddle(chunk, out_);
// Assume that a division is about ten times as expensive as a
// multiplication.
- processor_->AddWorkEstimate(rest.len() * 10);
+ MAYBE_INTERRUPT(processor_->AddWorkEstimate(rest.len() * 10));
}
- if (processor_->should_terminate()) return;
+ MAYBE_INTERRUPT(if (processor_->should_terminate()) return );
rest.Normalize();
dividend = rest;
} while (rest.len() > 1);
@@ -160,6 +170,12 @@ class ToStringFormatter {
void BasePowerOfTwo();
+ void Fast();
+ char* FillWithZeros(RecursionLevel* level, char* prev_cursor, char* out,
+ bool is_last_on_level);
+ char* ProcessLevel(RecursionLevel* level, Digits chunk, char* out,
+ bool is_last_on_level);
+
private:
// When processing the last (most significant) digit, don't write leading
// zeros.
@@ -197,6 +213,8 @@ class ToStringFormatter {
ProcessorImpl* processor_;
};
+#undef MAYBE_INTERRUPT
+
// Prepares data for {Classic}. Not needed for {BasePowerOfTwo}.
void ToStringFormatter::Start() {
max_bits_per_char_ = kMaxBitsPerChar[radix_];
@@ -251,16 +269,305 @@ void ToStringFormatter::BasePowerOfTwo() {
}
}
+#if V8_ADVANCED_BIGINT_ALGORITHMS
+
+// "Fast" divide-and-conquer conversion to string. The basic idea is to
+// recursively cut the BigInt in half (using a division with remainder,
+// the divisor being ~half as large (in bits) as the current dividend).
+//
+// As preparation, we build up a linked list of metadata for each recursion
+// level. We do this bottom-up, i.e. start with the level that will produce
+// two halves that are register-sized and bail out to the base case.
+// Each higher level (executed earlier, prepared later) uses a divisor that is
+// the square of the previously-created "next" level's divisor. Preparation
+// terminates when the current divisor is at least half as large as the bigint.
+// We also precompute each level's divisor's inverse, so we can use
+// Barrett division later.
+//
+// Example: say we want to format 1234567890123, and we can fit two decimal
+// digits into a register for the base case.
+//
+// 1234567890123
+// ↓
+// %100000000 (a) // RecursionLevel 2,
+// / \ // is_toplevel_ == true.
+// 12345 67890123
+// ↓ ↓
+// (e) %10000 %10000 (b) // RecursionLevel 1
+// / \ / \
+// 1 2345 6789 0123
+// ↓ (f) ↓ ↓ (d) ↓
+// (g) %100 %100 %100 %100 (c) // RecursionLevel 0
+// / \ / \ / \ / \
+// 00 01 23 45 67 89 01 23
+// ↓ ↓ ↓ ↓ ↓ ↓ ↓ // Base case.
+// "1" "23" "45" "67" "89" "01" "23"
+//
+// We start building RecursionLevels in order 0 -> 1 -> 2, performing the
+// squarings 100² = 10000 and 10000² = 100000000 each only once. Execution
+// then happens in order (a) through (g); lower-level divisors are used
+// repeatedly. We build the string from right to left.
+// Note that we can skip the division at (g) and fall through directly.
+// Also, note that there are two chunks with value 1: one of them must produce
+// a leading "0" in its string representation, the other must not.
+//
+// In this example, {base_divisor} is 100 and {base_char_count} is 2.
+
+// TODO(jkummerow): Investigate whether it is beneficial to build one or two
+// fewer RecursionLevels, and use the topmost level for more than one division.
+
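
The recursion is easiest to see on a machine integer first. A minimal sketch, assuming a uint64_t value, decimal output, and two characters per base-case chunk as in the example above (the value must fit in the requested character count); per-level divisors double in character count exactly like the precomputed RecursionLevel divisors below:

#include <cstdint>
#include <string>

// Divide-and-conquer decimal formatting: split by a divisor covering half of
// this level's characters, recurse on both halves, and pad every chunk
// except the leftmost one to its full width with leading '0' characters.
std::string ToDecimal(uint64_t x, int chars, bool is_leftmost) {
  if (chars <= 2) {  // base case: a register-sized chunk
    std::string s = std::to_string(x);
    if (!is_leftmost) s.insert(0, chars - s.size(), '0');
    return s;
  }
  int half = chars / 2;
  uint64_t divisor = 1;
  for (int i = 0; i < half; i++) divisor *= 10;  // this level's divisor
  if (x < divisor) {
    // Chunk already smaller than the divisor: fall through to the next
    // level directly (cf. the skipped division at (g) in the example
    // above); the left half is empty on the leftmost edge, all '0's else.
    std::string right = ToDecimal(x, half, is_leftmost);
    return is_leftmost ? right : std::string(chars - half, '0') + right;
  }
  std::string left = ToDecimal(x / divisor, chars - half, is_leftmost);
  std::string right = ToDecimal(x % divisor, half, false);
  return left + right;
}
// ToDecimal(1234567890123, 16, true) == "1234567890123"
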
+class RecursionLevel {
+ public:
+ static RecursionLevel* CreateLevels(digit_t base_divisor, int base_char_count,
+ int target_bit_length,
+ ProcessorImpl* processor);
+ ~RecursionLevel() { delete next_; }
+
+ void ComputeInverse(ProcessorImpl* proc, int dividend_length = 0);
+ Digits GetInverse(int dividend_length);
+
+ private:
+ friend class ToStringFormatter;
+ RecursionLevel(digit_t base_divisor, int base_char_count)
+ : char_count_(base_char_count), divisor_(1) {
+ divisor_[0] = base_divisor;
+ }
+ explicit RecursionLevel(RecursionLevel* next)
+ : char_count_(next->char_count_ * 2),
+ next_(next),
+ divisor_(next->divisor_.len() * 2) {
+ next->is_toplevel_ = false;
+ }
+
+ void LeftShiftDivisor() {
+ leading_zero_shift_ = CountLeadingZeros(divisor_.msd());
+ LeftShift(divisor_, divisor_, leading_zero_shift_);
+ }
+
+ int leading_zero_shift_{0};
+ // The number of characters generated by *each half* of this level.
+ int char_count_;
+ bool is_toplevel_{true};
+ RecursionLevel* next_{nullptr};
+ ScratchDigits divisor_;
+ std::unique_ptr<Storage> inverse_storage_;
+ Digits inverse_{nullptr, 0};
+};
+
+// static
+RecursionLevel* RecursionLevel::CreateLevels(digit_t base_divisor,
+ int base_char_count,
+ int target_bit_length,
+ ProcessorImpl* processor) {
+ RecursionLevel* level = new RecursionLevel(base_divisor, base_char_count);
+ // We can stop creating levels when the next level's divisor, which is the
+ // square of the current level's divisor, would be strictly bigger (in terms
+ // of its numeric value) than the input we're formatting. Since computing that
+ // next divisor is expensive, we want to predict the necessity based on bit
+ // lengths. Bit lengths are an imperfect predictor of numeric value, so we
+ // have to be careful:
+ // - since we can't estimate which one of two numbers of equal bit length
+ // is bigger, we have to aim for a strictly bigger bit length.
+ // - when squaring, the bit length sometimes doubles (e.g. 0b11² == 0b1001),
+ // but usually we "lose" a bit (e.g. 0b10² == 0b100).
+ while (BitLength(level->divisor_) * 2 - 1 <= target_bit_length) {
+ RecursionLevel* prev = level;
+ level = new RecursionLevel(prev);
+ processor->Multiply(level->divisor_, prev->divisor_, prev->divisor_);
+ if (processor->should_terminate()) {
+ delete level;
+ return nullptr;
+ }
+ level->divisor_.Normalize();
+ // Left-shifting the divisor must only happen after it's been used to
+ // compute the next divisor.
+ prev->LeftShiftDivisor();
+ prev->ComputeInverse(processor);
+ }
+ level->LeftShiftDivisor();
+ // Not calling level->ComputeInverse here so that it can take the input's
+ // length into account to save some effort on inverse generation.
+ return level;
+}
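
The "* 2 - 1" in the loop condition above is plain bit-length arithmetic: an L-bit divisor x satisfies 2^(L-1) <= x < 2^L, so x*x >= 2^(2L-2), i.e. the square has at least 2L-1 bits. Once 2L-1 exceeds the target bit length, the squared divisor is guaranteed to be strictly longer, and hence strictly larger, than the number being formatted, so no further level is needed; while 2L-1 <= target_bit_length that guarantee does not hold, and another level is built.
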
+
+// The top level might get by with a smaller inverse than we could maximally
+// compute, so the caller should provide the dividend length.
+void RecursionLevel::ComputeInverse(ProcessorImpl* processor,
+ int dividend_length) {
+ int inverse_len = divisor_.len();
+ if (dividend_length != 0) {
+ inverse_len = dividend_length - divisor_.len();
+ DCHECK(inverse_len <= divisor_.len());
+ }
+ int scratch_len = InvertScratchSpace(inverse_len);
+ ScratchDigits scratch(scratch_len);
+ Storage* inv_storage = new Storage(inverse_len + 1);
+ inverse_storage_.reset(inv_storage);
+ RWDigits inverse_initializer(inv_storage->get(), inverse_len + 1);
+ Digits input(divisor_, divisor_.len() - inverse_len, inverse_len);
+ processor->Invert(inverse_initializer, input, scratch);
+ inverse_initializer.TrimOne();
+ inverse_ = inverse_initializer;
+}
+
+Digits RecursionLevel::GetInverse(int dividend_length) {
+ DCHECK(inverse_.len() != 0); // NOLINT(readability/check)
+ int inverse_len = dividend_length - divisor_.len();
+ DCHECK(inverse_len <= inverse_.len());
+ return inverse_ + (inverse_.len() - inverse_len);
+}
+
+void ToStringFormatter::Fast() {
+ std::unique_ptr<RecursionLevel> recursion_levels(RecursionLevel::CreateLevels(
+ chunk_divisor_, chunk_chars_, BitLength(digits_), processor_));
+ if (processor_->should_terminate()) return;
+ out_ = ProcessLevel(recursion_levels.get(), digits_, out_, true);
+}
+
+// Writes '0' characters right-to-left, starting at {out}-1, until the distance
+// from {right_boundary} to {out} equals the number of characters that {level}
+// is supposed to produce.
+char* ToStringFormatter::FillWithZeros(RecursionLevel* level,
+ char* right_boundary, char* out,
+ bool is_last_on_level) {
+ // Fill up with zeros up to the character count expected to be generated
+ // on this level; unless this is the left edge of the result.
+ if (is_last_on_level) return out;
+ int chunk_chars = level == nullptr ? chunk_chars_ : level->char_count_ * 2;
+ char* end = right_boundary - chunk_chars;
+ DCHECK(out >= end);
+ while (out > end) {
+ *(--out) = '0';
+ }
+ return out;
+}
+
+char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk,
+ char* out, bool is_last_on_level) {
+ // Step 0: if only one digit is left, bail out to the base case.
+ Digits normalized = chunk;
+ normalized.Normalize();
+ if (normalized.len() <= 1) {
+ char* right_boundary = out;
+ if (normalized.len() == 1) {
+ out = BasecaseLast(normalized[0], out);
+ }
+ return FillWithZeros(level, right_boundary, out, is_last_on_level);
+ }
+
+ // Step 1: If the chunk is guaranteed to remain smaller than the divisor
+ // even after left-shifting, fall through to the next level immediately.
+ if (normalized.len() < level->divisor_.len()) {
+ char* right_boundary = out;
+ out = ProcessLevel(level->next_, chunk, out, is_last_on_level);
+ return FillWithZeros(level, right_boundary, out, is_last_on_level);
+ }
+ // Step 2: Prepare the chunk.
+ bool allow_inplace_modification = chunk.digits() != digits_.digits();
+ Digits original_chunk = chunk;
+ ShiftedDigits chunk_shifted(chunk, level->leading_zero_shift_,
+ allow_inplace_modification);
+ chunk = chunk_shifted;
+ chunk.Normalize();
+ // Check (now precisely) if the chunk is smaller than the divisor.
+ int comparison = Compare(chunk, level->divisor_);
+ if (comparison <= 0) {
+ char* right_boundary = out;
+ if (comparison < 0) {
+ // If the chunk is strictly smaller than the divisor, we can process
+ // it directly on the next level as the right half, and know that the
+ // left half is all '0'.
+ // In case we shifted {chunk} in-place, we must undo that
+ // before the call...
+ chunk_shifted.Reset();
+ // ...and otherwise undo the {chunk = chunk_shifted} assignment above.
+ chunk = original_chunk;
+ out = ProcessLevel(level->next_, chunk, out, is_last_on_level);
+ } else {
+ DCHECK(comparison == 0); // NOLINT(readability/check)
+ // If the chunk is equal to the divisor, we know that the right half
+ // is all '0', and the left half is '...0001'.
+ // Handling this case specially is an optimization; we could also
+ // fall through to the generic "chunk > divisor" path below.
+ out = FillWithZeros(level->next_, right_boundary, out, false);
+ *(--out) = '1';
+ }
+ // In both cases, make sure the left half is fully written.
+ return FillWithZeros(level, right_boundary, out, is_last_on_level);
+ }
+ // Step 3: Allocate space for the results.
+ // Allocate one extra digit so the next level can left-shift in-place.
+ ScratchDigits right(level->divisor_.len() + 1);
+ // Allocate one extra digit because DivideBarrett requires it.
+ ScratchDigits left(chunk.len() - level->divisor_.len() + 1);
+
+ // Step 4: Divide to split {chunk} into {left} and {right}.
+ int inverse_len = chunk.len() - level->divisor_.len();
+ if (inverse_len == 0) {
+ processor_->DivideSchoolbook(left, right, chunk, level->divisor_);
+ } else if (level->divisor_.len() == 1) {
+ processor_->DivideSingle(left, right.digits(), chunk, level->divisor_[0]);
+ for (int i = 1; i < right.len(); i++) right[i] = 0;
+ } else {
+ ScratchDigits scratch(DivideBarrettScratchSpace(chunk.len()));
+ // The top level only computes its inverse when {chunk.len()} is
+ // available. Other levels have precomputed theirs.
+ if (level->is_toplevel_) {
+ level->ComputeInverse(processor_, chunk.len());
+ if (processor_->should_terminate()) return out;
+ }
+ Digits inverse = level->GetInverse(chunk.len());
+ processor_->DivideBarrett(left, right, chunk, level->divisor_, inverse,
+ scratch);
+ if (processor_->should_terminate()) return out;
+ }
+ RightShift(right, right, level->leading_zero_shift_);
+#if DEBUG
+ Digits left_test = left;
+ left_test.Normalize();
+ DCHECK(left_test.len() <= level->divisor_.len());
+#endif
+
+ // Step 5: Recurse.
+ char* end_of_right_part = ProcessLevel(level->next_, right, out, false);
+ // The recursive calls are required and hence designed to write exactly as
+ // many characters as their level is responsible for.
+ DCHECK(end_of_right_part == out - level->char_count_);
+ USE(end_of_right_part);
+ if (processor_->should_terminate()) return out;
+ // We intentionally don't use {end_of_right_part} here to be prepared for
+ // potential future multi-threaded execution.
+ return ProcessLevel(level->next_, left, out - level->char_count_,
+ is_last_on_level);
+}
+
+#endif // V8_ADVANCED_BIGINT_ALGORITHMS
+
} // namespace
void ProcessorImpl::ToString(char* out, int* out_length, Digits X, int radix,
bool sign) {
+ const bool use_fast_algorithm = X.len() >= kToStringFastThreshold;
+ ToStringImpl(out, out_length, X, radix, sign, use_fast_algorithm);
+}
+
+// Factored out so that tests can call it.
+void ProcessorImpl::ToStringImpl(char* out, int* out_length, Digits X,
+ int radix, bool sign, bool fast) {
#if DEBUG
for (int i = 0; i < *out_length; i++) out[i] = kStringZapValue;
#endif
ToStringFormatter formatter(X, radix, sign, out, *out_length, this);
if (IsPowerOfTwo(radix)) {
formatter.BasePowerOfTwo();
+#if V8_ADVANCED_BIGINT_ALGORITHMS
+ } else if (fast) {
+ formatter.Start();
+ formatter.Fast();
+ if (should_terminate()) return;
+#else
+ USE(fast);
+#endif // V8_ADVANCED_BIGINT_ALGORITHMS
} else {
formatter.Start();
formatter.Classic();
diff --git a/deps/v8/src/bigint/vector-arithmetic.h b/deps/v8/src/bigint/vector-arithmetic.h
index 3247660f95..d8b79a961a 100644
--- a/deps/v8/src/bigint/vector-arithmetic.h
+++ b/deps/v8/src/bigint/vector-arithmetic.h
@@ -45,6 +45,9 @@ digit_t AddAndReturnCarry(RWDigits Z, Digits X, Digits Y);
digit_t SubtractAndReturnBorrow(RWDigits Z, Digits X, Digits Y);
inline bool IsDigitNormalized(Digits X) { return X.len() == 0 || X.msd() != 0; }
+inline bool IsBitNormalized(Digits X) {
+ return (X.msd() >> (kDigitBits - 1)) == 1;
+}
inline bool GreaterThanOrEqual(Digits A, Digits B) {
return Compare(A, B) >= 0;
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 20312d8336..f45c927e67 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -129,9 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize));
+ __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -276,9 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize));
+ __ DropArguments(r1, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -828,7 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ add(sp, sp, params_size, LeaveCC);
+ __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1113,14 +1112,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit store. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
- __ strh(scratch, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ __ strh(scratch,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
}
__ Push(argc, bytecodeArray);
@@ -1266,11 +1266,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit store. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r9, Operand(0));
__ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1861,8 +1861,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(2), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
- __ str(r5, MemOperand(sp, 0));
+ __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1938,8 +1938,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(3), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
- __ str(r5, MemOperand(sp, 0));
+ __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1981,8 +1981,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
__ cmp(r0, Operand(3), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
- __ str(r4, MemOperand(sp, 0)); // set undefined to the receiver
+ __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -3479,12 +3479,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -3492,6 +3493,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = r1;
__ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = r4;
+ __ ldr(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ b(eq, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ ldr(code_obj,
+ FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Load the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
@@ -3513,15 +3546,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = r4;
- __ ldr(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ ldr(code_obj,
- FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -3554,6 +3578,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = r0;
Register arg_reg_2 = r1;
@@ -3575,8 +3601,9 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
- __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ __ strh(scratch,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -3600,8 +3627,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ b(&start);
@@ -3609,17 +3638,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 06245ea51f..b1f9a63e3c 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1297,10 +1297,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit store. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(bytecode_array,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecode_array);
@@ -1456,10 +1456,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit store. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
@@ -4005,12 +4005,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(padreg, kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -4018,6 +4019,43 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = x1;
__ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = x22;
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ B(eq, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ LoadTaggedPointerField(
+ code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+ }
+
// Load the feedback vector.
Register feedback_vector = x2;
__ LoadTaggedPointerField(
@@ -4040,20 +4078,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = x22;
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
- }
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4086,6 +4110,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(padreg, kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = x0;
Register arg_reg_2 = x1;
@@ -4104,7 +4130,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
@@ -4127,8 +4153,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(padreg, kInterpreterAccumulatorRegister);
__ PushArgument(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister, padreg);
}
// Retry from the start after installing baseline code.
__ B(&start);
@@ -4136,17 +4164,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/array-concat.tq b/deps/v8/src/builtins/array-concat.tq
new file mode 100644
index 0000000000..5eb66e6ce8
--- /dev/null
+++ b/deps/v8/src/builtins/array-concat.tq
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+
+extern builtin ArrayConcat(Context, JSFunction, JSAny, int32): JSAny;
+
+transitioning javascript builtin
+ArrayPrototypeConcat(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // Fast path if we invoke as `x.concat()`.
+ if (arguments.length == 0) {
+ typeswitch (receiver) {
+ case (a: FastJSArrayForConcat): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
+ // Fallthrough.
+ }
+ }
+ }
+
+ // Fast path if we invoke as `[].concat(x)`.
+ try {
+ const receiverAsArray: FastJSArrayForConcat =
+ Cast<FastJSArrayForConcat>(receiver)
+ otherwise ReceiverIsNotFastJSArrayForConcat;
+ if (receiverAsArray.IsEmpty() && arguments.length == 1) {
+ typeswitch (arguments[0]) {
+ case (a: FastJSArrayForCopy): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
+ // Fallthrough.
+ }
+ }
+ }
+ } label ReceiverIsNotFastJSArrayForConcat {
+ // Fallthrough.
+ }
+
+ // TODO(victorgomes): Implement slow path ArrayConcat in Torque.
+ tail ArrayConcat(
+ context, LoadTargetFromFrame(), Undefined,
+ Convert<int32>(arguments.length));
+}
+
+} // namespace array
diff --git a/deps/v8/src/builtins/array-findlast.tq b/deps/v8/src/builtins/array-findlast.tq
new file mode 100644
index 0000000000..a359ec915f
--- /dev/null
+++ b/deps/v8/src/builtins/array-findlast.tq
@@ -0,0 +1,110 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast
+transitioning builtin ArrayFindLastLoopContinuation(implicit context: Context)(
+ predicate: Callable, thisArg: JSAny, o: JSReceiver,
+ initialK: Number): JSAny {
+ // 5. Repeat, while k >= 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
+
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny = Call(context, predicate, thisArg, value, k, o);
+
+ // 5d. If testResult is true, return kValue.
+ if (ToBoolean(testResult)) {
+ return value;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast
+transitioning macro FastArrayFindLast(implicit context: Context)(
+ o: JSReceiver, len: Number, predicate: Callable, thisArg: JSAny): JSAny
+ labels Bailout(Number) {
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1);
+ // 4. Let k be len - 1.
+ let k: Smi = smiLen - 1;
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // 5. Repeat, while k ≥ 0
+ // Build a fast loop over the smi array.
+ for (; k >= 0; k--) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence there is no need to
+ // cast ToString for LoadElementOrUndefined.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = fastOW.LoadElementOrUndefined(k);
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny =
+ Call(context, predicate, thisArg, value, k, fastOW.Get());
+ // 5d. If testResult is true, return kValue.
+ if (ToBoolean(testResult)) {
+ return value;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast
+transitioning javascript builtin
+ArrayPrototypeFindLast(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.findLast');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? LengthOfArrayLike(O).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(predicate) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
+ }
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallableError;
+
+ // If a thisArg parameter is provided, it will be used as the this value for
+ // each invocation of predicate. If it is not provided, undefined is used
+ // instead.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ try {
+ return FastArrayFindLast(o, len, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return ArrayFindLastLoopContinuation(predicate, thisArg, o, k);
+ }
+ } label NotCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ }
+}
+}
diff --git a/deps/v8/src/builtins/array-findlastindex.tq b/deps/v8/src/builtins/array-findlastindex.tq
new file mode 100644
index 0000000000..3b5498f961
--- /dev/null
+++ b/deps/v8/src/builtins/array-findlastindex.tq
@@ -0,0 +1,111 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex
+transitioning builtin ArrayFindLastIndexLoopContinuation(
+ implicit context: Context)(
+ predicate: Callable, thisArg: JSAny, o: JSReceiver,
+ initialK: Number): Number {
+ // 5. Repeat, while k >= 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
+
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny = Call(context, predicate, thisArg, value, k, o);
+
+ // 5d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(testResult)) {
+ return k;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return -1𝔽.
+ return Convert<Smi>(-1);
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex
+transitioning macro FastArrayFindLastIndex(implicit context: Context)(
+ o: JSReceiver, len: Number, predicate: Callable, thisArg: JSAny): Number
+ labels Bailout(Number) {
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1);
+ // 4. Let k be len - 1.
+ let k: Smi = smiLen - 1;
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // 5. Repeat, while k ≥ 0
+ // Build a fast loop over the smi array.
+ for (; k >= 0; k--) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ // 5a. Let Pk be ! ToString(𝔽(k)).
+ // k is guaranteed to be a positive integer, hence there is no need to
+ // cast ToString for LoadElementOrUndefined.
+
+ // 5b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = fastOW.LoadElementOrUndefined(k);
+ // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ const testResult: JSAny =
+ Call(context, predicate, thisArg, value, k, fastOW.Get());
+ // 5d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(testResult)) {
+ return k;
+ }
+
+ // 5e. Set k to k - 1. (done by the loop).
+ }
+
+ // 6. Return -1𝔽.
+ return -1;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex
+transitioning javascript builtin
+ArrayPrototypeFindLastIndex(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.findLastIndex');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? LengthOfArrayLike(O).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(predicate) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
+ }
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallableError;
+
+ // If a thisArg parameter is provided, it will be used as the this value for
+ // each invocation of predicate. If it is not provided, undefined is used
+ // instead.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ try {
+ return FastArrayFindLastIndex(o, len, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return ArrayFindLastIndexLoopContinuation(predicate, thisArg, o, k);
+ }
+ } label NotCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ }
+}
+}
diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq
index 5794414443..fc0152f51a 100644
--- a/deps/v8/src/builtins/arraybuffer.tq
+++ b/deps/v8/src/builtins/arraybuffer.tq
@@ -18,115 +18,103 @@ transitioning javascript builtin ArrayBufferPrototypeGetByteLength(
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
- if (IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
- // 5. If IsDetachedBuffer(O) is true, throw a TypeError exception.
- // TODO(v8:4895): We don't actually throw here.
- // 6. Let length be O.[[ArrayBufferByteLength]].
+ // 4. Let length be O.[[ArrayBufferByteLength]].
const length = o.byte_length;
- // 7. Return length.
+ // 5. Return length.
return Convert<Number>(length);
}
-// #sec-get-sharedarraybuffer.prototype.bytelength
-transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength(
+// #sec-get-arraybuffer.prototype.maxbytelength
+transitioning javascript builtin ArrayBufferPrototypeGetMaxByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
- const functionName = 'get SharedArrayBuffer.prototype.byteLength';
+ const functionName = 'get ArrayBuffer.prototype.maxByteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- // 3. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
- if (!IsSharedArrayBuffer(o)) {
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
- if (IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ // 4. If IsDetachedBuffer(O) is true, return 0_F.
+ if (IsDetachedBuffer(o)) {
+ return 0;
}
- // 5. Let length be O.[[ArrayBufferByteLength]].
- const length = o.byte_length;
- // 6. Return length.
- return Convert<Number>(length);
+ // 5. If IsResizableArrayBuffer(O) is true, then
+ // a. Let length be O.[[ArrayBufferMaxByteLength]].
+ // 6. Else,
+ // a. Let length be O.[[ArrayBufferByteLength]].
+ // 7. Return F(length);
+ assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ return Convert<Number>(o.max_byte_length);
}
-// #sec-get-resizablearraybuffer.prototype.bytelength
-transitioning javascript builtin ResizableArrayBufferPrototypeGetByteLength(
- js-implicit context: NativeContext, receiver: JSAny)(): Number {
+// #sec-get-arraybuffer.prototype.resizable
+transitioning javascript builtin ArrayBufferPrototypeGetResizable(
+ js-implicit context: NativeContext, receiver: JSAny)(): Boolean {
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
- const functionName = 'get ResizableArrayBuffer.prototype.byteLength';
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get ArrayBuffer.prototype.resizable';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- if (!IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
// 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
if (IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. Let length be O.[[ArrayBufferByteLength]].
- const length = o.byte_length;
- // 5. Return length.
- return Convert<Number>(length);
+ // 4. Return IsResizableArrayBuffer(O).
+ if (IsResizableArrayBuffer(o)) {
+ return True;
+ }
+ return False;
}
-// #sec-get-resizablearraybuffer.prototype.maxbytelength
-transitioning javascript builtin ResizableArrayBufferPrototypeGetMaxByteLength(
+// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
+transitioning javascript builtin
+SharedArrayBufferPrototypeGetMaxByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
- const functionName = 'get ResizableArrayBuffer.prototype.maxByteLength';
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get SharedArrayBuffer.prototype.maxByteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- if (!IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
- // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
- if (IsSharedArrayBuffer(o)) {
+ // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ if (!IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. Let length be O.[[ArrayBufferMaxByteLength]].
- const length = o.max_byte_length;
- // 5. Return length.
- return Convert<Number>(length);
+ // 4. If IsResizableArrayBuffer(O) is true, then
+ // a. Let length be O.[[ArrayBufferMaxByteLength]].
+ // 5. Else,
+ // a. Let length be O.[[ArrayBufferByteLength]].
+ // 6. Return F(length);
+ assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ return Convert<Number>(o.max_byte_length);
}
-// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
-transitioning javascript builtin
-GrowableSharedArrayBufferPrototypeGetMaxByteLength(
- js-implicit context: NativeContext, receiver: JSAny)(): Number {
+// #sec-get-sharedarraybuffer.prototype.growable
+transitioning javascript builtin SharedArrayBufferPrototypeGetGrowable(
+ js-implicit context: NativeContext, receiver: JSAny)(): Boolean {
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
- const functionName = 'get GrowableSharedArrayBuffer.prototype.maxByteLength';
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get SharedArrayBuffer.prototype.growable';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- if (!IsResizableArrayBuffer(o)) {
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
- }
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
if (!IsSharedArrayBuffer(o)) {
ThrowTypeError(
MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. Let length be O.[[ArrayBufferMaxByteLength]].
- const length = o.max_byte_length;
- // 5. Return length.
- return Convert<Number>(length);
+ // 4. Return IsResizableArrayBuffer(O).
+ if (IsResizableArrayBuffer(o)) {
+ return True;
+ }
+ return False;
}
// #sec-arraybuffer.isview
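The SharedArrayBuffer.prototype.maxByteLength and .growable getters above are the shared-memory counterparts. A short JavaScript sketch of the expected behavior (same feature-flag assumption as above):

  const sab = new SharedArrayBuffer(8);
  sab.growable;                                  // false
  sab.maxByteLength;                             // 8 (equals byteLength, per step 5.a)

  const gsab = new SharedArrayBuffer(8, { maxByteLength: 64 });
  gsab.growable;                                 // true
  gsab.maxByteLength;                            // 64

  // Both getters throw a TypeError for a non-shared ArrayBuffer receiver,
  // matching the IsSharedArrayBuffer checks above.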
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index c0acc90593..af1813b61d 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -214,7 +214,7 @@ extern class GlobalDictionary extends HashTable;
extern class SimpleNumberDictionary extends HashTable;
extern class EphemeronHashTable extends HashTable;
type ObjectHashTable extends HashTable
- generates 'TNode<ObjectHashTable>';
+ generates 'TNode<ObjectHashTable>' constexpr 'ObjectHashTable';
extern class NumberDictionary extends HashTable;
type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
@@ -552,8 +552,20 @@ extern class Filler extends HeapObject generates 'TNode<HeapObject>';
// but not their own class definitions:
// Like JSObject, but created from API function.
-@apiExposedInstanceTypeValue(0x420)
+@apiExposedInstanceTypeValue(0x422)
+@doNotGenerateCast
+@noVerifier
extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
+
+// TODO(gsathya): This only exists to make JSApiObject instance type into a
+// range.
+@apiExposedInstanceTypeValue(0x80A)
+@doNotGenerateCast
+@highestInstanceTypeWithinParentClassRange
+@noVerifier
+extern class JSLastDummyApiObject extends JSApiObject
+ generates 'TNode<JSObject>';
+
// Like JSApiObject, but requires access checks and/or has interceptors.
@apiExposedInstanceTypeValue(0x410)
extern class JSSpecialApiObject extends JSSpecialObject
@@ -669,6 +681,8 @@ extern macro ThrowTypeError(implicit context: Context)(
constexpr MessageTemplate, Object, Object, Object): never;
extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)(
Smi, Object, Object): void;
+extern transitioning runtime ThrowIteratorError(implicit context: Context)(
+ JSAny): never;
extern transitioning runtime ThrowCalledNonCallable(implicit context: Context)(
JSAny): never;
@@ -1198,6 +1212,7 @@ extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
extern macro IsNoElementsProtectorCellInvalid(): bool;
extern macro IsArrayIteratorProtectorCellInvalid(): bool;
extern macro IsArraySpeciesProtectorCellInvalid(): bool;
+extern macro IsIsConcatSpreadableProtectorCellInvalid(): bool;
extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool;
extern macro IsPromiseSpeciesProtectorCellInvalid(): bool;
extern macro IsMockArrayBufferAllocatorFlag(): bool;
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index c3a7f1b98c..f995299b7e 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -50,16 +50,11 @@ bool RoundUpToPageSize(size_t byte_length, size_t page_size,
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
Handle<Object> max_length, InitializedFlag initialized) {
- SharedFlag shared =
- (*target != target->native_context().array_buffer_fun() &&
- *target != target->native_context().resizable_array_buffer_fun())
- ? SharedFlag::kShared
- : SharedFlag::kNotShared;
- ResizableFlag resizable =
- (*target == target->native_context().resizable_array_buffer_fun() ||
- *target == target->native_context().growable_shared_array_buffer_fun())
- ? ResizableFlag::kResizable
- : ResizableFlag::kNotResizable;
+ SharedFlag shared = *target != target->native_context().array_buffer_fun()
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+ ResizableFlag resizable = max_length.is_null() ? ResizableFlag::kNotResizable
+ : ResizableFlag::kResizable;
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -83,12 +78,9 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
if (resizable == ResizableFlag::kNotResizable) {
backing_store =
BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ max_byte_length = byte_length;
} else {
- Handle<Object> number_max_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length,
- Object::ToInteger(isolate, max_length));
-
- if (!TryNumberToSize(*number_max_length, &max_byte_length)) {
+ if (!TryNumberToSize(*max_length, &max_byte_length)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
@@ -116,8 +108,8 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
}
constexpr bool kIsWasmMemory = false;
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
- isolate, byte_length, page_size, initial_pages, max_pages,
- kIsWasmMemory, shared);
+ isolate, byte_length, max_byte_length, page_size, initial_pages,
+ max_pages, kIsWasmMemory, shared);
}
if (!backing_store) {
// Allocation of backing store failed.
@@ -137,10 +129,7 @@ BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
DCHECK(*target == target->native_context().array_buffer_fun() ||
- *target == target->native_context().shared_array_buffer_fun() ||
- *target == target->native_context().resizable_array_buffer_fun() ||
- *target ==
- target->native_context().growable_shared_array_buffer_fun());
+ *target == target->native_context().shared_array_buffer_fun());
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
@@ -158,9 +147,22 @@ BUILTIN(ArrayBufferConstructor) {
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- Handle<Object> max_length = args.atOrUndefined(isolate, 2);
- return ConstructBuffer(isolate, target, new_target, number_length, max_length,
- InitializedFlag::kZeroInitialized);
+ Handle<Object> number_max_length;
+ if (FLAG_harmony_rab_gsab) {
+ Handle<Object> max_length;
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, max_length,
+ JSObject::ReadFromOptionsBag(
+ options, isolate->factory()->max_byte_length_string(), isolate));
+
+ if (!max_length->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, number_max_length, Object::ToInteger(isolate, max_length));
+ }
+ }
+ return ConstructBuffer(isolate, target, new_target, number_length,
+ number_max_length, InitializedFlag::kZeroInitialized);
}
// This is a helper to construct an ArrayBuffer with uninitialized memory.
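The constructor change above folds the former ResizableArrayBuffer/GrowableSharedArrayBuffer constructors into the existing ones by reading maxByteLength from an options bag. A rough JavaScript sketch of the accepted forms (again assuming --harmony-rab-gsab):

  // No options bag: number_max_length stays empty, so ConstructBuffer
  // selects ResizableFlag::kNotResizable.
  const ab = new ArrayBuffer(16);

  // maxByteLength is read from the options bag and run through ToInteger
  // before being handed to ConstructBuffer.
  const rab = new ArrayBuffer(16, { maxByteLength: 1024 });

  // A maxByteLength that cannot be converted to a valid size is rejected:
  // new ArrayBuffer(16, { maxByteLength: -1 });   // RangeError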
@@ -462,45 +464,48 @@ static Object ResizeHelper(BuiltinArguments args, Isolate* isolate,
return ReadOnlyRoots(isolate).undefined_value();
}
-// ES #sec-get-growablesharedarraybuffer.prototype.bytelength
-// get GrowableSharedArrayBuffer.prototype.byteLength
-BUILTIN(GrowableSharedArrayBufferPrototypeGetByteLength) {
- const char* const kMethodName =
- "get GrowableSharedArrayBuffer.prototype.byteLength";
+// ES #sec-get-sharedarraybuffer.prototype.bytelength
+// get SharedArrayBuffer.prototype.byteLength
+BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
+ const char* const kMethodName = "get SharedArrayBuffer.prototype.byteLength";
HandleScope scope(isolate);
-
// 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxLength]]).
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
- CHECK_RESIZABLE(true, array_buffer, kMethodName);
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(true, array_buffer, kMethodName);
- // 4. Let length be ArrayBufferByteLength(O, SeqCst).
-
- // Invariant: byte_length for GSAB is 0 (it needs to be read from the
- // BackingStore).
- DCHECK_EQ(0, array_buffer->byte_length());
+ DCHECK_EQ(array_buffer->max_byte_length(),
+ array_buffer->GetBackingStore()->max_byte_length());
- size_t byte_length =
- array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ // 4. Let length be ArrayBufferByteLength(O, SeqCst).
+ size_t byte_length;
+ if (array_buffer->is_resizable()) {
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, array_buffer->byte_length());
- // 5. Return length.
+ byte_length =
+ array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ } else {
+ byte_length = array_buffer->byte_length();
+ }
+ // 5. Return F(length).
return *isolate->factory()->NewNumberFromSize(byte_length);
}
-// ES #sec-resizablearraybuffer.prototype.resize
-// ResizableArrayBuffer.prototype.resize(new_size))
-BUILTIN(ResizableArrayBufferPrototypeResize) {
- const char* const kMethodName = "ResizableArrayBuffer.prototype.resize";
+// ES #sec-arraybuffer.prototype.resize
+// ArrayBuffer.prototype.resize(new_size)
+BUILTIN(ArrayBufferPrototypeResize) {
+ const char* const kMethodName = "ArrayBuffer.prototype.resize";
constexpr bool kIsShared = false;
return ResizeHelper(args, isolate, kMethodName, kIsShared);
}
-// ES #sec-growablesharedarraybuffer.prototype.grow
-// GrowableSharedArrayBuffer.prototype.grow(new_size))
-BUILTIN(GrowableSharedArrayBufferPrototypeGrow) {
- const char* const kMethodName = "GrowableSharedArrayBuffer.prototype.grow";
+// ES #sec-sharedarraybuffer.prototype.grow
+// SharedArrayBuffer.prototype.grow(new_size)
+BUILTIN(SharedArrayBufferPrototypeGrow) {
+ const char* const kMethodName = "SharedArrayBuffer.prototype.grow";
constexpr bool kIsShared = true;
return ResizeHelper(args, isolate, kMethodName, kIsShared);
}
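Taken together, the renamed builtins above surface as ArrayBuffer.prototype.resize and SharedArrayBuffer.prototype.grow. A hedged JavaScript sketch of the behavior they implement (flag-gated as above):

  const rab = new ArrayBuffer(8, { maxByteLength: 64 });
  rab.resize(32);        // rab.byteLength is now 32
  // rab.resize(128) would throw: the new size exceeds maxByteLength.

  const gsab = new SharedArrayBuffer(8, { maxByteLength: 64 });
  gsab.grow(16);         // growth is monotonic; shrinking is not allowed
  gsab.byteLength;       // 16, read from the BackingStore (SeqCst) as in the
                         // SharedArrayBufferPrototypeGetByteLength builtin above.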
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 21841e382c..a1359cd422 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -46,22 +46,6 @@ void ConsoleCall(
CHECK(!isolate->has_scheduled_exception());
if (!isolate->console_delegate()) return;
HandleScope scope(isolate);
-
- // Access check. The current context has to match the context of all
- // arguments, otherwise the inspector might leak objects across contexts.
- Handle<Context> context = handle(isolate->context(), isolate);
- for (int i = 0; i < args.length(); ++i) {
- Handle<Object> argument = args.at<Object>(i);
- if (!argument->IsJSObject()) continue;
-
- Handle<JSObject> argument_obj = Handle<JSObject>::cast(argument);
- if (argument->IsAccessCheckNeeded(isolate) &&
- !isolate->MayAccess(context, argument_obj)) {
- isolate->ReportFailedAccessCheck(argument_obj);
- return;
- }
- }
-
debug::ConsoleCallArguments wrapper(args);
Handle<Object> context_id_obj = JSObject::GetDataProperty(
args.target(), isolate->factory()->console_context_id_symbol());
@@ -78,7 +62,7 @@ void ConsoleCall(
}
void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
- Logger::StartEnd se) {
+ v8::LogEventStatus se) {
if (!isolate->logger()->is_logging()) return;
HandleScope scope(isolate);
std::unique_ptr<char[]> name;
@@ -102,21 +86,21 @@ CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
BUILTIN(ConsoleTime) {
- LogTimerEvent(isolate, args, Logger::START);
+ LogTimerEvent(isolate, args, v8::LogEventStatus::kStart);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::Time);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(ConsoleTimeEnd) {
- LogTimerEvent(isolate, args, Logger::END);
+ LogTimerEvent(isolate, args, v8::LogEventStatus::kEnd);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeEnd);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(ConsoleTimeStamp) {
- LogTimerEvent(isolate, args, Logger::STAMP);
+ LogTimerEvent(isolate, args, v8::LogEventStatus::kStamp);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeStamp);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index b5caebd7c4..70eb349dab 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -170,8 +170,8 @@ namespace internal {
ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
ASM(BaselineOnStackReplacement, Void) \
ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
- ASM(BaselineEnterAtBytecode, Void) \
- ASM(BaselineEnterAtNextBytecode, Void) \
+ ASM(BaselineOrInterpreterEnterAtBytecode, Void) \
+ ASM(BaselineOrInterpreterEnterAtNextBytecode, Void) \
ASM(InterpreterOnStackReplacement_ToBaseline, Void) \
\
/* Code life-cycle */ \
@@ -394,6 +394,8 @@ namespace internal {
CPP(ArrayBufferConstructor) \
CPP(ArrayBufferConstructor_DoNotInitialize) \
CPP(ArrayBufferPrototypeSlice) \
+ /* https://tc39.es/proposal-resizablearraybuffer/ */ \
+ CPP(ArrayBufferPrototypeResize) \
\
/* AsyncFunction */ \
TFS(AsyncFunctionEnter, kClosure, kReceiver) \
@@ -799,11 +801,6 @@ namespace internal {
ASM(RegExpInterpreterTrampoline, CCall) \
ASM(RegExpExperimentalTrampoline, CCall) \
\
- /* ResizableArrayBuffer & GrowableSharedArrayBuffer */ \
- CPP(ResizableArrayBufferPrototypeResize) \
- CPP(GrowableSharedArrayBufferPrototypeGrow) \
- CPP(GrowableSharedArrayBufferPrototypeGetByteLength) \
- \
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
@@ -823,7 +820,11 @@ namespace internal {
TFS(SetOrSetIteratorToList, kSource) \
\
/* SharedArrayBuffer */ \
+ CPP(SharedArrayBufferPrototypeGetByteLength) \
CPP(SharedArrayBufferPrototypeSlice) \
+ /* https://tc39.es/proposal-resizablearraybuffer/ */ \
+ CPP(SharedArrayBufferPrototypeGrow) \
+ \
TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \
TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \
TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 5920d9fe7c..535188c567 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -1085,6 +1085,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll);
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
+ CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices);
#undef CASE_FOR_FLAG
#define CASE_FOR_FLAG(NAME, V8_FLAG_EXTERN_REF, FLAG) \
@@ -1107,10 +1108,6 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
} while (false)
CASE_FOR_FLAG(
- "hasIndices",
- ExternalReference::address_of_harmony_regexp_match_indices_flag(),
- JSRegExp::kHasIndices);
- CASE_FOR_FLAG(
"linear",
ExternalReference::address_of_enable_experimental_regexp_engine(),
JSRegExp::kLinear);
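The FlagsGetter change above moves the 'hasIndices' (d) flag out of the harmony-flag-gated cases and into the unconditionally supported ones. A small JavaScript illustration of the flag being reflected:

  const re = /(?<year>\d{4})/d;
  re.hasIndices;          // true
  re.flags;               // includes 'd'
  const m = re.exec('in 2021');
  m.indices.groups.year;  // [start, end] offsets of the named capture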
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 735d8b674f..a76650d052 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -154,13 +154,16 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was detached.
- TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<UintPtrT> byte_offset = Select<UintPtrT>(
- IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
- [=] { return LoadJSArrayBufferViewByteOffset(CAST(receiver)); });
- Return(ChangeUintPtrToTagged(byte_offset));
+ // Default to zero if the {receiver}s buffer was detached / out of bounds.
+ Label detached_or_oob(this), not_detached_or_oob(this);
+ IsTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
+ &not_detached_or_oob);
+ BIND(&detached_or_oob);
+ Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
+
+ BIND(&not_detached_or_oob);
+ Return(
+ ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(CAST(receiver))));
}
// ES6 #sec-get-%typedarray%.prototype.length
@@ -267,6 +270,17 @@ void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<RawPtrT> dest_ptr,
std::make_pair(MachineType::UintPtr(), byte_length));
}
+void TypedArrayBuiltinsAssembler::CallCRelaxedMemmove(
+ TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length) {
+ TNode<ExternalReference> memmove =
+ ExternalConstant(ExternalReference::relaxed_memmove_function());
+ CallCFunction(memmove, MachineType::AnyTagged(),
+ std::make_pair(MachineType::Pointer(), dest_ptr),
+ std::make_pair(MachineType::Pointer(), src_ptr),
+ std::make_pair(MachineType::UintPtr(), byte_length));
+}
+
void TypedArrayBuiltinsAssembler::CallCMemcpy(TNode<RawPtrT> dest_ptr,
TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length) {
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 0ec179ac9e..bb8a15ef02 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -52,6 +52,9 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void CallCMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
+ void CallCRelaxedMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length);
+
void CallCMemcpy(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index bb936e6e46..d6be81615d 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -99,7 +99,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
count = count * element_size;
uint8_t* data = static_cast<uint8_t*>(array->DataPtr());
- std::memmove(data + to, data + from, count);
+ if (array->buffer().is_shared()) {
+ base::Relaxed_Memmove(reinterpret_cast<base::Atomic8*>(data + to),
+ reinterpret_cast<base::Atomic8*>(data + from), count);
+ } else {
+ std::memmove(data + to, data + from, count);
+ }
return *array;
}
@@ -114,7 +119,7 @@ BUILTIN(TypedArrayPrototypeFill) {
ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
- if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ if (IsBigIntTypedArrayElementsKind(kind)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
BigInt::FromObject(isolate, obj_value));
} else {
@@ -122,7 +127,7 @@ BUILTIN(TypedArrayPrototypeFill) {
Object::ToNumber(isolate, obj_value));
}
- int64_t len = array->length();
+ int64_t len = array->GetLength();
int64_t start = 0;
int64_t end = len;
@@ -142,11 +147,22 @@ BUILTIN(TypedArrayPrototypeFill) {
}
}
+ if (V8_UNLIKELY(array->IsVariableLength())) {
+ bool out_of_bounds = false;
+ array->GetLengthOrOutOfBounds(out_of_bounds);
+ if (out_of_bounds) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
+ }
+ } else if (V8_UNLIKELY(array->WasDetached())) {
+ return *array;
+ }
+
int64_t count = end - start;
if (count <= 0) return *array;
- if (V8_UNLIKELY(array->WasDetached())) return *array;
-
// Ensure processed indexes are within array bounds
DCHECK_GE(start, 0);
DCHECK_LT(start, len);
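The fill change above separates length-tracking views over resizable buffers from plain detached buffers. A tentative JavaScript sketch of the two paths, based on the branches shown (feature-flagged at the time of this change):

  const rab = new ArrayBuffer(16, { maxByteLength: 64 });
  const tracking = new Uint8Array(rab);       // length-tracking view
  tracking.fill(1);                           // ok: GetLength() follows the buffer

  const offsetView = new Uint8Array(rab, 8);  // length-tracking, offset 8
  rab.resize(4);                              // offsetView is now out of bounds
  // offsetView.fill(1) throws a TypeError (kDetachedOperation) via the
  // out_of_bounds branch above, whereas fill on a detached non-resizable
  // buffer still returns the array unchanged.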
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 3d813cd598..e219aec65d 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -79,6 +79,7 @@ class BuiltinArguments : public JavaScriptArguments {
// through the BuiltinArguments object args.
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
+#ifdef V8_RUNTIME_CALL_STATS
#define BUILTIN(name) \
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate); \
@@ -105,6 +106,21 @@ class BuiltinArguments : public JavaScriptArguments {
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)
+#else // V8_RUNTIME_CALL_STATS
+#define BUILTIN(name) \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate); \
+ \
+ V8_WARN_UNUSED_RESULT Address Builtin_##name( \
+ int args_length, Address* args_object, Isolate* isolate) { \
+ DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \
+ BuiltinArguments args(args_length, args_object); \
+ return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \
+ } \
+ \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate)
+#endif // V8_RUNTIME_CALL_STATS
// ----------------------------------------------------------------------------
#define CHECK_RECEIVER(Type, name, method) \
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index a10bc7c946..b12ea5d9fe 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -547,10 +547,19 @@ Cast<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
FastJSArrayForCopy
labels CastError {
if (IsArraySpeciesProtectorCellInvalid()) goto CastError;
+ // TODO(victorgomes): Check if we can cast from FastJSArrayForRead instead.
const a = Cast<FastJSArray>(o) otherwise CastError;
return %RawDownCast<FastJSArrayForCopy>(a);
}
+Cast<FastJSArrayForConcat>(implicit context: Context)(o: HeapObject):
+ FastJSArrayForConcat
+ labels CastError {
+ if (IsIsConcatSpreadableProtectorCellInvalid()) goto CastError;
+ const a = Cast<FastJSArrayForCopy>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForConcat>(a);
+}
+
Cast<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayWithNoCustomIteration
labels CastError {
diff --git a/deps/v8/src/builtins/conversion.tq b/deps/v8/src/builtins/conversion.tq
index 636f49a024..266fcaa552 100644
--- a/deps/v8/src/builtins/conversion.tq
+++ b/deps/v8/src/builtins/conversion.tq
@@ -138,7 +138,7 @@ transitioning builtin ToObject(implicit context: Context)(input: JSAny):
}
case (o: JSAnyNotSmi): {
const index: intptr = Convert<intptr>(
- o.map.in_object_properties_start_or_constructor_function_index);
+ o.map.inobject_properties_start_or_constructor_function_index);
if (index != kNoConstructorFunctionIndex)
goto WrapPrimitive(
%RawDownCast<Slot<NativeContext, JSFunction>>(index));
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 54013e7698..7a8875fee9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/register-arch.h"
#if V8_TARGET_ARCH_IA32
#include "src/api/api-arguments.h"
@@ -128,11 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
- 1 * kSystemPointerSize)); // 1 ~ receiver
- __ PushReturnAddressFrom(ecx);
+ __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -283,11 +281,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
- 1 * kSystemPointerSize)); // 1 ~ receiver
- __ push(ecx);
+ __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -776,10 +771,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
- Register return_pc = scratch2;
- __ PopReturnAddressTo(return_pc);
- __ add(esp, params_size);
- __ PushReturnAddressFrom(return_pc);
+ __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1102,10 +1095,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
// Push bytecode array.
@@ -1725,10 +1718,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(
- FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+ FieldOperand(bytecode_array, BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
__ Push(bytecode_array);
@@ -1915,11 +1908,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ PopReturnAddressTo(ecx);
- __ lea(esp,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
- __ Push(edi);
- __ PushReturnAddressFrom(ecx);
+ __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@@ -2026,11 +2017,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
- __ PopReturnAddressTo(edx);
- __ lea(esp,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
- __ Push(ecx);
- __ PushReturnAddressFrom(edx);
+ __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -2086,11 +2075,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// Spill argumentsList to use ecx as a scratch register.
__ movd(xmm0, ecx);
- __ PopReturnAddressTo(ecx);
- __ lea(esp,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(ecx);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -3986,16 +3974,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movsd(Operand(esi, dst_offset), xmm0);
}
- if (FLAG_debug_code) {
- const int kTopMask = 0x3800;
- __ push(eax);
- __ fwait();
- __ fnstsw_ax();
- __ test(eax, Immediate(kTopMask));
- __ Assert(zero, AbortReason::kFpuTopIsNotZeroInDeoptimizer);
- __ pop(eax);
- }
// Clear all FPU exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Mark the stack as not iterable for the CPU profiler which won't be able to
@@ -4115,19 +4096,57 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
+ // Spill the accumulator register; note that we're not within a frame, so we
+ // have to make sure to pop it before doing any GC-visible calls.
+ __ push(kInterpreterAccumulatorRegister);
+
// Get function from the frame.
Register closure = eax;
__ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = esi;
+ __ mov(code_obj,
+ FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(code_obj,
+ FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
+ kInterpreterBytecodeOffsetRegister);
+ __ j(equal, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ __ pop(kInterpreterAccumulatorRegister);
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
+ kInterpreterBytecodeOffsetRegister);
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Load the feedback vector.
Register feedback_vector = ecx;
__ mov(feedback_vector,
@@ -4150,14 +4169,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
feedback_vector);
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = esi;
- __ mov(code_obj,
- FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ mov(code_obj,
- FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4209,7 +4220,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
Generate_OSREntry(masm, code_obj);
} else {
@@ -4230,10 +4241,23 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
}
__ bind(&install_baseline_code);
+ // Pop/re-push the accumulator so that it's spilled within the below frame
+ // scope, to keep the stack valid. Use ecx for this -- we can't save it in
+ // kInterpreterAccumulatorRegister because that aliases with closure.
+ DCHECK(!AreAliased(ecx, kContextRegister, closure));
+ __ pop(ecx);
+ // Restore the clobbered context register.
+ __ mov(kContextRegister,
+ Operand(ebp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ecx);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ // Now that we're restarting, we don't have to worry about closure and
+ // accumulator aliasing, so pop the spilled accumulator directly back into
+ // the right register.
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ jmp(&start);
@@ -4241,17 +4265,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 150e3d2cb5..c2652e7eb0 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -110,7 +110,7 @@ transitioning builtin CallIteratorWithFeedback(
iteratorMethod, %MakeLazy<JSAny, JSAny>('GetLazyReceiver', receiver),
context, feedback, callSlotUnTagged);
const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
- otherwise ThrowCalledNonCallable(iteratorMethod);
+ otherwise ThrowIteratorError(receiver);
return Call(context, iteratorCallable, receiver);
}
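The iterator.tq change above improves the error raised when the looked-up iterator method is not callable: instead of the generic called-non-callable error about the method, the receiver itself is reported as not iterable. A quick JavaScript illustration (exact message wording aside):

  const notIterable = { [Symbol.iterator]: 42 };
  for (const x of notIterable) {}
  // Previously the TypeError named the non-callable value (42); with this
  // change the TypeError names the receiver as not being iterable.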
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 0f19f68c11..8f4bf4d06b 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -1018,7 +1018,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Include(s1.bit() | s2.bit());
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1085,10 +1085,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1243,10 +1243,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3938,12 +3938,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -3951,6 +3952,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = a1;
__ Lw(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Lw(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Lw(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t6, t6);
+ __ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t6, t6);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t6,
+ Operand(BASELINE_DATA_TYPE));
+ }
+
+ // Load baseline code from baseline data.
+ __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ Lw(feedback_vector,
@@ -3972,14 +4005,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = s1;
- __ Lw(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4013,6 +4038,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ Lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -4034,7 +4061,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ Lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -4058,25 +4085,29 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
}
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index ce1df3bd6a..45e1c32f82 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -1030,7 +1030,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
- temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ temps.Include(s1.bit() | s2.bit());
auto descriptor =
Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
@@ -1097,10 +1097,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1255,10 +1255,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3523,12 +3523,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ Push(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -3536,6 +3537,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = a1;
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Ld(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t2, t2);
+ __ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t2, t2);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
+ Operand(BASELINE_DATA_TYPE));
+ }
+
+ // Load baseline code from baseline data.
+ __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
@@ -3556,14 +3589,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = s1;
- __ Ld(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -3597,6 +3622,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -3618,7 +3645,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -3642,8 +3669,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
@@ -3651,17 +3680,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index c0b7212aac..02b76175ec 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -125,11 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(scratch, scratch);
- __ add(sp, sp, scratch);
- __ addi(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ blr();
__ bind(&stack_overflow);
@@ -286,11 +283,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(r4, r4);
- __ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(r4, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ blr();
__ bind(&check_receiver);
@@ -407,7 +401,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ subi(r6, r6, Operand(1));
__ cmpi(r6, Operand::Zero());
__ blt(&done_loop);
- __ ShiftLeftImm(r10, r6, Operand(kTaggedSizeLog2));
+ __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
__ LoadAnyTaggedField(
scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
@@ -725,7 +719,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmpi(r7, Operand::Zero());
__ beq(&done);
- __ ShiftLeftImm(r9, r7, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r9, r7, Operand(kSystemPointerSizeLog2));
__ add(r8, r8, r9); // point to last arg
__ mtctr(r7);
@@ -821,7 +815,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Compute the size of the actual parameters + receiver (in bytes).
__ LoadU64(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
- __ ShiftLeftImm(actual_params_size, actual_params_size,
+ __ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
@@ -835,7 +829,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ add(sp, sp, params_size);
+ __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1129,12 +1124,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ li(r8, Operand(0));
__ StoreU16(r8,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
r0);
// Load initial bytecode offset.
@@ -1162,7 +1157,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ ShiftRightImm(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
+ __ ShiftRightU64(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
__ bind(&loop);
@@ -1181,7 +1176,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
r0);
__ cmpi(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
- __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreU64(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
@@ -1204,7 +1199,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(r6, r6, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r6, r6, Operand(kSystemPointerSizeLog2));
__ LoadU64(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r6));
__ Call(kJavaScriptCallCodeStartRegister);
@@ -1277,7 +1272,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register scratch) {
ASM_CODE_COMMENT(masm);
__ subi(scratch, num_args, Operand(1));
- __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, r0,
@@ -1483,7 +1478,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Register scratch = temps.Acquire();
__ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadU64(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
@@ -1572,7 +1567,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// from LAZY is always the last argument.
__ addi(r3, r3,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
- __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r0));
// Recover arguments count.
__ subi(r3, r3,
@@ -1698,9 +1693,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, ip);
- __ StoreU64(r8, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1783,9 +1777,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, ip);
- __ StoreU64(r8, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1833,9 +1826,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, r0);
- __ StoreU64(r7, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1902,7 +1894,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label copy;
Register src = r9, dest = r8;
__ addi(src, sp, Operand(-kSystemPointerSize));
- __ ShiftLeftImm(r0, r7, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r0, r7, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, r0);
// Update stack pointer.
__ addi(dest, sp, Operand(-kSystemPointerSize));
@@ -1997,7 +1989,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ addi(r7, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
- __ ShiftLeftImm(scratch, r5, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, r5, Operand(kSystemPointerSizeLog2));
__ add(r7, r7, scratch);
// Move the arguments already in the stack,
@@ -2007,7 +1999,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register src = ip, dest = r5; // r7 and r10 are context and root.
__ addi(src, sp, Operand(-kSystemPointerSize));
// Update stack pointer.
- __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, scratch);
__ addi(dest, sp, Operand(-kSystemPointerSize));
__ addi(r0, r3, Operand(1));
@@ -2028,7 +2020,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&loop);
{
__ subi(r8, r8, Operand(1));
- __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2));
__ LoadU64(r0, MemOperand(r7, scratch));
__ StoreU64(r0, MemOperand(r5, scratch));
__ cmpi(r8, Operand::Zero());
@@ -2176,7 +2168,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ ShiftLeftImm(r10, r7, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r10, r7, Operand(kSystemPointerSizeLog2));
__ sub(r0, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
@@ -2206,7 +2198,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ subi(r7, r7, Operand(1));
- __ ShiftLeftImm(scratch, r7, Operand(kTaggedSizeLog2));
+ __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
__ add(scratch, scratch, r5);
__ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
__ Push(scratch);
@@ -2520,7 +2512,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mr(r4, r5);
} else {
// Compute the argv pointer.
- __ ShiftLeftImm(r4, r3, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r4, r3, Operand(kSystemPointerSizeLog2));
__ add(r4, r4, sp);
__ subi(r4, r4, Operand(kSystemPointerSize));
}
@@ -2756,7 +2748,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
__ oris(result_reg, result_reg,
Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
- __ slw(r0, result_reg, scratch);
+ __ ShiftLeftU32(r0, result_reg, scratch);
__ orx(result_reg, scratch_low, r0);
__ b(&negate);
@@ -2768,7 +2760,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
__ neg(scratch, scratch);
- __ slw(result_reg, scratch_low, scratch);
+ __ ShiftLeftU32(result_reg, scratch_low, scratch);
__ bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
@@ -2831,7 +2823,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ lbz(scratch, MemOperand(scratch, 0));
__ cmpi(scratch, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
__ Move(scratch, thunk_ref);
__ isel(eq, scratch, function_address, scratch);
} else {
@@ -3025,7 +3017,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// from the API function here.
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
- __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
kSystemPointerSize));
@@ -3327,7 +3319,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
__ LoadU64(r7,
MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
- __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
@@ -3420,12 +3412,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 03f20057e6..f79e392f48 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -1149,10 +1149,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1315,10 +1315,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3633,11 +3633,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
__ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
@@ -3646,6 +3648,46 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = a1;
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = a4;
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(code_obj, scratch, scratch);
+ __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ __ Pop(zero_reg, kInterpreterAccumulatorRegister);
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(code_obj, scratch, scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(BASELINE_DATA_TYPE));
+ }
+
+ // Load baseline code from baseline data.
+ __ LoadTaggedPointerField(
+ code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedPointerField(
@@ -3668,17 +3710,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = type;
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Compute baseline pc for bytecode offset.
__ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
@@ -3731,7 +3762,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
@@ -3764,17 +3795,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 2370f5ed57..5129cc6ee3 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -121,11 +121,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(scratch, scratch);
- __ AddS64(sp, sp, scratch);
- __ AddS64(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ bind(&stack_overflow);
@@ -278,11 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-
- __ SmiToPtrArrayOffset(r3, r3);
- __ AddS64(sp, sp, r3);
- __ AddS64(sp, sp, Operand(kSystemPointerSize));
+ __ DropArguments(r3, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ bind(&check_receiver);
@@ -870,7 +864,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LoadU64(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ LoadU32(params_size,
- FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
@@ -892,7 +886,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ AddS64(sp, sp, params_size);
+ __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1174,12 +1169,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r1, Operand(0));
__ StoreU16(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
r0);
// Load the initial bytecode offset.
@@ -1730,9 +1725,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
- __ StoreU64(r7, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1816,9 +1810,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
- __ StoreU64(r7, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1867,9 +1860,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
- __ StoreU64(r6, MemOperand(sp));
+ __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -3411,12 +3403,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
// Implement on this platform, https://crrev.com/c/2695591.
__ bkpt(0);
}
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 6646bbfa80..2f94f6205f 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -161,7 +161,7 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
} else if (src.elements_kind != elementsInfo.kind) {
- goto IfSlow;
+ goto IfElementsKindMismatch(src.elements_kind);
} else if (length > 0) {
const byteLength = typedArray.byte_length;
@@ -174,6 +174,12 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
typedArray.data_ptr, src.data_ptr, byteLength);
}
}
+ } label IfElementsKindMismatch(srcKind: ElementsKind) deferred {
+ if (IsBigInt64ElementsKind(srcKind) !=
+ IsBigInt64ElementsKind(elementsInfo.kind)) {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ }
+ goto IfSlow;
} label IfSlow deferred {
if (length > 0) {
TypedArrayCopyElements(
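The new IfElementsKindMismatch label distinguishes two cases when the source and destination elements kinds differ: mixing BigInt and non-BigInt content types throws a TypeError (kBigIntMixedTypes), while any other mismatch simply falls through to the slow element-wise copy. A rough C++ stand-in for that decision — the enum and helper names here are illustrative, not V8's:

#include <stdexcept>

enum class ElementsKind { kInt32, kFloat64, kBigInt64, kBigUint64 };

bool IsBigIntKind(ElementsKind k) {
  return k == ElementsKind::kBigInt64 || k == ElementsKind::kBigUint64;
}

void HandleKindMismatch(ElementsKind src, ElementsKind dst) {
  if (IsBigIntKind(src) != IsBigIntKind(dst)) {
    // Content types disagree: BigInt elements cannot be copied into a
    // non-BigInt typed array, or vice versa.
    throw std::invalid_argument("cannot mix BigInt and non-BigInt typed arrays");
  }
  // Otherwise: fall through to the slow, element-by-element copy path.
}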
diff --git a/deps/v8/src/builtins/typed-array-findlast.tq b/deps/v8/src/builtins/typed-array-findlast.tq
new file mode 100644
index 0000000000..634e17b936
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-findlast.tq
@@ -0,0 +1,112 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameFindLast: constexpr string =
+ '%TypedArray%.prototype.findLast';
+
+// Continuation part of
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
+// when array buffer was detached.
+transitioning builtin FindLastAllElementsDetachedContinuation(
+ implicit context: Context)(
+ array: JSTypedArray, predicate: Callable, thisArg: JSAny,
+ initialK: Number): JSAny {
+ // 6. Repeat, while k ≥ 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // there is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer was detached.
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result =
+ Call(context, predicate, thisArg, Undefined, Convert<Number>(k), array);
+ // 6d. If testResult is true, return kValue.
+ if (ToBoolean(result)) {
+ return Undefined;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
+transitioning macro FindLastAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
+ thisArg: JSAny): JSAny labels
+Bailout(Number) {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ // 3. Let len be O.[[ArrayLength]].
+ const length: uintptr = witness.Get().length;
+ // 5. Let k be len - 1.
+ // 6. Repeat, while k ≥ 0
+ for (let k: uintptr = length; k-- > 0;) {
+ witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // there is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ const value: JSAny = witness.Load(k);
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result = Call(
+ context, predicate, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
+ // 6d. If testResult is true, return kValue.
+ if (ToBoolean(result)) {
+ return value;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return undefined.
+ return Undefined;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
+transitioning javascript builtin
+TypedArrayPrototypeFindLast(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg
+ try {
+ // 1. Let O be the this value.
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ // 2. Perform ? ValidateTypedArray(O).
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ // 4. If IsCallable(predicate) is false, throw a TypeError exception.
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ try {
+ return FindLastAllElements(uarray, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return FindLastAllElementsDetachedContinuation(
+ uarray, predicate, thisArg, k);
+ }
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLast);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFindLast);
+ }
+}
+}
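The new typed-array-findlast.tq implements %TypedArray%.prototype.findLast from the find-from-last proposal: scan from index len-1 down to 0, return the first element the predicate accepts, and hand off to the detached continuation (where every kValue is undefined) if the callback detaches the buffer mid-iteration. A small C++ sketch of the backward scan, assuming a plain vector stands in for the attached typed array:

#include <cstddef>
#include <optional>
#include <vector>

template <typename T, typename Pred>
std::optional<T> FindLast(const std::vector<T>& elements, Pred predicate) {
  // 6. Repeat, while k >= 0 (walk from the last index toward 0).
  for (std::size_t k = elements.size(); k-- > 0;) {
    if (predicate(elements[k], k)) return elements[k];  // 6d. testResult is true
  }
  return std::nullopt;  // 7. Return undefined.
}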
diff --git a/deps/v8/src/builtins/typed-array-findlastindex.tq b/deps/v8/src/builtins/typed-array-findlastindex.tq
new file mode 100644
index 0000000000..4b20114c91
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-findlastindex.tq
@@ -0,0 +1,115 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameFindLastIndex: constexpr string =
+    '%TypedArray%.prototype.findLastIndex';
+
+// Continuation part of
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
+// when array buffer was detached.
+transitioning builtin FindLastIndexAllElementsDetachedContinuation(
+ implicit context: Context)(
+ array: JSTypedArray, predicate: Callable, thisArg: JSAny,
+ initialK: Number): Number {
+ // 6. Repeat, while k ≥ 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // there is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer was detached.
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const indexNumber: Number = Convert<Number>(k);
+ const result =
+ Call(context, predicate, thisArg, Undefined, indexNumber, array);
+ // 6d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(result)) {
+ return indexNumber;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return -1𝔽.
+ return -1;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
+transitioning macro FindLastIndexAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
+ thisArg: JSAny): Number labels
+Bailout(Number) {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ // 3. Let len be O.[[ArrayLength]].
+ const length: uintptr = witness.Get().length;
+ // 5. Let k be len - 1.
+ // 6. Repeat, while k ≥ 0
+ for (let k: uintptr = length; k-- > 0;) {
+ witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // there is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ const value: JSAny = witness.Load(k);
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const indexNumber: Number = Convert<Number>(k);
+ const result = Call(
+ context, predicate, thisArg, value, indexNumber, witness.GetStable());
+ // 6d. If testResult is true, return 𝔽(k).
+ if (ToBoolean(result)) {
+ return indexNumber;
+ }
+
+ // 6e. Set k to k - 1. (done by the loop).
+ }
+
+ // 7. Return -1𝔽.
+ return -1;
+}
+
+// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
+transitioning javascript builtin
+TypedArrayPrototypeFindLastIndex(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg.
+ try {
+ // 1. Let O be the this value.
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ // 2. Perform ? ValidateTypedArray(O).
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ // 4. If IsCallable(predicate) is false, throw a TypeError exception.
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+
+ try {
+ return FindLastIndexAllElements(uarray, predicate, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Number) deferred {
+ return FindLastIndexAllElementsDetachedContinuation(
+ uarray, predicate, thisArg, k);
+ }
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLastIndex);
+ } label IsDetached deferred {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation, kBuiltinNameFindLastIndex);
+ }
+}
+}
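typed-array-findlastindex.tq is the same backward scan, except the loop returns the matching index as a Number (and -1 when nothing matches) instead of the element. Continuing the sketch above:

#include <cstddef>
#include <vector>

template <typename T, typename Pred>
std::ptrdiff_t FindLastIndex(const std::vector<T>& elements, Pred predicate) {
  for (std::size_t k = elements.size(); k-- > 0;) {
    if (predicate(elements[k], k)) return static_cast<std::ptrdiff_t>(k);
  }
  return -1;  // 7. Return -1 (no element matched).
}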
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index b5c9dcb261..f4d2a40f41 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -281,7 +281,12 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
// value, true, Unordered).
// iii. Set srcByteIndex to srcByteIndex + 1.
// iv. Set targetByteIndex to targetByteIndex + 1.
- CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ if (IsSharedArrayBuffer(target.buffer)) {
+ // SABs need a relaxed memmove to preserve atomicity.
+ CallCRelaxedMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ } else {
+ CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ }
} label IfSlow deferred {
// 22. If target.[[ContentType]] is not equal to
// typedArray.[[ContentType]], throw a TypeError exception.
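The set (and, below, slice) fast paths now pick CallCRelaxedMemmove when the backing store is a SharedArrayBuffer: a plain memmove may use wide or overlapping accesses, so a concurrent reader of the shared buffer could observe torn, partially written values. A relaxed memmove copies with relaxed atomic loads and stores instead. Roughly, and purely as an illustration (this is not V8's base implementation):

#include <atomic>
#include <cstddef>
#include <cstdint>

void RelaxedMemmove(std::atomic<std::uint8_t>* dst,
                    const std::atomic<std::uint8_t>* src, std::size_t count) {
  // Copy byte-by-byte with relaxed atomics; pick the direction that is safe
  // for overlapping ranges, exactly like memmove.
  if (dst < src) {
    for (std::size_t i = 0; i < count; ++i)
      dst[i].store(src[i].load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
  } else {
    for (std::size_t i = count; i-- > 0;)
      dst[i].store(src[i].load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
  }
}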
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index 60604c548f..2a18433f93 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -36,7 +36,12 @@ macro FastCopy(
assert(countBytes <= dest.byte_length);
assert(countBytes <= src.byte_length - startOffset);
- typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
+ if (IsSharedArrayBuffer(src.buffer)) {
+ // SABs need a relaxed memmove to preserve atomicity.
+ typed_array::CallCRelaxedMemmove(dest.data_ptr, srcPtr, countBytes);
+ } else {
+ typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
+ }
}
macro SlowCopy(implicit context: Context)(
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 2686005ba5..87bcb2fb59 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -65,6 +65,8 @@ extern macro TypedArrayBuiltinsAssembler::CallCMemset(
RawPtr, intptr, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::CallCRelaxedMemcpy(
RawPtr, RawPtr, uintptr): void;
+extern macro TypedArrayBuiltinsAssembler::CallCRelaxedMemmove(
+ RawPtr, RawPtr, uintptr): void;
extern macro GetTypedArrayBuffer(implicit context: Context)(JSTypedArray):
JSArrayBuffer;
extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 993f8234af..14186e3be6 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -92,7 +92,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
Label stack_overflow;
- __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kFar);
+ __ StackOverflowCheck(rax, &stack_overflow, Label::kFar);
// Enter a construct frame.
{
@@ -129,10 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
- __ PushReturnAddressFrom(rcx);
+ __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
@@ -228,9 +226,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Check if we have enough stack space to push all arguments.
- // Argument count in rax. Clobbers rcx.
+ // Argument count in rax.
Label stack_overflow;
- __ StackOverflowCheck(rax, rcx, &stack_overflow);
+ __ StackOverflowCheck(rax, &stack_overflow);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
// should get the formal parameter count and copy the arguments in its
@@ -281,10 +279,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
- __ PushReturnAddressFrom(rcx);
+ __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -599,9 +595,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r9 : receiver
// Check if we have enough stack space to push all arguments.
- // Argument count in rax. Clobbers rcx.
+ // Argument count in rax.
Label enough_stack_space, stack_overflow;
- __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rax, &stack_overflow, Label::kNear);
__ jmp(&enough_stack_space, Label::kNear);
__ bind(&stack_overflow);
@@ -880,10 +876,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
- Register return_pc = scratch2;
- __ PopReturnAddressTo(return_pc);
- __ addq(rsp, params_size);
- __ PushReturnAddressFrom(return_pc);
+ __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
+ TurboAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1187,10 +1181,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
// Load initial bytecode offset.
@@ -1396,7 +1390,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ leal(rcx, Operand(rax, 1)); // Add one for receiver.
// Add a stack check before pushing arguments.
- __ StackOverflowCheck(rcx, rdx, &stack_overflow);
+ __ StackOverflowCheck(rcx, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -1457,7 +1451,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Add a stack check before pushing arguments.
- __ StackOverflowCheck(rax, r8, &stack_overflow);
+ __ StackOverflowCheck(rax, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -1704,11 +1698,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by
// writing a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ movw(
- FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
- Immediate(0));
+ __ movw(FieldOperand(bytecode_array,
+ BytecodeArray::kOsrLoopNestingLevelOffset),
+ Immediate(0));
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
@@ -1899,11 +1893,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ PopReturnAddressTo(rcx);
- __ leaq(rsp,
- Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
+ __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2006,11 +1998,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ j(below, &done, Label::kNear);
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
- __ PopReturnAddressTo(rcx);
- __ leaq(rsp,
- Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
+ __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2059,11 +2049,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ j(below, &done, Label::kNear);
__ movq(rdx, args[3]); // new.target
__ bind(&done);
- __ PopReturnAddressTo(rcx);
- __ leaq(rsp,
- Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(rcx);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
+ TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2120,7 +2109,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
Label stack_overflow;
- __ StackOverflowCheck(rcx, r8, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rcx, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
// Move the arguments already in the stack,
@@ -2222,7 +2211,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Check for stack overflow.
- __ StackOverflowCheck(r8, r12, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(r8, &stack_overflow, Label::kNear);
// Forward the arguments from the caller frame.
// Move the arguments already in the stack,
@@ -3345,16 +3334,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// expected to be on the top of the stack).
// We cannot use just the ret instruction for this, because we cannot pass the
// number of slots to remove in a Register as an argument.
- Register return_addr = rbx;
- __ popq(return_addr);
- Register caller_frame_slots_count = param_count;
- // Add one to also pop the receiver. The receiver is passed to a JSFunction
- // over the stack but is neither included in the number of parameters passed
- // to this function nor in the number of parameters expected in this function.
- __ addq(caller_frame_slots_count, Immediate(1));
- __ shlq(caller_frame_slots_count, Immediate(kSystemPointerSizeLog2));
- __ addq(rsp, caller_frame_slots_count);
- __ pushq(return_addr);
+ __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// --------------------------------------------------------------------------
@@ -4377,12 +4358,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
namespace {
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the current or next (in execution order) bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
- bool is_osr = false) {
- __ pushq(kInterpreterAccumulatorRegister);
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
Label start;
__ bind(&start);
@@ -4390,8 +4372,44 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
Register closure = rdi;
__ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ // Get the Code object from the shared function info.
+ Register code_obj = rbx;
+ __ LoadTaggedPointerField(
+ code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ j(equal, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+ }
+
+ // Load baseline code from baseline data.
+ __ LoadTaggedPointerField(
+ code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+ }
+
// Load the feedback vector.
- Register feedback_vector = rbx;
+ Register feedback_vector = r11;
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
@@ -4412,19 +4430,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
feedback_vector);
feedback_vector = no_reg;
- // Get the Code object from the shared function info.
- Register code_obj = rbx;
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj,
- FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
- }
-
// Compute baseline pc for bytecode offset.
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
@@ -4434,7 +4439,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
get_baseline_pc_extref =
ExternalReference::baseline_pc_for_bytecode_offset();
}
- Register get_baseline_pc = rax;
+ Register get_baseline_pc = r11;
__ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
// If the code deoptimizes during the implicit function entry stack interrupt
@@ -4457,6 +4462,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// Get bytecode array from the stack frame.
__ movq(kInterpreterBytecodeArrayRegister,
MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ pushq(kInterpreterAccumulatorRegister);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ PrepareCallCFunction(3);
@@ -4474,7 +4480,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
Generate_OSREntry(masm, code_obj);
} else {
@@ -4497,8 +4503,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ pushq(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ popq(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ jmp(&start);
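As with the riscv64 version earlier in this diff, the renamed Generate_BaselineOrInterpreterEntry first loads the SharedFunctionInfo's function data and only performs the frame conversion when its type is BASELINE_DATA_TYPE; otherwise (except for OSR, which may assume baseline code exists) it tail-jumps to the matching InterpreterEnterAt*Bytecode builtin. A compact C++ sketch of that dispatch decision — illustrative names, not the emitted assembly:

enum class Entry { kBaseline, kInterpreterAtCurrent, kInterpreterAtNext };

Entry ChooseEntry(bool has_baseline_data, bool next_bytecode, bool is_osr) {
  if (!has_baseline_data && !is_osr) {
    // No baseline code installed yet: resume in the interpreter instead.
    return next_bytecode ? Entry::kInterpreterAtNext
                         : Entry::kInterpreterAtCurrent;
  }
  // Baseline code exists (OSR entry may assume it does): convert the
  // interpreter frame and continue in baseline code.
  return Entry::kBaseline;
}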
@@ -4506,17 +4514,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
} // namespace
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- Generate_BaselineEntry(masm, false, true);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index ec2588364c..970386be72 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -5172,10 +5172,28 @@ void Assembler::RecordConstPool(int size) {
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ WriteUnalignedValue(base + p.first, *object);
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
void Assembler::GrowBuffer() {
DCHECK_EQ(buffer_start_, buffer_->start());
bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
// Compute new buffer size.
int old_size = buffer_->size();
@@ -5209,11 +5227,12 @@ void Assembler::GrowBuffer() {
reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
reloc_info_writer.Reposition(new_reloc_start, new_last_pc);
- // Patch on-heap references to handles.
- if (previously_on_heap && !buffer_->IsOnHeap()) {
- Address base = reinterpret_cast<Address>(buffer_->start());
- for (auto p : saved_handles_for_raw_object_ptr_) {
- WriteUnalignedValue(base + p.first, p.second);
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
}
}
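GrowBuffer now distinguishes two repair paths for code that started compiling on-heap: if the grown buffer is still on-heap, embedded object slots only need re-patching when a GC ran in between (detected by comparing the on-heap GC count); if growth fell back to an off-heap buffer, the raw object pointers are rewritten back to handle addresses so the normal relocation step can patch them at Code finalization. A simplified decision sketch with assumed names, not the assembler API:

enum class Repair { kNone, kPatchMovedObjects, kRewriteToHandles };

Repair ChooseRepair(bool was_on_heap, bool still_on_heap, int gc_count_before,
                    int gc_count_now) {
  if (!was_on_heap) return Repair::kNone;  // nothing was patched in place
  if (!still_on_heap) return Repair::kRewriteToHandles;
  return gc_count_before != gc_count_now ? Repair::kPatchMovedObjects
                                         : Repair::kNone;
}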
@@ -5237,7 +5256,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), data);
@@ -5250,7 +5270,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
@@ -5450,13 +5471,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
SetLdrRegisterImmediateOffset(instr, delta));
if (!entry.is_merged()) {
if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
+ int offset = pc_offset();
saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(pc_offset(), entry.value()));
- Handle<HeapObject> handle(reinterpret_cast<Address*>(entry.value()));
- emit(handle->ptr());
- // We must ensure that `emit` is not growing the assembler buffer
- // and falling back to off-heap compilation.
- DCHECK(IsOnHeap());
+ std::make_pair(offset, entry.value()));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(entry.value()));
+ emit(object->ptr());
+ DCHECK(EmbeddedObjectMatches(offset, object));
} else {
emit(entry.value());
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index d96c761910..4a9fe49685 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -328,6 +328,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fallback from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
// Label operations & relative jumps (PPUM Appendix D)
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -401,6 +410,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
// Branch instructions
void b(int branch_offset, Condition cond = al,
@@ -1067,8 +1077,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
// Record the emission of a constant pool.
//
@@ -1187,6 +1197,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return *reinterpret_cast<uint32_t*>(buffer_->start() + pc_offset) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
// Move a 32-bit immediate into a register, potentially via the constant pool.
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 49cb9d292c..26d16406a6 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -18,6 +18,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -1358,6 +1359,44 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(r1); }
+void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ int receiver_bytes = (mode == kCountExcludesReceiver) ? kPointerSize : 0;
+ switch (type) {
+ case kCountIsInteger: {
+ add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC);
+ break;
+ }
+ case kCountIsSmi: {
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ add(sp, sp, Operand(count, LSL, kPointerSizeLog2 - kSmiTagSize), LeaveCC);
+ break;
+ }
+ case kCountIsBytes: {
+ add(sp, sp, count, LeaveCC);
+ break;
+ }
+ }
+ if (receiver_bytes != 0) {
+ add(sp, sp, Operand(receiver_bytes), LeaveCC);
+ }
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Register receiver,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, receiver));
+ if (mode == kCountExcludesReceiver) {
+ // Drop arguments without receiver and override old receiver.
+ DropArguments(argc, type, kCountIncludesReceiver);
+ str(receiver, MemOperand(sp, 0));
+ } else {
+ DropArguments(argc, type, mode);
+ push(receiver);
+ }
+}
+
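These two helpers replace the hand-rolled pop sequences in the builtins above (SmiToPtrArrayOffset + AddS64 on s390, ShiftLeftImm + add + StoreU64 on ppc, PopReturnAddressTo + leaq on x64). The stack-pointer arithmetic they encode is small; a non-V8 sketch, assuming the 32-bit ARM values of kPointerSize and kSmiTagSize used in this file:

#include <cstdint>

constexpr int kPointerSizeLog2 = 2;  // 4-byte slots on 32-bit ARM (assumed)
constexpr int kSmiTagSize = 1;       // Smi counts carry a one-bit tag

enum CountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };

std::uint32_t SpAfterDrop(std::uint32_t sp, std::uint32_t count, CountType type,
                          bool count_excludes_receiver) {
  switch (type) {
    case kCountIsInteger:
      sp += count << kPointerSizeLog2;
      break;
    case kCountIsSmi:  // count arrives Smi-tagged, i.e. value << kSmiTagSize
      sp += count << (kPointerSizeLog2 - kSmiTagSize);
      break;
    case kCountIsBytes:
      sp += count;
      break;
  }
  if (count_excludes_receiver) sp += 1u << kPointerSizeLog2;  // also pop receiver
  return sp;
}

DropArgumentsAndPushNewReceiver with kCountExcludesReceiver instead drops only the argument slots and then stores the new receiver over the old receiver slot, which is why the builtins above could delete their explicit StoreU64/Push of the receiver.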
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
ASM_CODE_COMMENT(this);
@@ -1369,6 +1408,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
}
PushCommonFrame(scratch);
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
int TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -1553,54 +1595,6 @@ void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
}
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- ASM_CODE_COMMENT(this);
- DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
-
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- add(dst_reg, fp, Operand(caller_args_count, LSL, kPointerSizeLog2));
- add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kPointerSize is for the receiver.
- add(src_reg, sp, Operand(callee_args_count, LSL, kPointerSizeLog2));
- add(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- cmp(src_reg, dst_reg);
- Check(lo, AbortReason::kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop, entry;
- b(&entry);
- bind(&loop);
- ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
- str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
- bind(&entry);
- cmp(sp, src_reg);
- b(ne, &loop);
-
- // Leave current frame.
- mov(sp, dst_reg);
-}
-
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index d6671fff3f..41bc5ec544 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -76,6 +76,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StubPrologue(StackFrame::Type type);
void Prologue();
+ enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
+ enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
+ void DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+
// Push a standard frame, consisting of lr, fp, context and JS function
void PushStandardFrame(Register function_reg);
@@ -233,15 +241,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0,
Register scratch = no_reg);
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count| do not include
- // receiver. |callee_args_count| is not modified. |caller_args_count|
- // is trashed.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index bf39a2e416..f6a035a9e7 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -4275,8 +4275,41 @@ bool Assembler::IsImmFP64(double imm) {
return true;
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ if (update_embedded_objects) {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Handle<HeapObject> object = GetEmbeddedObject(p.second);
+ WriteUnalignedValue(base + p.first, object->ptr());
+ }
+ }
+ for (auto p : saved_offsets_for_runtime_entries_) {
+ Instruction* instr = reinterpret_cast<Instruction*>(base + p.first);
+ Address target = p.second * kInstrSize + options().code_range_start;
+ DCHECK(is_int26(p.second));
+ DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
+ instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+ for (auto p : saved_offsets_for_runtime_entries_) {
+ Instruction* instr = reinterpret_cast<Instruction*>(base + p.first);
+ DCHECK(is_int26(p.second));
+ DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
+ instr->SetInstructionBits(instr->Mask(UnconditionalBranchMask) | p.second);
+ }
+ saved_offsets_for_runtime_entries_.clear();
+}
+
void Assembler::GrowBuffer() {
bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
// Compute new buffer size.
int old_size = buffer_->size();
@@ -4320,18 +4353,12 @@ void Assembler::GrowBuffer() {
WriteUnalignedValue<intptr_t>(address, internal_ref);
}
- // Patch on-heap references to handles.
- if (previously_on_heap && !buffer_->IsOnHeap()) {
- Address base = reinterpret_cast<Address>(buffer_->start());
- for (auto p : saved_handles_for_raw_object_ptr_) {
- WriteUnalignedValue(base + p.first, p.second);
- }
- for (auto p : saved_offsets_for_runtime_entries_) {
- Instruction* instr = reinterpret_cast<Instruction*>(base + p.first);
- DCHECK(is_int26(p.second));
- DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- instr->SetInstructionBits(instr->Mask(UnconditionalBranchMask) |
- p.second);
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
}
}
@@ -4345,12 +4372,16 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
(rmode == RelocInfo::DEOPT_INLINING_ID) ||
- (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
+ (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
+ (rmode == RelocInfo::LITERAL_CONSTANT) ||
+ (rmode == RelocInfo::DEOPT_NODE_ID)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptNodeId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
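
The GrowBuffer change above only re-embeds object pointers when a GC actually ran between the buffer's allocation and the grow, which is what the recorded OnHeapGCCount is for; if compilation instead falls back off-heap, the raw pointers are turned back into handle indices. A simplified, self-contained sketch of that guard follows (PatchSite, OnHeapCode and the callbacks are hypothetical stand-ins for the assembler's bookkeeping, and branch-target fix-up is omitted):

    // Illustrative sketch only; not V8's internal types.
    #include <cstdint>
    #include <functional>
    #include <vector>

    struct PatchSite {
      int offset;          // where a raw object pointer was embedded in the code
      uint64_t handle_id;  // index of the handle that produced that pointer
    };

    struct OnHeapCode {
      int gc_count_at_allocation = 0;  // heap GC count when the buffer was created
      std::vector<PatchSite> sites;
    };

    // Still compiling on the GC heap after a grow: embedded pointers are stale only
    // if a GC ran in the meantime, so re-resolve them through their handles then.
    void FixReferencesAfterGrow(OnHeapCode& code, int current_gc_count,
                                const std::function<uint64_t(uint64_t)>& resolve_handle,
                                const std::function<void(int, uint64_t)>& write_word) {
      if (current_gc_count == code.gc_count_at_allocation) return;
      for (const PatchSite& s : code.sites) {
        write_word(s.offset, resolve_handle(s.handle_id));
      }
    }

    // Fell back to off-heap compilation: put the handle indices back so ordinary
    // relocation can patch the final pointers later.
    void FixReferencesBackToHandles(OnHeapCode& code,
                                    const std::function<void(int, uint64_t)>& write_word) {
      for (const PatchSite& s : code.sites) write_word(s.offset, s.handle_id);
      code.sites.clear();
    }
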
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 6a0245fcd6..8cdca7bfa8 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -204,6 +204,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated, for instance when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fall back from on-heap to off-heap
+ // compilation; it patches on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
@@ -213,6 +222,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
inline void Unreachable();
@@ -339,8 +349,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
int buffer_space() const;
@@ -2067,7 +2077,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
BlockPoolsScope no_pool_scope(this);
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc32(data);
@@ -2075,7 +2086,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
BlockPoolsScope no_pool_scope(this);
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc64(data);
@@ -2083,7 +2095,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
BlockPoolsScope no_pool_scope(this);
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc64(data);
@@ -2676,6 +2689,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static size_t GetApproxMaxDistToConstPoolForTesting() {
return ConstantPool::kApproxDistToPool64;
}
+
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object,
+ EmbeddedObjectIndex index) {
+ return *reinterpret_cast<uint64_t*>(buffer_->start() + pc_offset) ==
+ (IsOnHeap() ? object->ptr() : index);
+ }
#endif
class FarBranchInfo {
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
index 4baf2e07ec..4c61e1fd82 100644
--- a/deps/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -13,6 +13,10 @@
#include <libkern/OSCacheControl.h>
#endif
+#if V8_OS_WIN
+#include <windows.h>
+#endif
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 9dba8800d9..ef95b4e813 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -2215,62 +2215,6 @@ void TurboAssembler::CallForDeoptimization(
}
}
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- ASM_CODE_COMMENT(this);
- DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
-
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kSystemPointerSize to count the
- // receiver argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- Add(dst_reg, fp, Operand(caller_args_count, LSL, kSystemPointerSizeLog2));
- Add(dst_reg, dst_reg,
- StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
- // Round dst_reg up to a multiple of 16 bytes, so that we overwrite any
- // potential padding.
- Add(dst_reg, dst_reg, 15);
- Bic(dst_reg, dst_reg, 15);
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kSystemPointerSize is for the receiver.
- Add(src_reg, sp, Operand(callee_args_count, LSL, kSystemPointerSizeLog2));
- Add(src_reg, src_reg, kSystemPointerSize);
-
- // Round src_reg up to a multiple of 16 bytes, so we include any potential
- // padding in the copy.
- Add(src_reg, src_reg, 15);
- Bic(src_reg, src_reg, 15);
-
- if (FLAG_debug_code) {
- Cmp(src_reg, dst_reg);
- Check(lo, AbortReason::kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- RestoreFPAndLR();
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop, entry;
- B(&entry);
- bind(&loop);
- Ldr(tmp_reg, MemOperand(src_reg, -kSystemPointerSize, PreIndex));
- Str(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize, PreIndex));
- bind(&entry);
- Cmp(sp, src_reg);
- B(ne, &loop);
-
- // Leave current frame.
- Mov(sp, dst_reg);
-}
-
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
@@ -2659,11 +2603,11 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Push<TurboAssembler::kSignLR>(lr, fp);
Mov(fp, sp);
- Push(type_reg, padreg);
+ Push(type_reg, kWasmInstanceRegister);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
- // sp[0] : for alignment
+ // sp[0] : wasm instance
#endif // V8_ENABLE_WEBASSEMBLY
} else if (type == StackFrame::CONSTRUCT) {
Register type_reg = temps.AcquireX();
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 235b9a4b69..9128ba2c18 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -555,15 +555,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void Isb();
inline void Csdb();
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count| do not include
- // receiver. |callee_args_count| is not modified. |caller_args_count| is
- // trashed.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
inline void SmiUntag(Register dst, Register src);
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 5b234526a4..21007a5973 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 8eb5ae55e2..dfd406694a 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -57,7 +57,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
const bool generating_embedded_builtin =
isolate->IsGeneratingEmbeddedBuiltins();
options.record_reloc_info_for_serialization = serializer;
- options.enable_root_array_delta_access =
+ options.enable_root_relative_access =
!serializer && !generating_embedded_builtin;
#ifdef USE_SIMULATOR
// Even though the simulator is enabled, we may still need to generate code
@@ -142,8 +142,9 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
class OnHeapAssemblerBuffer : public AssemblerBuffer {
public:
- OnHeapAssemblerBuffer(Handle<Code> code, int size)
- : code_(code), size_(size) {}
+ OnHeapAssemblerBuffer(Isolate* isolate, Handle<Code> code, int size,
+ int gc_count)
+ : isolate_(isolate), code_(code), size_(size), gc_count_(gc_count) {}
byte* start() const override {
return reinterpret_cast<byte*>(code_->raw_instruction_start());
@@ -153,20 +154,32 @@ class OnHeapAssemblerBuffer : public AssemblerBuffer {
std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
DCHECK_LT(size(), new_size);
+ Heap* heap = isolate_->heap();
+ if (Code::SizeFor(new_size) <
+ heap->MaxRegularHeapObjectSize(AllocationType::kCode)) {
+ MaybeHandle<Code> code =
+ isolate_->factory()->NewEmptyCode(CodeKind::BASELINE, new_size);
+ if (!code.is_null()) {
+ return std::make_unique<OnHeapAssemblerBuffer>(
+ isolate_, code.ToHandleChecked(), new_size, heap->gc_count());
+ }
+ }
// We fall back to the slow path using the default assembler buffer and
- // compile the code off the GC heap. Compiling directly on heap makes less
- // sense now, since we will need to allocate a new Code object, copy the
- // content generated so far and relocate.
+ // compile the code off the GC heap.
return std::make_unique<DefaultAssemblerBuffer>(new_size);
}
bool IsOnHeap() const override { return true; }
+ int OnHeapGCCount() const override { return gc_count_; }
+
MaybeHandle<Code> code() const override { return code_; }
private:
+ Isolate* isolate_;
Handle<Code> code_;
const int size_;
+ const int gc_count_;
};
static thread_local std::aligned_storage_t<sizeof(ExternalAssemblerBufferImpl),
@@ -211,7 +224,8 @@ std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
MaybeHandle<Code> code =
isolate->factory()->NewEmptyCode(CodeKind::BASELINE, size);
if (code.is_null()) return {};
- return std::make_unique<OnHeapAssemblerBuffer>(code.ToHandleChecked(), size);
+ return std::make_unique<OnHeapAssemblerBuffer>(
+ isolate, code.ToHandleChecked(), size, isolate->heap()->gc_count());
}
// -----------------------------------------------------------------------------
@@ -281,13 +295,16 @@ HeapObjectRequest::HeapObjectRequest(const StringConstantBase* string,
// Platform specific but identical code for all the platforms.
-void Assembler::RecordDeoptReason(DeoptimizeReason reason,
+void Assembler::RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
SourcePosition position, int id) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
+#ifdef DEBUG
+ RecordRelocInfo(RelocInfo::DEOPT_NODE_ID, node_id);
+#endif // DEBUG
}
void Assembler::DataAlign(int m) {
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index ee5aef524d..7373b5d48b 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -157,9 +157,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// assembler is used on existing code directly (e.g. JumpTableAssembler)
// without any buffer to hold reloc information.
bool disable_reloc_info_for_patching = false;
- // Enables access to exrefs by computing a delta from the root array.
- // Only valid if code will not survive the process.
- bool enable_root_array_delta_access = false;
+ // Enables root-relative access to arbitrary untagged addresses (usually
+ // external references). Only valid if code will not survive the process.
+ bool enable_root_relative_access = false;
// Enables specific assembler sequences only used for the simulator.
bool enable_simulator_code = false;
// Enables use of isolate-independent constants, indirected through the
@@ -204,6 +204,9 @@ class AssemblerBuffer {
V8_WARN_UNUSED_RESULT = 0;
virtual bool IsOnHeap() const { return false; }
virtual MaybeHandle<Code> code() const { return MaybeHandle<Code>(); }
+ // Returns the heap's GC count at the time the buffer was allocated (only
+ // meaningful if the buffer is on the GC heap).
+ virtual int OnHeapGCCount() const { return 0; }
};
// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
@@ -283,6 +286,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
bool IsOnHeap() const { return buffer_->IsOnHeap(); }
+ int OnHeapGCCount() const { return buffer_->OnHeapGCCount(); }
+
MaybeHandle<Code> code() const {
DCHECK(IsOnHeap());
return buffer_->code();
@@ -404,6 +409,9 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
!options().record_reloc_info_for_serialization && !FLAG_debug_code) {
return false;
}
+#ifndef ENABLE_DISASSEMBLER
+ if (RelocInfo::IsLiteralConstant(rmode)) return false;
+#endif
return true;
}
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index 2a5893974f..128858a47f 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -22,9 +22,9 @@ namespace internal {
"Expected optimized code cell or optimization sentinel") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
+ V(kExpectedBaselineData, "Expected baseline data") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
- V(kFpuTopIsNotZeroInDeoptimizer, "FPU TOP is not zero in deoptimizer") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 5493ba6caa..e25135dece 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -1891,7 +1891,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
- map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
+ map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
@@ -1899,7 +1899,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
- map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
+ map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
TNode<Object> CodeStubAssembler::LoadMapConstructor(TNode<Map> map) {
@@ -6195,6 +6195,13 @@ TNode<BoolT> CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() {
return TaggedEqual(cell_value, invalid);
}
+TNode<BoolT> CodeStubAssembler::IsIsConcatSpreadableProtectorCellInvalid() {
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = IsConcatSpreadableProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return TaggedEqual(cell_value, invalid);
+}
+
TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = TypedArraySpeciesProtectorConstant();
@@ -9689,7 +9696,7 @@ void CodeStubAssembler::TryLookupElement(
// TODO(verwaest): Support other elements kinds as well.
Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
if_isfaststringwrapper(this), if_isslowstringwrapper(this), if_oob(this),
- if_typedarray(this);
+ if_typedarray(this), if_rab_gsab_typedarray(this);
// clang-format off
int32_t values[] = {
// Handled by {if_isobjectorsmi}.
@@ -9719,8 +9726,18 @@ void CodeStubAssembler::TryLookupElement(
UINT8_CLAMPED_ELEMENTS,
BIGUINT64_ELEMENTS,
BIGINT64_ELEMENTS,
+ RAB_GSAB_UINT8_ELEMENTS,
+ RAB_GSAB_INT8_ELEMENTS,
+ RAB_GSAB_UINT16_ELEMENTS,
+ RAB_GSAB_INT16_ELEMENTS,
+ RAB_GSAB_UINT32_ELEMENTS,
+ RAB_GSAB_INT32_ELEMENTS,
+ RAB_GSAB_FLOAT32_ELEMENTS,
+ RAB_GSAB_FLOAT64_ELEMENTS,
+ RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ RAB_GSAB_BIGUINT64_ELEMENTS,
+ RAB_GSAB_BIGINT64_ELEMENTS,
};
- // TODO(v8:11111): Support RAB / GSAB.
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
@@ -9742,6 +9759,17 @@ void CodeStubAssembler::TryLookupElement(
&if_typedarray,
&if_typedarray,
&if_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
+ &if_rab_gsab_typedarray,
};
// clang-format on
STATIC_ASSERT(arraysize(values) == arraysize(labels));
@@ -9808,6 +9836,13 @@ void CodeStubAssembler::TryLookupElement(
TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
}
+ BIND(&if_rab_gsab_typedarray);
+ {
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
+ TNode<UintPtrT> length =
+ LoadVariableLengthJSTypedArrayLength(CAST(object), buffer, if_absent);
+ Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
+ }
BIND(&if_oob);
{
// Positive OOB indices mean "not found", negative indices and indices
@@ -13891,6 +13926,45 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
return result.value();
}
+void CodeStubAssembler::IsTypedArrayDetachedOrOutOfBounds(
+ TNode<JSTypedArray> array, Label* detached_or_oob,
+ Label* not_detached_nor_oob) {
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
+
+ GotoIf(IsDetachedBuffer(buffer), detached_or_oob);
+ GotoIfNot(IsVariableLengthTypedArray(array), not_detached_nor_oob);
+ GotoIf(IsSharedArrayBuffer(buffer), not_detached_nor_oob);
+
+ {
+ TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
+ TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
+
+ Label length_tracking(this), not_length_tracking(this);
+ Branch(IsLengthTrackingTypedArray(array), &length_tracking,
+ &not_length_tracking);
+
+ BIND(&length_tracking);
+ {
+ // The backing RAB might have been shrunk so that the start of the
+ // TypedArray is already out of bounds.
+ Branch(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
+ not_detached_nor_oob, detached_or_oob);
+ }
+
+ BIND(&not_length_tracking);
+ {
+ // Check if the backing RAB has shrunk so that the buffer is out of
+ // bounds.
+ TNode<UintPtrT> array_byte_length =
+ LoadJSArrayBufferViewByteLength(array);
+ Branch(UintPtrGreaterThanOrEqual(
+ buffer_byte_length,
+ UintPtrAdd(array_byte_offset, array_byte_length)),
+ not_detached_nor_oob, detached_or_oob);
+ }
+ }
+}
+
// ES #sec-integerindexedobjectbytelength
TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
@@ -14376,7 +14450,7 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
- StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, ToCodeT(code));
+ StoreObjectField(fun, JSFunction::kCodeOffset, ToCodeT(code));
return CAST(fun);
}
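
The IsTypedArrayDetachedOrOutOfBounds helper added above distinguishes length-tracking views from fixed-length views over a resizable (RAB) or growable shared (GSAB) buffer. A standalone sketch of the same decision tree, using illustrative plain-struct fields instead of CSA nodes:

    #include <cstddef>

    struct BufferView {
      bool detached;           // backing buffer was detached
      bool variable_length;    // view tracks a resizable/growable buffer
      bool shared;             // growable SharedArrayBuffer: can only grow
      bool length_tracking;    // view length follows the buffer length
      size_t buffer_byte_length;
      size_t byte_offset;
      size_t byte_length;      // fixed view length (unused if length_tracking)
    };

    bool IsDetachedOrOutOfBounds(const BufferView& v) {
      if (v.detached) return true;
      if (!v.variable_length) return false;  // fixed buffers cannot shrink under us
      if (v.shared) return false;            // growable shared buffers never shrink
      if (v.length_tracking) {
        // The backing RAB might have shrunk so that even the start is out of bounds.
        return v.byte_offset > v.buffer_byte_length;
      }
      // Fixed-length view over a RAB: the whole [offset, offset + length) range
      // must still fit inside the (possibly shrunk) buffer.
      return v.byte_offset + v.byte_length > v.buffer_byte_length;
    }
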
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index a6970a0a00..008af6006f 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -68,6 +68,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
AsyncGeneratorYieldResolveSharedFun) \
V(AsyncIteratorValueUnwrapSharedFun, async_iterator_value_unwrap_shared_fun, \
AsyncIteratorValueUnwrapSharedFun) \
+ V(IsConcatSpreadableProtector, is_concat_spreadable_protector, \
+ IsConcatSpreadableProtector) \
V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector) \
V(NoElementsProtector, no_elements_protector, NoElementsProtector) \
V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector) \
@@ -2546,6 +2548,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
+ TNode<BoolT> IsIsConcatSpreadableProtectorCellInvalid();
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
@@ -3566,6 +3569,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
TNode<JSArrayBuffer> buffer);
+ void IsTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
+ Label* detached_or_oob,
+ Label* not_detached_nor_oob);
+
TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
TNode<Int32T> elementsKind);
TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index ee50f8b015..861bd2904f 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -4,6 +4,7 @@
#include "src/codegen/compilation-cache.h"
+#include "src/codegen/script-details.h"
#include "src/common/globals.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
@@ -104,42 +105,64 @@ void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
CompilationCacheScript::CompilationCacheScript(Isolate* isolate)
: CompilationSubCache(isolate, 1) {}
+namespace {
+
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
- MaybeHandle<Object> maybe_name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options) {
+bool HasOrigin(Isolate* isolate, Handle<SharedFunctionInfo> function_info,
+ const ScriptDetails& script_details) {
Handle<Script> script =
- Handle<Script>(Script::cast(function_info->script()), isolate());
+ Handle<Script>(Script::cast(function_info->script()), isolate);
// If the script name isn't set, the boilerplate script should have
// an undefined name to have the same origin.
Handle<Object> name;
- if (!maybe_name.ToHandle(&name)) {
- return script->name().IsUndefined(isolate());
+ if (!script_details.name_obj.ToHandle(&name)) {
+ return script->name().IsUndefined(isolate);
}
// Do the fast bailout checks first.
- if (line_offset != script->line_offset()) return false;
- if (column_offset != script->column_offset()) return false;
+ if (script_details.line_offset != script->line_offset()) return false;
+ if (script_details.column_offset != script->column_offset()) return false;
// Check that both names are strings. If not, no match.
if (!name->IsString() || !script->name().IsString()) return false;
// Are the origin_options same?
- if (resource_options.Flags() != script->origin_options().Flags())
+ if (script_details.origin_options.Flags() !=
+ script->origin_options().Flags()) {
return false;
+ }
// Compare the two name strings for equality.
- return String::Equals(
- isolate(), Handle<String>::cast(name),
- Handle<String>(String::cast(script->name()), isolate()));
+ if (!String::Equals(isolate, Handle<String>::cast(name),
+ Handle<String>(String::cast(script->name()), isolate))) {
+ return false;
+ }
+
+ Handle<FixedArray> host_defined_options;
+ if (!script_details.host_defined_options.ToHandle(&host_defined_options)) {
+ host_defined_options = isolate->factory()->empty_fixed_array();
+ }
+
+ Handle<FixedArray> script_options(script->host_defined_options(), isolate);
+ int length = host_defined_options->length();
+ if (length != script_options->length()) return false;
+
+ for (int i = 0; i < length; i++) {
+ // host-defined options is a v8::PrimitiveArray.
+ DCHECK(host_defined_options->get(i).IsPrimitive());
+ DCHECK(script_options->get(i).IsPrimitive());
+ if (!host_defined_options->get(i).StrictEquals(script_options->get(i))) {
+ return false;
+ }
+ }
+ return true;
}
+} // namespace
// TODO(245): Need to allow identical code from different contexts to
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
- Handle<String> source, MaybeHandle<Object> name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
+ Handle<String> source, const ScriptDetails& script_details,
LanguageMode language_mode) {
MaybeHandle<SharedFunctionInfo> result;
@@ -156,8 +179,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
if (probe.ToHandle(&function_info)) {
// Break when we've found a suitable shared function info that
// matches the origin.
- if (HasOrigin(function_info, name, line_offset, column_offset,
- resource_options)) {
+ if (HasOrigin(isolate(), function_info, script_details)) {
result = scope.CloseAndEscape(function_info);
}
}
@@ -168,12 +190,9 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
// handle created in the caller's handle scope.
Handle<SharedFunctionInfo> function_info;
if (result.ToHandle(&function_info)) {
-#ifdef DEBUG
// Since HasOrigin can allocate, we need to protect the SharedFunctionInfo
// with handles during the call.
- DCHECK(HasOrigin(function_info, name, line_offset, column_offset,
- resource_options));
-#endif
+ DCHECK(HasOrigin(isolate(), function_info, script_details));
isolate()->counters()->compilation_cache_hits()->Increment();
LOG(isolate(), CompilationCacheEvent("hit", "script", *function_info));
} else {
@@ -271,13 +290,10 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
- Handle<String> source, MaybeHandle<Object> name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
+ Handle<String> source, const ScriptDetails& script_details,
LanguageMode language_mode) {
if (!IsEnabledScriptAndEval()) return MaybeHandle<SharedFunctionInfo>();
-
- return script_.Lookup(source, name, line_offset, column_offset,
- resource_options, language_mode);
+ return script_.Lookup(source, script_details, language_mode);
}
InfoCellPair CompilationCache::LookupEval(Handle<String> source,
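
With the ScriptDetails refactoring, HasOrigin now also compares the host-defined options carried by the embedder. A minimal sketch of the overall origin check, with simplified stand-in types rather than V8 handles:

    #include <optional>
    #include <string>
    #include <vector>

    struct SimpleScriptDetails {
      std::optional<std::string> name;
      int line_offset = 0;
      int column_offset = 0;
      int origin_flags = 0;
      std::vector<int> host_defined_options;  // compared element-wise
    };

    struct CachedScript {
      std::optional<std::string> name;
      int line_offset = 0;
      int column_offset = 0;
      int origin_flags = 0;
      std::vector<int> host_defined_options;
    };

    // A cached SharedFunctionInfo is only reused if the script "comes from the
    // same place": same name, offsets, origin options and host-defined options.
    bool HasSameOrigin(const CachedScript& cached, const SimpleScriptDetails& d) {
      if (!d.name.has_value()) return !cached.name.has_value();
      if (d.line_offset != cached.line_offset) return false;
      if (d.column_offset != cached.column_offset) return false;
      if (d.origin_flags != cached.origin_flags) return false;
      if (!cached.name.has_value() || *d.name != *cached.name) return false;
      return d.host_defined_options == cached.host_defined_options;
    }
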
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index d4f4ae52dc..b51ffee7b9 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -16,6 +16,7 @@ template <typename T>
class Handle;
class RootVisitor;
+struct ScriptDetails;
// The compilation cache consists of several generational sub-caches which uses
// this class as a base class. A sub-cache contains a compilation cache tables
@@ -82,9 +83,7 @@ class CompilationCacheScript : public CompilationSubCache {
explicit CompilationCacheScript(Isolate* isolate);
MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
- MaybeHandle<Object> name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options,
+ const ScriptDetails& script_details,
LanguageMode language_mode);
void Put(Handle<String> source, LanguageMode language_mode,
@@ -93,10 +92,6 @@ class CompilationCacheScript : public CompilationSubCache {
void Age() override;
private:
- bool HasOrigin(Handle<SharedFunctionInfo> function_info,
- MaybeHandle<Object> name, int line_offset, int column_offset,
- ScriptOriginOptions resource_options);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
@@ -163,8 +158,7 @@ class V8_EXPORT_PRIVATE CompilationCache {
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
MaybeHandle<SharedFunctionInfo> LookupScript(
- Handle<String> source, MaybeHandle<Object> name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
+ Handle<String> source, const ScriptDetails& script_details,
LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 99f6d725f9..4fd70a8d9e 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -19,11 +19,12 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/pending-optimization-table.h"
+#include "src/codegen/script-details.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug.h"
@@ -43,7 +44,7 @@
#include "src/heap/parked-scope.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
-#include "src/logging/counters.h"
+#include "src/logging/counters-scopes.h"
#include "src/logging/log-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/feedback-cell-inl.h"
@@ -582,7 +583,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
isolate, compilation_info->feedback_vector_spec());
- shared_info->set_feedback_metadata(*feedback_metadata);
+ shared_info->set_feedback_metadata(*feedback_metadata, kReleaseStore);
} else {
#if V8_ENABLE_WEBASSEMBLY
DCHECK(compilation_info->has_asm_wasm_data());
@@ -590,7 +591,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
DCHECK((std::is_same<IsolateT, Isolate>::value));
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
shared_info->set_feedback_metadata(
- ReadOnlyRoots(isolate).empty_feedback_metadata());
+ ReadOnlyRoots(isolate).empty_feedback_metadata(), kReleaseStore);
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1284,10 +1285,10 @@ void FinalizeUnoptimizedScriptCompilation(
UnoptimizedCompileState::ParallelTasks* parallel_tasks =
compile_state->parallel_tasks();
if (parallel_tasks) {
- CompilerDispatcher* dispatcher = parallel_tasks->dispatcher();
+ LazyCompileDispatcher* dispatcher = parallel_tasks->dispatcher();
for (auto& it : *parallel_tasks) {
FunctionLiteral* literal = it.first;
- CompilerDispatcher::JobId job_id = it.second;
+ LazyCompileDispatcher::JobId job_id = it.second;
MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
Script::FindSharedFunctionInfo(script, isolate, literal);
Handle<SharedFunctionInfo> shared_for_task;
@@ -1353,10 +1354,10 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
- HistogramTimer* rate = parse_info->flags().is_eval()
- ? isolate->counters()->compile_eval()
- : isolate->counters()->compile();
- HistogramTimerScope timer(rate);
+ NestedTimedHistogram* rate = parse_info->flags().is_eval()
+ ? isolate->counters()->compile_eval()
+ : isolate->counters()->compile();
+ NestedTimedHistogramScope timer(rate);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
parse_info->flags().is_eval() ? "V8.CompileEval" : "V8.Compile");
@@ -1680,6 +1681,37 @@ Handle<Script> BackgroundCompileTask::GetScript(Isolate* isolate) {
return handle(*script_, isolate);
}
+BackgroundDeserializeTask::BackgroundDeserializeTask(
+ Isolate* isolate, std::unique_ptr<ScriptCompiler::CachedData> cached_data)
+ : isolate_for_local_isolate_(isolate),
+ cached_data_(cached_data->data, cached_data->length) {
+ // If the passed in cached data has ownership of the buffer, move it to the
+ // task.
+ if (cached_data->buffer_policy == ScriptCompiler::CachedData::BufferOwned &&
+ !cached_data_.HasDataOwnership()) {
+ cached_data->buffer_policy = ScriptCompiler::CachedData::BufferNotOwned;
+ cached_data_.AcquireDataOwnership();
+ }
+}
+
+void BackgroundDeserializeTask::Run() {
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&isolate);
+ LocalHandleScope handle_scope(&isolate);
+
+ Handle<SharedFunctionInfo> inner_result;
+ off_thread_data_ =
+ CodeSerializer::StartDeserializeOffThread(&isolate, &cached_data_);
+}
+
+MaybeHandle<SharedFunctionInfo> BackgroundDeserializeTask::Finish(
+ Isolate* isolate, Handle<String> source,
+ ScriptOriginOptions origin_options) {
+ return CodeSerializer::FinishOffThreadDeserialize(
+ isolate, std::move(off_thread_data_), &cached_data_, source,
+ origin_options);
+}
+
// ----------------------------------------------------------------------------
// Implementation of Compiler
@@ -1710,6 +1742,13 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
return false;
}
+ // Unfinalized scripts don't yet have the proper source string attached and
+ // thus can't be reparsed.
+ if (Script::cast(shared_info->script()).IsMaybeUnfinalized(isolate)) {
+ bytecode->SetSourcePositionsFailedToCollect();
+ return false;
+ }
+
DCHECK(AllowCompilation::IsAllowed(isolate));
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
DCHECK(!isolate->has_pending_exception());
@@ -1718,7 +1757,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileCollectSourcePositions);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CollectSourcePositions");
- HistogramTimerScope timer(isolate->counters()->collect_source_positions());
+ NestedTimedHistogramScope timer(
+ isolate->counters()->collect_source_positions());
// Set up parse info.
UnoptimizedCompileFlags flags =
@@ -1803,7 +1843,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
ParseInfo parse_info(isolate, flags, &compile_state);
// Check if the compiler dispatcher has shared_info enqueued for compile.
- CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
+ LazyCompileDispatcher* dispatcher = isolate->lazy_compile_dispatcher();
if (dispatcher->IsEnqueued(shared_info)) {
if (!dispatcher->FinishNow(shared_info)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
@@ -1862,7 +1902,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
// Reset the JSFunction if we are recompiling due to the bytecode having been
// flushed.
- function->ResetIfBytecodeFlushed();
+ function->ResetIfCodeFlushed();
Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
@@ -2240,14 +2280,14 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// (via v8::Isolate::SetAllowCodeGenerationFromStringsCallback)
bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
Handle<String> source) {
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
DCHECK(isolate->allow_code_gen_callback());
-
- // Callback set. Let it decide if code generation is allowed.
- VMState<EXTERNAL> state(isolate);
- RCS_SCOPE(isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
+ ExternalCallbackScope external_callback(isolate,
+ reinterpret_cast<Address>(callback));
+ // Callback set. Let it decide if code generation is allowed.
return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
}
@@ -2431,8 +2471,7 @@ struct ScriptCompileTimerScope {
explicit ScriptCompileTimerScope(
Isolate* isolate, ScriptCompiler::NoCacheReason no_cache_reason)
: isolate_(isolate),
- all_scripts_histogram_scope_(isolate->counters()->compile_script(),
- true),
+ all_scripts_histogram_scope_(isolate->counters()->compile_script()),
no_cache_reason_(no_cache_reason),
hit_isolate_cache_(false),
producing_code_cache_(false),
@@ -2471,7 +2510,7 @@ struct ScriptCompileTimerScope {
LazyTimedHistogramScope histogram_scope_;
// TODO(leszeks): This timer is the sum of the other times, consider removing
// it to save space.
- HistogramTimerScope all_scripts_histogram_scope_;
+ NestedTimedHistogramScope all_scripts_histogram_scope_;
ScriptCompiler::NoCacheReason no_cache_reason_;
bool hit_isolate_cache_;
bool producing_code_cache_;
@@ -2600,7 +2639,7 @@ struct ScriptCompileTimerScope {
};
void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
- Compiler::ScriptDetails script_details,
+ ScriptDetails script_details,
DisallowGarbageCollection* no_gc) {
Handle<Object> script_name;
if (script_details.name_obj.ToHandle(&script_name)) {
@@ -2625,12 +2664,12 @@ void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
Handle<Script> NewScript(
Isolate* isolate, ParseInfo* parse_info, Handle<String> source,
- Compiler::ScriptDetails script_details, ScriptOriginOptions origin_options,
- NativesFlag natives,
+ ScriptDetails script_details, NativesFlag natives,
MaybeHandle<FixedArray> maybe_wrapped_arguments = kNullMaybeHandle) {
// Create a script object describing the script to be compiled.
- Handle<Script> script = parse_info->CreateScript(
- isolate, source, maybe_wrapped_arguments, origin_options, natives);
+ Handle<Script> script =
+ parse_info->CreateScript(isolate, source, maybe_wrapped_arguments,
+ script_details.origin_options, natives);
DisallowGarbageCollection no_gc;
SetScriptFieldsFromDetails(isolate, *script, script_details, &no_gc);
LOG(isolate, ScriptDetails(*script));
@@ -2639,16 +2678,15 @@ Handle<Script> NewScript(
MaybeHandle<SharedFunctionInfo> CompileScriptOnMainThread(
const UnoptimizedCompileFlags flags, Handle<String> source,
- const Compiler::ScriptDetails& script_details,
- ScriptOriginOptions origin_options, NativesFlag natives,
+ const ScriptDetails& script_details, NativesFlag natives,
v8::Extension* extension, Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
UnoptimizedCompileState compile_state(isolate);
ParseInfo parse_info(isolate, flags, &compile_state);
parse_info.set_extension(extension);
- Handle<Script> script = NewScript(isolate, &parse_info, source,
- script_details, origin_options, natives);
+ Handle<Script> script =
+ NewScript(isolate, &parse_info, source, script_details, natives);
DCHECK_IMPLIES(parse_info.flags().collect_type_profile(),
script->IsUserJavaScript());
DCHECK_EQ(parse_info.flags().is_repl_mode(), script->is_repl_mode());
@@ -2704,14 +2742,13 @@ class StressBackgroundCompileThread : public base::Thread {
v8::ScriptCompiler::StreamedSource streamed_source_;
};
-bool CanBackgroundCompile(const Compiler::ScriptDetails& script_details,
- ScriptOriginOptions origin_options,
+bool CanBackgroundCompile(const ScriptDetails& script_details,
v8::Extension* extension,
ScriptCompiler::CompileOptions compile_options,
NativesFlag natives) {
// TODO(leszeks): Remove the module check once background compilation of
// modules is supported.
- return !origin_options.IsModule() && !extension &&
+ return !script_details.origin_options.IsModule() && !extension &&
script_details.repl_mode == REPLMode::kNo &&
compile_options == ScriptCompiler::kNoCompileOptions &&
natives == NOT_NATIVES_CODE;
@@ -2728,13 +2765,13 @@ bool CompilationExceptionIsRangeError(Isolate* isolate, Handle<Object> obj) {
}
MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
- Handle<String> source, const Compiler::ScriptDetails& script_details,
- ScriptOriginOptions origin_options, Isolate* isolate,
- IsCompiledScope* is_compiled_scope) {
+ Handle<String> source, const ScriptDetails& script_details,
+ Isolate* isolate, IsCompiledScope* is_compiled_scope) {
// Start a background thread compiling the script.
StressBackgroundCompileThread background_compile_thread(
isolate, source,
- origin_options.IsModule() ? ScriptType::kModule : ScriptType::kClassic);
+ script_details.origin_options.IsModule() ? ScriptType::kModule
+ : ScriptType::kClassic);
UnoptimizedCompileFlags flags_copy =
background_compile_thread.data()->task->flags();
@@ -2752,8 +2789,8 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
TryCatch ignore_try_catch(reinterpret_cast<v8::Isolate*>(isolate));
flags_copy.set_script_id(Script::kTemporaryScriptId);
main_thread_maybe_result = CompileScriptOnMainThread(
- flags_copy, source, script_details, origin_options, NOT_NATIVES_CODE,
- nullptr, isolate, &inner_is_compiled_scope);
+ flags_copy, source, script_details, NOT_NATIVES_CODE, nullptr, isolate,
+ &inner_is_compiled_scope);
if (main_thread_maybe_result.is_null()) {
// Assume all range errors are stack overflows.
main_thread_had_stack_overflow = CompilationExceptionIsRangeError(
@@ -2770,8 +2807,7 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
MaybeHandle<SharedFunctionInfo> maybe_result =
Compiler::GetSharedFunctionInfoForStreamedScript(
- isolate, source, script_details, origin_options,
- background_compile_thread.data());
+ isolate, source, script_details, background_compile_thread.data());
// Either both compiles should succeed, or both should fail. The one exception
// to this is that the main-thread compilation might stack overflow while the
@@ -2799,9 +2835,9 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
// static
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
- const Compiler::ScriptDetails& script_details,
- ScriptOriginOptions origin_options, v8::Extension* extension,
- ScriptData* cached_data, ScriptCompiler::CompileOptions compile_options,
+ const ScriptDetails& script_details, v8::Extension* extension,
+ AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
@@ -2834,21 +2870,21 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
// First check per-isolate compilation cache.
- maybe_result = compilation_cache->LookupScript(
- source, script_details.name_obj, script_details.line_offset,
- script_details.column_offset, origin_options, language_mode);
+ maybe_result =
+ compilation_cache->LookupScript(source, script_details, language_mode);
if (!maybe_result.is_null()) {
compile_timer.set_hit_isolate_cache();
} else if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
- HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+ NestedTimedHistogramScope timer(
+ isolate->counters()->compile_deserialize());
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
if (CodeSerializer::Deserialize(isolate, cached_data, source,
- origin_options)
+ script_details.origin_options)
.ToHandle(&inner_result) &&
inner_result->is_compiled()) {
// Promote to per-isolate compilation cache.
@@ -2866,26 +2902,26 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (maybe_result.is_null()) {
// No cache entry found compile the script.
if (FLAG_stress_background_compile &&
- CanBackgroundCompile(script_details, origin_options, extension,
- compile_options, natives)) {
+ CanBackgroundCompile(script_details, extension, compile_options,
+ natives)) {
// If the --stress-background-compile flag is set, do the actual
// compilation on a background thread, and wait for its result.
maybe_result = CompileScriptOnBothBackgroundAndMainThread(
- source, script_details, origin_options, isolate, &is_compiled_scope);
+ source, script_details, isolate, &is_compiled_scope);
} else {
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForToplevelCompile(
isolate, natives == NOT_NATIVES_CODE, language_mode,
script_details.repl_mode,
- origin_options.IsModule() ? ScriptType::kModule
- : ScriptType::kClassic,
+ script_details.origin_options.IsModule() ? ScriptType::kModule
+ : ScriptType::kClassic,
FLAG_lazy);
flags.set_is_eager(compile_options == ScriptCompiler::kEagerCompile);
- maybe_result = CompileScriptOnMainThread(
- flags, source, script_details, origin_options, natives, extension,
- isolate, &is_compiled_scope);
+ maybe_result =
+ CompileScriptOnMainThread(flags, source, script_details, natives,
+ extension, isolate, &is_compiled_scope);
}
// Add the result to the isolate cache.
@@ -2904,8 +2940,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// static
MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
- Handle<Context> context, const Compiler::ScriptDetails& script_details,
- ScriptOriginOptions origin_options, ScriptData* cached_data,
+ Handle<Context> context, const ScriptDetails& script_details,
+ AlignedCachedData* cached_data,
v8::ScriptCompiler::CompileOptions compile_options,
v8::ScriptCompiler::NoCacheReason no_cache_reason) {
Isolate* isolate = context->GetIsolate();
@@ -2930,12 +2966,12 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
- HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+ NestedTimedHistogramScope timer(isolate->counters()->compile_deserialize());
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
maybe_result = CodeSerializer::Deserialize(isolate, cached_data, source,
- origin_options);
+ script_details.origin_options);
if (maybe_result.is_null()) {
// Deserializer failed. Fall through to compile.
compile_timer.set_consuming_code_cache_failed();
@@ -2966,7 +3002,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
}
script = NewScript(isolate, &parse_info, source, script_details,
- origin_options, NOT_NATIVES_CODE, arguments);
+ NOT_NATIVES_CODE, arguments);
Handle<SharedFunctionInfo> top_level;
maybe_result = v8::internal::CompileToplevel(&parse_info, script,
@@ -2999,8 +3035,8 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
MaybeHandle<SharedFunctionInfo>
Compiler::GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
- const ScriptDetails& script_details, ScriptOriginOptions origin_options,
- ScriptStreamingData* streaming_data) {
+ const ScriptDetails& script_details, ScriptStreamingData* streaming_data) {
+ ScriptOriginOptions origin_options = script_details.origin_options;
DCHECK(!origin_options.IsWasm());
ScriptCompileTimerScope compile_timer(
@@ -3020,9 +3056,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
{
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.CheckCache");
- maybe_result = compilation_cache->LookupScript(
- source, script_details.name_obj, script_details.line_offset,
- script_details.column_offset, origin_options, task->language_mode());
+ maybe_result = compilation_cache->LookupScript(source, script_details,
+ task->language_mode());
if (!maybe_result.is_null()) {
compile_timer.set_hit_isolate_cache();
}
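
BackgroundDeserializeTask splits code-cache deserialization into a main-thread constructor that takes ownership of the cached bytes, an off-thread Run() that produces intermediate data, and a main-thread Finish() that materializes the result. A small generic sketch of that three-phase pattern (the payload type and the std::thread driver are assumptions, not V8 code):

    #include <string>
    #include <thread>
    #include <utility>
    #include <vector>

    class BackgroundTask {
     public:
      // Takes ownership of the cached bytes up front so the worker never touches
      // embedder-owned memory.
      explicit BackgroundTask(std::vector<unsigned char> cached_data)
          : cached_data_(std::move(cached_data)) {}

      // Runs off the main thread; only produces intermediate, thread-local data.
      void Run() {
        off_thread_result_ = std::string(cached_data_.begin(), cached_data_.end());
      }

      // Called on the main thread to turn the intermediate data into the final result.
      std::string Finish() { return std::move(off_thread_result_); }

     private:
      std::vector<unsigned char> cached_data_;
      std::string off_thread_result_;
    };

    int main() {
      BackgroundTask task({'c', 'o', 'd', 'e'});
      std::thread worker([&task] { task.Run(); });
      worker.join();                      // main thread waits, then finalizes
      return task.Finish().empty() ? 1 : 0;
    }
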
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 78b5bea7bb..0d1582d872 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -19,6 +19,7 @@
#include "src/objects/debug-objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/pending-compilation-error-handler.h"
+#include "src/snapshot/code-serializer.h"
#include "src/utils/allocation.h"
#include "src/zone/zone.h"
@@ -26,6 +27,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class AlignedCachedData;
class AstRawString;
class BackgroundCompileTask;
class IsCompiledScope;
@@ -35,12 +37,12 @@ class OptimizedCompilationJob;
class ParseInfo;
class Parser;
class RuntimeCallStats;
-class ScriptData;
-struct ScriptStreamingData;
class TimedHistogram;
class UnoptimizedCompilationInfo;
class UnoptimizedCompilationJob;
class WorkerThreadRuntimeCallStats;
+struct ScriptDetails;
+struct ScriptStreamingData;
using UnoptimizedCompilationJobList =
std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>;
@@ -130,29 +132,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
ParseRestriction restriction, int parameters_end_pos,
int eval_scope_position, int eval_position);
- struct ScriptDetails {
- ScriptDetails()
- : line_offset(0), column_offset(0), repl_mode(REPLMode::kNo) {}
- explicit ScriptDetails(Handle<Object> script_name)
- : line_offset(0),
- column_offset(0),
- name_obj(script_name),
- repl_mode(REPLMode::kNo) {}
-
- int line_offset;
- int column_offset;
- i::MaybeHandle<i::Object> name_obj;
- i::MaybeHandle<i::Object> source_map_url;
- i::MaybeHandle<i::FixedArray> host_defined_options;
- REPLMode repl_mode;
- };
-
// Create a function that results from wrapping |source| in a function,
// with |arguments| being a list of parameters for that function.
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
Handle<Context> context, const ScriptDetails& script_details,
- ScriptOriginOptions origin_options, ScriptData* cached_data,
+ AlignedCachedData* cached_data,
v8::ScriptCompiler::CompileOptions compile_options,
v8::ScriptCompiler::NoCacheReason no_cache_reason);
@@ -176,8 +161,8 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
- const ScriptDetails& script_details, ScriptOriginOptions origin_options,
- v8::Extension* extension, ScriptData* cached_data,
+ const ScriptDetails& script_details, v8::Extension* extension,
+ AlignedCachedData* cached_data,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
@@ -189,8 +174,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// owned by the caller.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
- const ScriptDetails& script_details, ScriptOriginOptions origin_options,
- ScriptStreamingData* streaming_data);
+ const ScriptDetails& script_details, ScriptStreamingData* streaming_data);
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
@@ -576,6 +560,23 @@ struct ScriptStreamingData {
std::unique_ptr<BackgroundCompileTask> task;
};
+class V8_EXPORT_PRIVATE BackgroundDeserializeTask {
+ public:
+ BackgroundDeserializeTask(Isolate* isolate,
+ std::unique_ptr<ScriptCompiler::CachedData> data);
+
+ void Run();
+
+ MaybeHandle<SharedFunctionInfo> Finish(Isolate* isolate,
+ Handle<String> source,
+ ScriptOriginOptions origin_options);
+
+ private:
+ Isolate* isolate_for_local_isolate_;
+ AlignedCachedData cached_data_;
+ CodeSerializer::OffThreadDeserializeData off_thread_data_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 71d0d4d419..9af91d7a15 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -354,13 +354,13 @@ void ConstantPool::Emit(const ConstantPoolKey& key) {
assm_->dd(key.value32());
} else {
if (assm_->IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(key.rmode())) {
+ int offset = assm_->pc_offset();
+ Assembler::EmbeddedObjectIndex index = key.value64();
assm_->saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(assm_->pc_offset(), key.value64()));
- Handle<Object> handle = assm_->GetEmbeddedObject(key.value64());
- assm_->dq(handle->ptr());
- // We must ensure that `dq` is not growing the assembler buffer
- // and falling back to off-heap compilation.
- DCHECK(assm_->IsOnHeap());
+ std::make_pair(offset, index));
+ Handle<Object> object = assm_->GetEmbeddedObject(index);
+ assm_->dq(object->ptr());
+ DCHECK(assm_->EmbeddedObjectMatches(offset, object, index));
} else {
assm_->dq(key.value64());
}
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index 47346d3455..b2d890c6f4 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -192,7 +192,8 @@ class ConstantPoolKey {
rmode_ != RelocInfo::VENEER_POOL &&
rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
rmode_ != RelocInfo::DEOPT_INLINING_ID &&
- rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
+ rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID &&
+ rmode_ != RelocInfo::DEOPT_NODE_ID);
// CODE_TARGETs can be shared because they aren't patched anymore,
// and we make sure we emit only one reloc info for them (thus delta
// patching) will apply the delta only once. At the moment, we do not dedup
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 6833ee60d0..ab6608679f 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -52,13 +52,11 @@ enum CpuFeature {
MIPS_SIMD, // MSA instructions
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
- FPU,
- FPR_GPR_MOV,
- LWSYNC,
- ISELECT,
- VSX,
- MODULO,
- SIMD,
+ PPC_6_PLUS,
+ PPC_7_PLUS,
+ PPC_8_PLUS,
+ PPC_9_PLUS,
+ PPC_10_PLUS,
#elif V8_TARGET_ARCH_S390X
FPU,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index d10f8e398e..e1d8c5d96e 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -492,9 +492,14 @@ ExternalReference ExternalReference::scheduled_exception_address(
return ExternalReference(isolate->scheduled_exception_address());
}
-ExternalReference ExternalReference::address_of_pending_message_obj(
+ExternalReference ExternalReference::address_of_pending_message(
Isolate* isolate) {
- return ExternalReference(isolate->pending_message_obj_address());
+ return ExternalReference(isolate->pending_message_address());
+}
+
+ExternalReference ExternalReference::address_of_pending_message(
+ LocalIsolate* local_isolate) {
+ return ExternalReference(local_isolate->pending_message_address());
}
FUNCTION_REFERENCE(abort_with_reason, i::abort_with_reason)
@@ -512,11 +517,6 @@ ExternalReference ExternalReference::address_of_builtin_subclassing_flag() {
return ExternalReference(&FLAG_builtin_subclassing);
}
-ExternalReference
-ExternalReference::address_of_harmony_regexp_match_indices_flag() {
- return ExternalReference(&FLAG_harmony_regexp_match_indices);
-}
-
ExternalReference ExternalReference::address_of_runtime_stats_flag() {
return ExternalReference(&TracingFlags::runtime_stats);
}
@@ -818,6 +818,13 @@ void relaxed_memcpy(volatile base::Atomic8* dest,
FUNCTION_REFERENCE(relaxed_memcpy_function, relaxed_memcpy)
+void relaxed_memmove(volatile base::Atomic8* dest,
+ volatile const base::Atomic8* src, size_t n) {
+ base::Relaxed_Memmove(dest, src, n);
+}
+
+FUNCTION_REFERENCE(relaxed_memmove_function, relaxed_memmove)
+
ExternalReference ExternalReference::printf_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(std::printf)));
}
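
relaxed_memmove mirrors the existing relaxed_memcpy reference: an overlap-safe byte copy whose loads and stores are relaxed atomics, so concurrent readers of a shared backing store never observe torn accesses. A hedged sketch of the idea, with std::atomic standing in for v8::base::Atomic8:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Copy n bytes between possibly overlapping regions using relaxed atomic
    // accesses; the copy direction is chosen so overlap is handled correctly.
    void RelaxedMemmove(std::atomic<uint8_t>* dest,
                        const std::atomic<uint8_t>* src, size_t n) {
      if (dest < src) {
        for (size_t i = 0; i < n; ++i) {
          dest[i].store(src[i].load(std::memory_order_relaxed),
                        std::memory_order_relaxed);
        }
      } else {
        for (size_t i = n; i > 0; --i) {
          dest[i - 1].store(src[i - 1].load(std::memory_order_relaxed),
                            std::memory_order_relaxed);
        }
      }
    }
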
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 169050e4ac..cbc3463841 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -49,7 +49,7 @@ class StatsCounter;
V(handle_scope_next_address, "HandleScope::next") \
V(handle_scope_limit_address, "HandleScope::limit") \
V(scheduled_exception_address, "Isolate::scheduled_exception") \
- V(address_of_pending_message_obj, "address_of_pending_message_obj") \
+ V(address_of_pending_message, "address_of_pending_message") \
V(promise_hook_flags_address, "Isolate::promise_hook_flags_address()") \
V(promise_hook_address, "Isolate::promise_hook_address()") \
V(async_event_delegate_address, "Isolate::async_event_delegate_address()") \
@@ -104,8 +104,6 @@ class StatsCounter;
"address_of_enable_experimental_regexp_engine") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
- V(address_of_harmony_regexp_match_indices_flag, \
- "FLAG_harmony_regexp_match_indices") \
V(address_of_min_int, "LDoubleConstant::min_int") \
V(address_of_mock_arraybuffer_allocator_flag, \
"FLAG_mock_arraybuffer_allocator") \
@@ -177,6 +175,7 @@ class StatsCounter;
V(libc_memmove_function, "libc_memmove") \
V(libc_memset_function, "libc_memset") \
V(relaxed_memcpy_function, "relaxed_memcpy") \
+ V(relaxed_memmove_function, "relaxed_memmove") \
V(mod_two_doubles_operation, "mod_two_doubles") \
V(mutable_big_int_absolute_add_and_canonicalize_function, \
"MutableBigInt_AbsoluteAddAndCanonicalize") \
@@ -360,6 +359,9 @@ class ExternalReference {
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(COUNT_EXTERNAL_REFERENCE);
#undef COUNT_EXTERNAL_REFERENCE
+ static V8_EXPORT_PRIVATE ExternalReference
+ address_of_pending_message(LocalIsolate* local_isolate);
+
ExternalReference() : address_(kNullAddress) {}
static ExternalReference Create(const SCTableReference& table_ref);
static ExternalReference Create(StatsCounter* counter);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index a5829e77d1..f4ff4914fb 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -186,13 +186,11 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode);
if (rmode == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
+ int offset = pc_offset();
Handle<HeapObject> object(reinterpret_cast<Address*>(x));
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(pc_offset(), x));
+ saved_handles_for_raw_object_ptr_.push_back(std::make_pair(offset, x));
emit(object->ptr());
- // We must ensure that `emit` is not growing the assembler buffer
- // and falling back to off-heap compilation.
- DCHECK(IsOnHeap());
+ DCHECK(EmbeddedObjectMatches(offset, object));
return;
}
}
@@ -216,12 +214,11 @@ void Assembler::emit(const Immediate& x) {
return;
}
if (x.is_embedded_object() && IsOnHeap()) {
+ int offset = pc_offset();
saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(pc_offset(), x.immediate()));
+ std::make_pair(offset, x.immediate()));
emit(x.embedded_object()->ptr());
- // We must ensure that `emit` is not growing the assembler buffer
- // and falling back to off-heap compilation.
- DCHECK(IsOnHeap());
+ DCHECK(EmbeddedObjectMatches(offset, x.embedded_object()));
return;
}
emit(x.immediate());
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 1880ee1ad7..90f8e8b70c 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -3341,11 +3341,29 @@ void Assembler::emit_vex_prefix(Register vreg, VectorLength l, SIMDPrefix pp,
emit_vex_prefix(ivreg, l, pp, mm, w);
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ WriteUnalignedValue(base + p.first, *object);
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue<uint32_t>(base + p.first, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
DCHECK_EQ(buffer_start_, buffer_->start());
bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
// Compute new buffer size.
int old_size = buffer_->size();
@@ -3394,11 +3412,12 @@ void Assembler::GrowBuffer() {
it.rinfo()->apply(pc_delta);
}
- // Patch on-heap references to handles.
- if (previously_on_heap && !buffer_->IsOnHeap()) {
- Address base = reinterpret_cast<Address>(buffer_->start());
- for (auto p : saved_handles_for_raw_object_ptr_) {
- WriteUnalignedValue<uint32_t>(base + p.first, p.second);
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
}
}
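
The new branch above chooses how to repair references once GrowBuffer has copied the code: nothing needs refreshing if the raw pointers are still valid, embedded objects must be re-read through their handles if a GC ran in between, and the fallback to an off-heap buffer goes back to storing handle slots. A self-contained restatement of that decision (enumerators are illustrative, not V8 types):

    enum class FixupAction {
      kNone,                    // buffer never was on-heap
      kKeepRawPointers,         // still on-heap, no GC: words are still valid
      kRefreshEmbeddedObjects,  // still on-heap, GC ran: objects may have moved
      kRewriteToHandles         // fell back off-heap: store handle slots again
    };

    constexpr FixupAction ChooseFixup(bool previously_on_heap,
                                      bool still_on_heap,
                                      bool gc_happened_meanwhile) {
      if (!previously_on_heap) return FixupAction::kNone;
      if (!still_on_heap) return FixupAction::kRewriteToHandles;
      return gc_happened_meanwhile ? FixupAction::kRefreshEmbeddedObjects
                                   : FixupAction::kKeepRawPointers;
    }

    static_assert(ChooseFixup(true, true, false) ==
                      FixupAction::kKeepRawPointers,
                  "no GC while on-heap: raw tagged words remain valid");
    static_assert(ChooseFixup(true, false, true) ==
                      FixupAction::kRewriteToHandles,
                  "the off-heap fallback always restores handle slots");
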
@@ -3494,7 +3513,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
emit(data);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 806d17a2d4..89a65ee99b 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -392,11 +392,27 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fall back from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
void FinalizeJumpOptimizationInfo();
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return *reinterpret_cast<uint32_t*>(buffer_->start() + pc_offset) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
// Read/Modify the code target in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
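
EmbeddedObjectMatches is a DEBUG-only cross-check on what the previously emitted word must contain: the tagged pointer itself while the buffer is still on-heap, or the handle slot's address once compilation has fallen back off-heap. A toy model of that comparison (names are illustrative, not the V8 API):

    #include <cstdint>

    struct ToyHandle {
      uintptr_t* slot;                         // handle slot
      uintptr_t ptr() const { return *slot; }  // tagged pointer it refers to
      uintptr_t address() const {
        return reinterpret_cast<uintptr_t>(slot);
      }
    };

    bool WordMatchesEmbeddedObject(uintptr_t emitted_word, bool on_heap,
                                   const ToyHandle& object) {
      return emitted_word == (on_heap ? object.ptr() : object.address());
    }
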
@@ -464,6 +480,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
// Stack
void pushad();
@@ -1786,8 +1803,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 16298ed536..c95ea8ad2c 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -81,11 +81,15 @@ void TurboAssembler::InitializeRootRegister() {
Move(kRootRegister, Immediate(isolate_root));
}
+Operand TurboAssembler::RootAsOperand(RootIndex index) {
+ DCHECK(root_array_available());
+ return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
+}
+
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
- mov(destination,
- Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ mov(destination, RootAsOperand(index));
return;
}
@@ -123,7 +127,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
- cmp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ cmp(with, RootAsOperand(index));
return;
}
@@ -140,7 +144,7 @@ void MacroAssembler::PushRoot(RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
DCHECK(RootsTable::IsImmortalImmovable(index));
- push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ push(RootAsOperand(index));
return;
}
@@ -195,7 +199,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
- // TODO(jgruber): Add support for enable_root_array_delta_access.
+ // TODO(jgruber): Add support for enable_root_relative_access.
if (root_array_available() && options().isolate_independent_code) {
if (IsAddressableThroughRootRegister(isolate(), reference)) {
// Some external references can be efficiently loaded as an offset from
@@ -234,7 +238,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
Builtin builtin;
RootIndex root_index;
if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
- return Operand(kRootRegister, RootRegisterOffsetForRootIndex(root_index));
+ return RootAsOperand(root_index);
} else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
return Operand(kRootRegister, RootRegisterOffsetForBuiltin(builtin));
} else if (object.is_identical_to(code_object_) &&
@@ -276,7 +280,7 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void TurboAssembler::LoadAddress(Register destination,
ExternalReference source) {
- // TODO(jgruber): Add support for enable_root_array_delta_access.
+ // TODO(jgruber): Add support for enable_root_relative_access.
if (root_array_available() && options().isolate_independent_code) {
IndirectLoadExternalReference(destination, source);
return;
@@ -1157,6 +1161,70 @@ void TurboAssembler::Prologue() {
push(kJavaScriptCallArgCountRegister); // Actual argument count.
}
+void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ int receiver_bytes =
+ (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
+ switch (type) {
+ case kCountIsInteger: {
+ lea(esp, Operand(esp, count, times_system_pointer_size, receiver_bytes));
+ break;
+ }
+ case kCountIsSmi: {
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ // SMIs are stored shifted left by 1 bit with the tag being 0.
+ // This is equivalent to multiplying by 2. To convert SMIs to bytes we
+ // can therefore just multiply the stored value by half the system pointer
+ // size.
+ lea(esp,
+ Operand(esp, count, times_half_system_pointer_size, receiver_bytes));
+ break;
+ }
+ case kCountIsBytes: {
+ if (receiver_bytes == 0) {
+ add(esp, count);
+ } else {
+ lea(esp, Operand(esp, count, times_1, receiver_bytes));
+ }
+ break;
+ }
+ }
+}
+
+void TurboAssembler::DropArguments(Register count, Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(count, scratch));
+ PopReturnAddressTo(scratch);
+ DropArguments(count, type, mode);
+ PushReturnAddressFrom(scratch);
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Register receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, receiver, scratch));
+ PopReturnAddressTo(scratch);
+ DropArguments(argc, type, mode);
+ Push(receiver);
+ PushReturnAddressFrom(scratch);
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Operand receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, scratch));
+ DCHECK(!receiver.is_reg(scratch));
+ PopReturnAddressTo(scratch);
+ DropArguments(argc, type, mode);
+ Push(receiver);
+ PushReturnAddressFrom(scratch);
+}
+
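
The kCountIsSmi case above leans on the Smi encoding to avoid an explicit untag: a Smi already stores the count multiplied by two, so scaling it by half the system pointer size yields the byte distance to drop. A small host-side illustration, with the ia32 constants assumed:

    #include <cstdint>

    // Assumptions: 32-bit Smis with kSmiTagSize == 1 and kSmiTag == 0.
    constexpr int kSystemPointerSize = 4;  // ia32

    constexpr int32_t SmiFromInt(int32_t n) { return n << 1; }

    constexpr int32_t BytesToDropFromSmi(int32_t smi) {
      // smi == n * 2, so multiplying by half a pointer size gives n * 4.
      return smi * (kSystemPointerSize / 2);
    }

    static_assert(BytesToDropFromSmi(SmiFromInt(3)) == 3 * kSystemPointerSize,
                  "scaling the tagged count by half the pointer size equals "
                  "the untagged count in bytes");
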
void TurboAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
push(ebp);
@@ -1164,6 +1232,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
if (!StackFrame::IsJavaScript(type)) {
Push(Immediate(StackFrame::TypeToMarker(type)));
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -1431,60 +1502,6 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
jmp(entry, RelocInfo::OFF_HEAP_TARGET);
}
-void TurboAssembler::PrepareForTailCall(
- Register callee_args_count, Register caller_args_count, Register scratch0,
- Register scratch1, int number_of_temp_values_after_return_address) {
- ASM_CODE_COMMENT(this);
- DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
-
- // Calculate the destination address where we will put the return address
- // after we drop current frame.
- Register new_sp_reg = scratch0;
- sub(caller_args_count, callee_args_count);
- lea(new_sp_reg, Operand(ebp, caller_args_count, times_system_pointer_size,
- StandardFrameConstants::kCallerPCOffset -
- number_of_temp_values_after_return_address *
- kSystemPointerSize));
-
- if (FLAG_debug_code) {
- cmp(esp, new_sp_reg);
- Check(below, AbortReason::kStackAccessBelowStackPointer);
- }
-
- // Copy return address from caller's frame to current frame's return address
- // to avoid its trashing and let the following loop copy it to the right
- // place.
- Register tmp_reg = scratch1;
- mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- mov(Operand(esp,
- number_of_temp_values_after_return_address * kSystemPointerSize),
- tmp_reg);
-
- // Restore caller's frame pointer now as it could be overwritten by
- // the copying loop.
- mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // +2 here is to copy both receiver and return address.
- Register count_reg = caller_args_count;
- lea(count_reg, Operand(callee_args_count,
- 2 + number_of_temp_values_after_return_address));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
- Label loop, entry;
- jmp(&entry, Label::kNear);
- bind(&loop);
- dec(count_reg);
- mov(tmp_reg, Operand(esp, count_reg, times_system_pointer_size, 0));
- mov(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
- bind(&entry);
- cmp(count_reg, Immediate(0));
- j(not_equal, &loop, Label::kNear);
-
- // Leave current frame.
- mov(esp, new_sp_reg);
-}
-
void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index cfec105d87..527c357047 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -203,19 +203,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
SmiUntag(output);
}
- // Removes current frame and its arguments from the stack preserving the
- // arguments and a return address pushed to the stack for the next call. Both
- // |callee_args_count| and |caller_args_count| do not include receiver.
- // |callee_args_count| is not modified. |caller_args_count| is trashed.
- // |number_of_temp_values_after_return_address| specifies the number of words
- // pushed to the stack after the return address. This is to allow "allocation"
- // of scratch registers that this function requires by saving their values on
- // the stack.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1,
- int number_of_temp_values_after_return_address);
-
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
@@ -244,6 +231,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void StubPrologue(StackFrame::Type type);
void Prologue();
+ // Helpers for argument handling
+ enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
+ enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
+ void DropArguments(Register count, Register scratch, ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Operand receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, Operand src);
@@ -269,6 +270,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void InitializeRootRegister();
+ Operand RootAsOperand(RootIndex index);
void LoadRoot(Register destination, RootIndex index) final;
// Indirect root-relative loads.
@@ -490,6 +492,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void ExceptionHandler() {}
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
+
+ protected:
+ // Drops arguments assuming that the return address was already popped.
+ void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+ ArgumentsCountMode mode = kCountExcludesReceiver);
};
// MacroAssembler implements a collection of frequently used macros.
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index 7a00608459..19ec3e86b1 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -358,6 +358,15 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeLog2Of(
}
}
+constexpr int kMaximumReprSizeLog2 =
+ ElementSizeLog2Of(MachineRepresentation::kSimd128);
+constexpr int kMaximumReprSizeInBytes = 1 << kMaximumReprSizeLog2;
+
+STATIC_ASSERT(kMaximumReprSizeLog2 >=
+ ElementSizeLog2Of(MachineRepresentation::kTagged));
+STATIC_ASSERT(kMaximumReprSizeLog2 >=
+ ElementSizeLog2Of(MachineRepresentation::kWord64));
+
V8_EXPORT_PRIVATE inline constexpr int ElementSizeInBytes(
MachineRepresentation rep) {
return 1 << ElementSizeLog2Of(rep);
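
The new constants cap the size of any machine representation at the Simd128 width, which the STATIC_ASSERTs then check against the tagged and word64 representations. An illustrative restatement with the assumed values spelled out:

    // Assumed from the definitions above: Simd128 is the widest
    // representation and ElementSizeLog2Of(kSimd128) == 4.
    constexpr int kSimd128SizeLog2 = 4;
    constexpr int kMaxReprSizeLog2 = kSimd128SizeLog2;
    constexpr int kMaxReprSizeInBytes = 1 << kMaxReprSizeLog2;

    static_assert(kMaxReprSizeInBytes == 16, "Simd128 values span 16 bytes");
    static_assert(kMaxReprSizeLog2 >= 3, "covers kWord64 (8 bytes, log2 == 3)");
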
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 2948dbd18a..0d5a8710e5 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -1988,11 +1988,11 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src,
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
- addiu(at, src->rm(), kMinOffsetForSimpleAdjustment);
+ addiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
src->offset_ -= kMinOffsetForSimpleAdjustment;
} else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
src->offset() < 0) {
- addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment);
+ addiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (IsMipsArchVariant(kMips32r6)) {
// On r6 take advantage of the aui instruction, e.g.:
@@ -3537,7 +3537,27 @@ void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ set_target_value_at(address, object->ptr());
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ set_target_value_at(address, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
void Assembler::GrowBuffer() {
+ bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -3580,6 +3600,16 @@ void Assembler::GrowBuffer() {
RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
}
}
+
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
+ }
+ }
+
DCHECK(!overflow());
}
@@ -3592,7 +3622,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -3602,7 +3633,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint64_t*>(pc_) = data;
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 2eced6363b..e2c5943ae4 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -167,6 +167,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fall back from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
@@ -360,6 +369,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
@@ -1404,8 +1414,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
@@ -1626,6 +1636,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return target_address_at(
+ reinterpret_cast<Address>(buffer_->start() + pc_offset)) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
index e1b7451eda..9d097263ae 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
@@ -34,7 +34,7 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::
// static
constexpr auto WriteBarrierDescriptor::registers() {
- return RegisterArray(a1, v0, a0, a2, a3);
+ return RegisterArray(a1, t1, t0, a0, a2, v0, a3);
}
// static
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index c197d8e6f3..9c1af1cb05 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -16,6 +16,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -1393,6 +1394,18 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
ori(rd, rd, (j.immediate() & kImm16Mask));
}
}
+ } else if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(j.rmode())) {
+ BlockGrowBufferScope block_growbuffer(this);
+ int offset = pc_offset();
+ Address address = j.immediate();
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(offset, address));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(address));
+ int32_t immediate = object->ptr();
+ RecordRelocInfo(j.rmode(), immediate);
+ lui(rd, (immediate >> kLuiShift) & kImm16Mask);
+ ori(rd, rd, (immediate & kImm16Mask));
+ DCHECK(EmbeddedObjectMatches(offset, object));
} else {
int32_t immediate;
if (j.IsHeapObjectRequest()) {
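
On 32-bit MIPS the embedded tagged pointer is materialized with a lui/ori pair, so the recorded pc_offset identifies two instructions that together hold the 32-bit value. A sketch of that split, with kLuiShift and kImm16Mask values assumed:

    #include <cstdint>

    constexpr uint32_t kImm16Mask = 0xFFFF;
    constexpr int kLuiShift = 16;

    constexpr uint32_t UpperHalf(uint32_t imm) {
      return (imm >> kLuiShift) & kImm16Mask;  // goes into the lui
    }
    constexpr uint32_t LowerHalf(uint32_t imm) {
      return imm & kImm16Mask;  // goes into the ori
    }

    static_assert((UpperHalf(0x12345678u) << kLuiShift |
                   LowerHalf(0x12345678u)) == 0x12345678u,
                  "lui writes the upper 16 bits, ori fills in the lower 16");
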
@@ -4052,6 +4065,16 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
}
}
+void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && (is_int26(offset))) {
+ BranchShortHelperR6(offset, nullptr);
+ } else {
+ // Generate position independent long branch.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ GenPCRelativeJump(t8, t9, offset, RelocInfo::NONE, bdslot);
+ }
+}
+
void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
@@ -4285,52 +4308,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- Lsa(dst_reg, fp, caller_args_count, kPointerSizeLog2);
- Addu(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kPointerSize is for the receiver.
- Lsa(src_reg, sp, callee_args_count, kPointerSizeLog2);
- Addu(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
- Operand(dst_reg));
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop, entry;
- Branch(&entry);
- bind(&loop);
- Subu(src_reg, src_reg, Operand(kPointerSize));
- Subu(dst_reg, dst_reg, Operand(kPointerSize));
- lw(tmp_reg, MemOperand(src_reg));
- sw(tmp_reg, MemOperand(dst_reg));
- bind(&entry);
- Branch(&loop, ne, sp, Operand(src_reg));
-
- // Leave current frame.
- mov(sp, dst_reg);
-}
-
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
@@ -4833,6 +4810,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
Push(kScratchReg);
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index cb362da51d..ffa5f5820d 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -176,6 +176,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
+ void BranchLong(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void Branch(Label* L, Condition cond, Register rs, RootIndex index,
BranchDelaySlot bdslot = PROTECT);
@@ -459,15 +460,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count| do not include
- // receiver. |callee_args_count| is not modified. |caller_args_count|
- // is trashed.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 73fbe4ce4d..0379cd65ce 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -3736,7 +3736,27 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ set_target_value_at(address, object->ptr());
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ set_target_value_at(address, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
void Assembler::GrowBuffer() {
+ bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -3778,6 +3798,16 @@ void Assembler::GrowBuffer() {
RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
}
}
+
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
+ }
+ }
+
DCHECK(!overflow());
}
@@ -3790,7 +3820,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -3800,7 +3831,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint64_t*>(pc_) = data;
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index ae3a2a2819..9f7ffdf8c7 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -167,6 +167,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fall back from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
@@ -359,6 +368,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
@@ -1467,8 +1477,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
@@ -1664,6 +1674,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return target_address_at(
+ reinterpret_cast<Address>(buffer_->start() + pc_offset)) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index a2e37bd9af..708cf4baa6 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -16,6 +16,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -1913,6 +1914,20 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
} else {
li_optimized(rd, j, mode);
}
+ } else if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(j.rmode())) {
+ BlockGrowBufferScope block_growbuffer(this);
+ int offset = pc_offset();
+ Address address = j.immediate();
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(offset, address));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(address));
+ int64_t immediate = object->ptr();
+ RecordRelocInfo(j.rmode(), immediate);
+ lui(rd, (immediate >> 32) & kImm16Mask);
+ ori(rd, rd, (immediate >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, immediate & kImm16Mask);
+ DCHECK(EmbeddedObjectMatches(offset, object));
} else if (MustUseReg(j.rmode())) {
int64_t immediate;
if (j.IsHeapObjectRequest()) {
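
The MIPS64 variant rebuilds the pointer from three 16-bit chunks (lui/ori/dsll/ori), which is sufficient because on-heap object addresses fit in 48 bits and lui sign-extends. A sketch of what the four instructions compute, under those assumptions:

    #include <cstdint>

    constexpr uint64_t kImm16Mask = 0xFFFF;

    constexpr int64_t Materialize48(int64_t imm) {
      // lui: bits [47:32] land in [31:16], sign-extended to 64 bits.
      int64_t rd = static_cast<int32_t>(((imm >> 32) & kImm16Mask) << 16);
      rd |= (imm >> 16) & kImm16Mask;  // ori: bits [31:16] of the value
      rd <<= 16;                       // dsll 16
      rd |= imm & kImm16Mask;          // ori: bits [15:0]
      return rd;
    }

    static_assert(Materialize48(0x0000'1234'5678'9ABC) == 0x0000'1234'5678'9ABC,
                  "a positive 48-bit address round-trips through the sequence");
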
@@ -4555,6 +4570,25 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
}
}
+void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT && (is_int26(offset))) {
+ BranchShortHelperR6(offset, nullptr);
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ or_(t8, ra, zero_reg);
+ nal(); // Read PC into ra register.
+ lui(t9, (offset & kHiMaskOf32) >> kLuiShift); // Branch delay slot.
+ ori(t9, t9, (offset & kImm16Mask));
+ daddu(t9, ra, t9);
+ if (bdslot == USE_DELAY_SLOT) {
+ or_(ra, t8, zero_reg);
+ }
+ jr(t9);
+ // Emit an or_ in the branch delay slot if it's protected.
+ if (bdslot == PROTECT) or_(ra, t8, zero_reg);
+ }
+}
+
void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
@@ -4798,52 +4832,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- Dlsa(dst_reg, fp, caller_args_count, kPointerSizeLog2);
- Daddu(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kPointerSize is for the receiver.
- Dlsa(src_reg, sp, callee_args_count, kPointerSizeLog2);
- Daddu(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
- Operand(dst_reg));
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop, entry;
- Branch(&entry);
- bind(&loop);
- Dsubu(src_reg, src_reg, Operand(kPointerSize));
- Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
- Ld(tmp_reg, MemOperand(src_reg));
- Sd(tmp_reg, MemOperand(dst_reg));
- bind(&entry);
- Branch(&loop, ne, sp, Operand(src_reg));
-
- // Leave current frame.
- mov(sp, dst_reg);
-}
-
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
@@ -5353,6 +5341,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
Push(kScratchReg);
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index a71f09a67d..a4991bcb1e 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -197,6 +197,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
+ void BranchLong(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void Branch(Label* L, Condition cond, Register rs, RootIndex index,
BranchDelaySlot bdslot = PROTECT);
@@ -484,15 +485,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count| do not include
- // receiver. |callee_args_count| is not modified. |caller_args_count|
- // is trashed.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index e0ecfffd9d..2c568b3f3f 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -56,7 +56,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
bool CpuFeatures::SupportsWasmSimd128() {
#if V8_ENABLE_WEBASSEMBLY
- return CpuFeatures::IsSupported(SIMD);
+ return CpuFeatures::IsSupported(PPC_9_PLUS);
#else
return false;
#endif // V8_ENABLE_WEBASSEMBLY
@@ -69,65 +69,33 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
-// Detect whether frim instruction is supported (POWER5+)
-// For now we will just check for processors we know do not
-// support it
-#ifndef USE_SIMULATOR
- // Probe for additional features at runtime.
+// Probe for additional features at runtime.
+#ifdef USE_SIMULATOR
+ // Simulator
+ supported_ |= (1u << PPC_10_PLUS);
+#else
base::CPU cpu;
- if (cpu.part() == base::CPU::kPPCPower9 ||
- cpu.part() == base::CPU::kPPCPower10) {
- supported_ |= (1u << MODULO);
- }
-#if V8_TARGET_ARCH_PPC64
- if (cpu.part() == base::CPU::kPPCPower8 ||
- cpu.part() == base::CPU::kPPCPower9 ||
- cpu.part() == base::CPU::kPPCPower10) {
- supported_ |= (1u << FPR_GPR_MOV);
- }
- // V8 PPC Simd implementations need P9 at a minimum.
- if (cpu.part() == base::CPU::kPPCPower9 ||
- cpu.part() == base::CPU::kPPCPower10) {
- supported_ |= (1u << SIMD);
- }
-#endif
- if (cpu.part() == base::CPU::kPPCPower6 ||
- cpu.part() == base::CPU::kPPCPower7 ||
- cpu.part() == base::CPU::kPPCPower8 ||
- cpu.part() == base::CPU::kPPCPower9 ||
- cpu.part() == base::CPU::kPPCPower10) {
- supported_ |= (1u << LWSYNC);
- }
- if (cpu.part() == base::CPU::kPPCPower7 ||
- cpu.part() == base::CPU::kPPCPower8 ||
- cpu.part() == base::CPU::kPPCPower9 ||
- cpu.part() == base::CPU::kPPCPower10) {
- supported_ |= (1u << ISELECT);
- supported_ |= (1u << VSX);
+ if (cpu.part() == base::CPU::kPPCPower10) {
+ supported_ |= (1u << PPC_10_PLUS);
+ } else if (cpu.part() == base::CPU::kPPCPower9) {
+ supported_ |= (1u << PPC_9_PLUS);
+ } else if (cpu.part() == base::CPU::kPPCPower8) {
+ supported_ |= (1u << PPC_8_PLUS);
+ } else if (cpu.part() == base::CPU::kPPCPower7) {
+ supported_ |= (1u << PPC_7_PLUS);
+ } else if (cpu.part() == base::CPU::kPPCPower6) {
+ supported_ |= (1u << PPC_6_PLUS);
}
#if V8_OS_LINUX
- if (!(cpu.part() == base::CPU::kPPCG5 || cpu.part() == base::CPU::kPPCG4)) {
- // Assume support
- supported_ |= (1u << FPU);
- }
if (cpu.icache_line_size() != base::CPU::kUnknownCacheLineSize) {
icache_line_size_ = cpu.icache_line_size();
}
-#elif V8_OS_AIX
- // Assume support FP support and default cache line size
- supported_ |= (1u << FPU);
-#endif
-#else // Simulator
- supported_ |= (1u << FPU);
- supported_ |= (1u << LWSYNC);
- supported_ |= (1u << ISELECT);
- supported_ |= (1u << VSX);
- supported_ |= (1u << MODULO);
- supported_ |= (1u << SIMD);
-#if V8_TARGET_ARCH_PPC64
- supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
+ if (supported_ & (1u << PPC_10_PLUS)) supported_ |= (1u << PPC_9_PLUS);
+ if (supported_ & (1u << PPC_9_PLUS)) supported_ |= (1u << PPC_8_PLUS);
+ if (supported_ & (1u << PPC_8_PLUS)) supported_ |= (1u << PPC_7_PLUS);
+ if (supported_ & (1u << PPC_7_PLUS)) supported_ |= (1u << PPC_6_PLUS);
// Set a static value on whether Simd is supported.
// This variable is only used for certain archs to query SupportWasmSimd128()
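
The probe now records only the newest Power generation it detects and then propagates the bit downwards, so a single cumulative check such as IsSupported(PPC_9_PLUS) replaces the old per-instruction flags (VSX, MODULO, SIMD, ...). A self-contained sketch of that implication chain:

    enum PpcFeature { PPC_6_PLUS, PPC_7_PLUS, PPC_8_PLUS, PPC_9_PLUS,
                      PPC_10_PLUS };

    constexpr unsigned WithImpliedFeatures(unsigned supported) {
      if (supported & (1u << PPC_10_PLUS)) supported |= 1u << PPC_9_PLUS;
      if (supported & (1u << PPC_9_PLUS)) supported |= 1u << PPC_8_PLUS;
      if (supported & (1u << PPC_8_PLUS)) supported |= 1u << PPC_7_PLUS;
      if (supported & (1u << PPC_7_PLUS)) supported |= 1u << PPC_6_PLUS;
      return supported;
    }

    static_assert(WithImpliedFeatures(1u << PPC_9_PLUS) & (1u << PPC_7_PLUS),
                  "a Power9 CPU also reports the Power7 baseline");
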
@@ -149,12 +117,11 @@ void CpuFeatures::PrintTarget() {
}
void CpuFeatures::PrintFeatures() {
- printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
- printf("FPR_GPR_MOV=%d\n", CpuFeatures::IsSupported(FPR_GPR_MOV));
- printf("LWSYNC=%d\n", CpuFeatures::IsSupported(LWSYNC));
- printf("ISELECT=%d\n", CpuFeatures::IsSupported(ISELECT));
- printf("VSX=%d\n", CpuFeatures::IsSupported(VSX));
- printf("MODULO=%d\n", CpuFeatures::IsSupported(MODULO));
+ printf("PPC_6_PLUS=%d\n", CpuFeatures::IsSupported(PPC_6_PLUS));
+ printf("PPC_7_PLUS=%d\n", CpuFeatures::IsSupported(PPC_7_PLUS));
+ printf("PPC_8_PLUS=%d\n", CpuFeatures::IsSupported(PPC_8_PLUS));
+ printf("PPC_9_PLUS=%d\n", CpuFeatures::IsSupported(PPC_9_PLUS));
+ printf("PPC_10_PLUS=%d\n", CpuFeatures::IsSupported(PPC_10_PLUS));
}
Register ToRegister(int num) {
@@ -868,6 +835,10 @@ void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}
+void Assembler::mulli(Register dst, Register src, const Operand& imm) {
+ d_form(MULLI, dst, src, imm.immediate(), true);
+}
+
// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
@@ -1649,6 +1620,12 @@ void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
rc);
}
+void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, RCBit rc) {
+ emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
+ rc);
+}
+
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frb, RCBit rc) {
a_form(EXT4 | FDIV, frt, fra, frb, rc);
@@ -1954,6 +1931,18 @@ bool Assembler::IsNop(Instr instr, int type) {
return instr == (ORI | reg * B21 | reg * B16);
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ // TODO(v8:11872) This function should never be called if Sparkplug on heap
+ // compilation is not supported.
+ UNREACHABLE();
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ // TODO(v8:11872) This function should never be called if Sparkplug on heap
+ // compilation is not supported.
+ UNREACHABLE();
+}
+
void Assembler::GrowBuffer(int needed) {
DCHECK_EQ(buffer_start_, buffer_->start());
@@ -2003,7 +1992,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -2013,7 +2003,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint64_t*>(pc_) = value;
@@ -2023,7 +2014,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uintptr_t*>(pc_) = data;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 37d5674078..f46090cec5 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -196,6 +196,15 @@ class Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fall back from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
void MaybeEmitOutOfLineConstantPool() { EmitConstantPool(); }
inline void CheckTrampolinePoolQuick(int extra_space = 0) {
@@ -454,25 +463,37 @@ class Assembler : public AssemblerBase {
PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
#undef DECLARE_PPC_XX2_INSTRUCTIONS
-#define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
- inline void name(const Simd128Register rt, const Simd128Register ra, \
- const Simd128Register rb) { \
- xx3_form(instr_name, rt, ra, rb); \
- }
-
- inline void xx3_form(Instr instr, Simd128Register t, Simd128Register a,
- Simd128Register b) {
- // Using VR (high VSR) registers.
- int AX = 1;
- int BX = 1;
- int TX = 1;
+#define DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb) { \
+ xx3_form(instr_name, rt, ra, rb); \
+ }
+#define DECLARE_PPC_XX3_SCALAR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const DoubleRegister rt, const DoubleRegister ra, \
+ const DoubleRegister rb) { \
+ xx3_form(instr_name, rt, ra, rb); \
+ }
+
+ template <typename T>
+ inline void xx3_form(Instr instr, T t, T a, T b) {
+ static_assert(std::is_same<T, Simd128Register>::value ||
+ std::is_same<T, DoubleRegister>::value,
+ "VSX only uses FP or Vector registers.");
+ // Using FP (low VSR) registers.
+ int AX = 0, BX = 0, TX = 0;
+ // Using VR (high VSR) registers when Simd registers are used.
+ if (std::is_same<T, Simd128Register>::value) {
+ AX = BX = TX = 1;
+ }
emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
(b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
}
- PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
-#undef DECLARE_PPC_XX3_INSTRUCTIONS
+ PPC_XX3_OPCODE_VECTOR_LIST(DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS)
+ PPC_XX3_OPCODE_SCALAR_LIST(DECLARE_PPC_XX3_SCALAR_INSTRUCTIONS)
+#undef DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS
+#undef DECLARE_PPC_XX3_SCALAR_INSTRUCTIONS
#define DECLARE_PPC_VX_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
inline void name(const Simd128Register rt, const Simd128Register rb, \
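
Folding the scalar and vector XX3 forms into one template works because the encodings differ only in the AX/BX/TX bits, which select the high (VR) or low (FP) half of the 64-entry VSX register file. An illustrative encoder, with the bit positions assumed from the code above:

    #include <cstdint>

    // XX3-form word: opcode | T*B21 | A*B16 | B*B11 | AX*B2 | BX*B1 | TX.
    constexpr uint32_t EncodeXX3(uint32_t opcode, int t, int a, int b,
                                 bool vector_regs) {
      const uint32_t x = vector_regs ? 1u : 0u;  // high (VR) vs low (FP) half
      return opcode | (t & 0x1F) << 21 | (a & 0x1F) << 16 | (b & 0x1F) << 11 |
             x << 2 | x << 1 | x;
    }

    static_assert(EncodeXX3(0xF0000100u, 1, 2, 3, false) ==
                      (0xF0000100u | 1u << 21 | 2u << 16 | 3u << 11),
                  "scalar operands leave AX/BX/TX clear (FP registers)");
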
@@ -492,6 +513,15 @@ class Assembler : public AssemblerBase {
inline void name(const Simd128Register rt, const Operand& imm) { \
vx_form(instr_name, rt, imm); \
}
+#define DECLARE_PPC_VX_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
+ inline void name(const Register rt, const Simd128Register rb) { \
+ vx_form(instr_name, rt, rb); \
+ }
+#define DECLARE_PPC_VX_INSTRUCTIONS_G_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Register rb, \
+ const Operand& imm) { \
+ vx_form(instr_name, rt, rb, imm); \
+ }
inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb,
const Operand& imm) {
@@ -509,6 +539,14 @@ class Assembler : public AssemblerBase {
inline void vx_form(Instr instr, Simd128Register rt, const Operand& imm) {
emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16);
}
+ inline void vx_form(Instr instr, Register rt, Simd128Register rb) {
+ emit(instr | (rt.code() & 0x1F) * B21 | (rb.code() & 0x1F) * B11);
+ }
+ inline void vx_form(Instr instr, Simd128Register rt, Register rb,
+ const Operand& imm) {
+ emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16 |
+ (rb.code() & 0x1F) * B11);
+ }
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
@@ -517,10 +555,14 @@ class Assembler : public AssemblerBase {
DECLARE_PPC_VX_INSTRUCTIONS_C_FORM) /* OPCODE_D_FORM can use
INSTRUCTIONS_C_FORM */
PPC_VX_OPCODE_E_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_E_FORM)
+ PPC_VX_OPCODE_F_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_F_FORM)
+ PPC_VX_OPCODE_G_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_G_FORM)
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_C_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_E_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_F_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_G_FORM
#define DECLARE_PPC_VA_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
inline void name(const Simd128Register rt, const Simd128Register ra, \
@@ -565,6 +607,7 @@ class Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
// Branch instructions
void bclr(BOfield bo, int condition_bit, LKBit lk);
@@ -826,6 +869,7 @@ class Assembler : public AssemblerBase {
void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void mulli(Register dst, Register src, const Operand& imm);
void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
@@ -1039,6 +1083,8 @@ class Assembler : public AssemblerBase {
void fmsub(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, RCBit rc = LeaveRC);
// Vector instructions
void mfvsrd(const Register ra, const Simd128Register r);
@@ -1143,8 +1189,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 7dfb3d427d..e7f1ff311d 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -140,69 +140,71 @@ inline Condition NegateCondition(Condition cond) {
// access the various ISA fields.
using Instr = uint32_t;
-#define PPC_XX3_OPCODE_LIST(V) \
- /* VSX Scalar Add Double-Precision */ \
- V(xsadddp, XSADDDP, 0xF0000100) \
- /* VSX Scalar Add Single-Precision */ \
- V(xsaddsp, XSADDSP, 0xF0000000) \
- /* VSX Scalar Compare Ordered Double-Precision */ \
- V(xscmpodp, XSCMPODP, 0xF0000158) \
- /* VSX Scalar Compare Unordered Double-Precision */ \
- V(xscmpudp, XSCMPUDP, 0xF0000118) \
- /* VSX Scalar Copy Sign Double-Precision */ \
- V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
- /* VSX Scalar Divide Double-Precision */ \
- V(xsdivdp, XSDIVDP, 0xF00001C0) \
- /* VSX Scalar Divide Single-Precision */ \
- V(xsdivsp, XSDIVSP, 0xF00000C0) \
- /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
- V(xsmaddadp, XSMADDADP, 0xF0000108) \
- /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
- V(xsmaddasp, XSMADDASP, 0xF0000008) \
- /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
- V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
- /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
- V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
- /* VSX Scalar Maximum Double-Precision */ \
- V(xsmaxdp, XSMAXDP, 0xF0000500) \
- /* VSX Scalar Minimum Double-Precision */ \
- V(xsmindp, XSMINDP, 0xF0000540) \
- /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
- V(xsmsubadp, XSMSUBADP, 0xF0000188) \
- /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
- V(xsmsubasp, XSMSUBASP, 0xF0000088) \
- /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
- V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
- /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
- V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
- /* VSX Scalar Multiply Double-Precision */ \
- V(xsmuldp, XSMULDP, 0xF0000180) \
- /* VSX Scalar Multiply Single-Precision */ \
- V(xsmulsp, XSMULSP, 0xF0000080) \
- /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
- V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
- /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
- V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
- /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
- V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
- /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
- V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
- /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
- V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
- /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
- V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
- /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
- V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
- /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
- V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
- /* VSX Scalar Reciprocal Estimate Double-Precision */ \
- V(xsredp, XSREDP, 0xF0000168) \
- /* VSX Scalar Subtract Double-Precision */ \
- V(xssubdp, XSSUBDP, 0xF0000140) \
- /* VSX Scalar Subtract Single-Precision */ \
- V(xssubsp, XSSUBSP, 0xF0000040) \
- /* VSX Scalar Test for software Divide Double-Precision */ \
- V(xstdivdp, XSTDIVDP, 0xF00001E8) \
+#define PPC_XX3_OPCODE_SCALAR_LIST(V) \
+ /* VSX Scalar Add Double-Precision */ \
+ V(xsadddp, XSADDDP, 0xF0000100) \
+ /* VSX Scalar Add Single-Precision */ \
+ V(xsaddsp, XSADDSP, 0xF0000000) \
+ /* VSX Scalar Compare Ordered Double-Precision */ \
+ V(xscmpodp, XSCMPODP, 0xF0000158) \
+ /* VSX Scalar Compare Unordered Double-Precision */ \
+ V(xscmpudp, XSCMPUDP, 0xF0000118) \
+ /* VSX Scalar Copy Sign Double-Precision */ \
+ V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
+ /* VSX Scalar Divide Double-Precision */ \
+ V(xsdivdp, XSDIVDP, 0xF00001C0) \
+ /* VSX Scalar Divide Single-Precision */ \
+ V(xsdivsp, XSDIVSP, 0xF00000C0) \
+ /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
+ V(xsmaddadp, XSMADDADP, 0xF0000108) \
+ /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
+ V(xsmaddasp, XSMADDASP, 0xF0000008) \
+ /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
+ V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
+ /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
+ V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
+ /* VSX Scalar Maximum Double-Precision */ \
+ V(xsmaxdp, XSMAXDP, 0xF0000500) \
+ /* VSX Scalar Minimum Double-Precision */ \
+ V(xsmindp, XSMINDP, 0xF0000540) \
+ /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
+ V(xsmsubadp, XSMSUBADP, 0xF0000188) \
+ /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
+ V(xsmsubasp, XSMSUBASP, 0xF0000088) \
+ /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
+ V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
+ /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
+ V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
+ /* VSX Scalar Multiply Double-Precision */ \
+ V(xsmuldp, XSMULDP, 0xF0000180) \
+ /* VSX Scalar Multiply Single-Precision */ \
+ V(xsmulsp, XSMULSP, 0xF0000080) \
+ /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
+ V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
+ /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
+ V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
+ /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
+ V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
+ /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
+ V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
+ /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
+ V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
+ /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
+ V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
+ /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
+ V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
+ /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
+ V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
+ /* VSX Scalar Reciprocal Estimate Double-Precision */ \
+ V(xsredp, XSREDP, 0xF0000168) \
+ /* VSX Scalar Subtract Double-Precision */ \
+ V(xssubdp, XSSUBDP, 0xF0000140) \
+ /* VSX Scalar Subtract Single-Precision */ \
+ V(xssubsp, XSSUBSP, 0xF0000040) \
+ /* VSX Scalar Test for software Divide Double-Precision */ \
+ V(xstdivdp, XSTDIVDP, 0xF00001E8)
+
+#define PPC_XX3_OPCODE_VECTOR_LIST(V) \
/* VSX Vector Add Double-Precision */ \
V(xvadddp, XVADDDP, 0xF0000300) \
/* VSX Vector Add Single-Precision */ \
@@ -1172,6 +1174,10 @@ using Instr = uint32_t;
V(cntlzw, CNTLZWX, 0x7C000034) \
/* Count Leading Zeros Doubleword */ \
V(cntlzd, CNTLZDX, 0x7C000074) \
+  /* Count Trailing Zeros Word */                                     \
+ V(cnttzw, CNTTZWX, 0x7C000434) \
+  /* Count Trailing Zeros Doubleword */                               \
+ V(cnttzd, CNTTZDX, 0x7C000474) \
/* Population Count Byte-wise */ \
V(popcntb, POPCNTB, 0x7C0000F4) \
/* Population Count Words */ \
@@ -2309,6 +2315,8 @@ using Instr = uint32_t;
V(vmulosw, VMULOSW, 0x10000188) \
/* Vector Multiply Odd Unsigned Word */ \
V(vmulouw, VMULOUW, 0x10000088) \
+ /* Vector Multiply Low Doubleword */ \
+ V(vmulld, VMULLD, 0x100001C9) \
/* Vector Sum across Quarter Signed Halfword Saturate */ \
V(vsum4shs, VSUM4SHS, 0x10000648) \
/* Vector Pack Unsigned Word Unsigned Saturate */ \
@@ -2454,6 +2462,24 @@ using Instr = uint32_t;
/* Vector Splat Immediate Signed Word */ \
V(vspltisw, VSPLTISW, 0x1000038C)
+#define PPC_VX_OPCODE_F_FORM_LIST(V) \
+ /* Vector Extract Byte Mask */ \
+ V(vextractbm, VEXTRACTBM, 0x10080642) \
+ /* Vector Extract Halfword Mask */ \
+ V(vextracthm, VEXTRACTHM, 0x10090642) \
+ /* Vector Extract Word Mask */ \
+ V(vextractwm, VEXTRACTWM, 0x100A0642) \
+ /* Vector Extract Doubleword Mask */ \
+ V(vextractdm, VEXTRACTDM, 0x100B0642)
+
+#define PPC_VX_OPCODE_G_FORM_LIST(V) \
+ /* Vector Insert Word from GPR using \
+immediate-specified index */ \
+ V(vinsw, VINSW, 0x100000CF) \
+ /* Vector Insert Doubleword from GPR using \
+immediate-specified index */ \
+ V(vinsd, VINSD, 0x100001CF)
+
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2606,6 +2632,8 @@ using Instr = uint32_t;
PPC_VX_OPCODE_C_FORM_LIST(V) \
PPC_VX_OPCODE_D_FORM_LIST(V) \
PPC_VX_OPCODE_E_FORM_LIST(V) \
+ PPC_VX_OPCODE_F_FORM_LIST(V) \
+ PPC_VX_OPCODE_G_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_XS_OPCODE_LIST(V) \
@@ -2653,7 +2681,8 @@ using Instr = uint32_t;
PPC_VC_OPCODE_LIST(V) \
PPC_XX1_OPCODE_LIST(V) \
PPC_XX2_OPCODE_LIST(V) \
- PPC_XX3_OPCODE_LIST(V) \
+ PPC_XX3_OPCODE_VECTOR_LIST(V) \
+ PPC_XX3_OPCODE_SCALAR_LIST(V) \
PPC_XX4_OPCODE_LIST(V)
enum Opcode : uint32_t {
@@ -2944,6 +2973,7 @@ class Instruction {
opcode = extcode | BitField(20, 16) | BitField(10, 0);
switch (opcode) {
PPC_VX_OPCODE_D_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_F_FORM_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 0);
@@ -2952,6 +2982,7 @@ class Instruction {
PPC_VX_OPCODE_B_FORM_LIST(OPCODE_CASES)
PPC_VX_OPCODE_C_FORM_LIST(OPCODE_CASES)
PPC_VX_OPCODE_E_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_G_FORM_LIST(OPCODE_CASES)
PPC_VX_OPCODE_UNUSED_LIST(OPCODE_CASES)
PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
@@ -3002,7 +3033,8 @@ class Instruction {
opcode = extcode | BitField(10, 3);
switch (opcode) {
PPC_EVS_OPCODE_LIST(OPCODE_CASES)
- PPC_XX3_OPCODE_LIST(OPCODE_CASES)
+ PPC_XX3_OPCODE_VECTOR_LIST(OPCODE_CASES)
+ PPC_XX3_OPCODE_SCALAR_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(8, 1);
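The XX3 list is now split into a scalar half and a vector half, but both keep the existing X-macro shape: each list applies a caller-supplied macro V to (mnemonic, NAME, encoding) triples, and the decoder above stamps them out as case labels via OPCODE_CASES. A minimal standalone sketch of that expansion pattern (illustration only; the DEMO_* names are invented here and are not part of V8):

#include <cstdint>
#include <cstdio>

// A tiny stand-in for one of the opcode lists in the diff above.
#define DEMO_XX3_SCALAR_LIST(V)   \
  V(xsadddp, XSADDDP, 0xF0000100) \
  V(xssubdp, XSSUBDP, 0xF0000140)

// Expansion 1: declare enum constants from the list.
#define DECLARE_OPCODE(name, opcode_name, opcode_value) opcode_name = opcode_value,
enum DemoOpcode : uint32_t { DEMO_XX3_SCALAR_LIST(DECLARE_OPCODE) };
#undef DECLARE_OPCODE

// Expansion 2: generate case labels, the way the Instruction decoder above
// uses OPCODE_CASES for each form list.
#define OPCODE_CASE(name, opcode_name, opcode_value) \
  case opcode_name:                                  \
    return #name;
const char* DemoName(uint32_t op) {
  switch (op) { DEMO_XX3_SCALAR_LIST(OPCODE_CASE) }
  return "unknown";
}
#undef OPCODE_CASE

int main() { std::printf("%s\n", DemoName(0xF0000140)); }  // prints "xssubdp"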
diff --git a/deps/v8/src/codegen/ppc/cpu-ppc.cc b/deps/v8/src/codegen/ppc/cpu-ppc.cc
index 9559af7778..1ded190f05 100644
--- a/deps/v8/src/codegen/ppc/cpu-ppc.cc
+++ b/deps/v8/src/codegen/ppc/cpu-ppc.cc
@@ -8,7 +8,7 @@
#include "src/codegen/cpu-features.h"
-#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+#define INSTR_AND_DATA_CACHE_COHERENCY PPC_6_PLUS
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 03a197f9fb..f243055490 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -16,6 +16,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -312,7 +313,7 @@ void TurboAssembler::Drop(int count) {
}
void TurboAssembler::Drop(Register count, Register scratch) {
- ShiftLeftImm(scratch, count, Operand(kSystemPointerSizeLog2));
+ ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
add(sp, sp, scratch);
}
@@ -335,7 +336,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
if (order == kNormal) {
cmpi(size, Operand::Zero());
beq(&done);
- ShiftLeftImm(scratch, size, Operand(kSystemPointerSizeLog2));
+ ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
add(scratch, array, scratch);
mtctr(size);
@@ -1010,16 +1011,16 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
blt(&less_than_32);
// If shift >= 32
andi(scratch, shift, Operand(0x1F));
- slw(dst_high, src_low, scratch);
+ ShiftLeftU32(dst_high, src_low, scratch);
li(dst_low, Operand::Zero());
b(&done);
bind(&less_than_32);
// If shift < 32
subfic(scratch, shift, Operand(32));
- slw(dst_high, src_high, shift);
+ ShiftLeftU32(dst_high, src_high, shift);
srw(scratch, src_low, scratch);
orx(dst_high, dst_high, scratch);
- slw(dst_low, src_low, shift);
+ ShiftLeftU32(dst_low, src_low, shift);
bind(&done);
}
@@ -1033,15 +1034,15 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
li(dst_low, Operand::Zero());
} else if (shift > 32) {
shift &= 0x1F;
- slwi(dst_high, src_low, Operand(shift));
+ ShiftLeftU32(dst_high, src_low, Operand(shift));
li(dst_low, Operand::Zero());
} else if (shift == 0) {
Move(dst_low, src_low);
Move(dst_high, src_high);
} else {
- slwi(dst_high, src_high, Operand(shift));
+ ShiftLeftU32(dst_high, src_high, Operand(shift));
rlwimi(dst_high, src_low, shift, 32 - shift, 31);
- slwi(dst_low, src_low, Operand(shift));
+ ShiftLeftU32(dst_low, src_low, Operand(shift));
}
}
@@ -1064,7 +1065,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
// If shift < 32
subfic(scratch, shift, Operand(32));
srw(dst_low, src_low, shift);
- slw(scratch, src_high, scratch);
+ ShiftLeftU32(scratch, src_high, scratch);
orx(dst_low, dst_low, scratch);
srw(dst_high, src_high, shift);
bind(&done);
@@ -1110,7 +1111,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
// If shift < 32
subfic(scratch, shift, Operand(32));
srw(dst_low, src_low, shift);
- slw(scratch, src_high, scratch);
+ ShiftLeftU32(scratch, src_high, scratch);
orx(dst_low, dst_low, scratch);
sraw(dst_high, src_high, shift);
bind(&done);
@@ -1197,6 +1198,47 @@ void TurboAssembler::Prologue() {
}
}
+void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ int receiver_bytes =
+ (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
+ switch (type) {
+ case kCountIsInteger: {
+ ShiftLeftU64(ip, count, Operand(kSystemPointerSizeLog2));
+ add(sp, sp, ip);
+ break;
+ }
+ case kCountIsSmi: {
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ SmiToPtrArrayOffset(count, count);
+ add(sp, sp, count);
+ break;
+ }
+ case kCountIsBytes: {
+ add(sp, sp, count);
+ break;
+ }
+ }
+ if (receiver_bytes != 0) {
+ addi(sp, sp, Operand(receiver_bytes));
+ }
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Register receiver,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, receiver));
+ if (mode == kCountExcludesReceiver) {
+    // Drop arguments without the receiver and overwrite the old receiver.
+ DropArguments(argc, type, kCountIncludesReceiver);
+ StoreU64(receiver, MemOperand(sp));
+ } else {
+ DropArguments(argc, type, mode);
+ push(receiver);
+ }
+}
+
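DropArguments above reduces receiver handling to plain pointer arithmetic: the count is scaled to bytes (directly, via a Smi-to-offset shift, or not at all for kCountIsBytes), added to sp, and one extra slot is popped when the count excludes the receiver. A small model of that arithmetic for the kCountIsInteger case, assuming 8-byte system pointers (illustrative only, not V8 code):

#include <cstdint>
#include <cassert>

constexpr int64_t kPointerSizeDemo = 8;  // assumed kSystemPointerSize

// Models DropArguments(count, kCountIsInteger, mode) as stack-pointer math.
int64_t DropIntegerArgs(int64_t sp, int64_t argc, bool count_excludes_receiver) {
  sp += argc * kPointerSizeDemo;                        // ShiftLeftU64 + add
  if (count_excludes_receiver) sp += kPointerSizeDemo;  // pop the receiver slot too
  return sp;
}

int main() {
  // Three arguments plus the implicit receiver: four slots are popped.
  assert(DropIntegerArgs(0x1000, 3, /*count_excludes_receiver=*/true) ==
         0x1000 + 4 * kPointerSizeDemo);
}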
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
@@ -1369,7 +1411,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count.is_valid()) {
if (!argument_count_is_length) {
- ShiftLeftImm(argument_count, argument_count,
+ ShiftLeftU64(argument_count, argument_count,
Operand(kSystemPointerSizeLog2));
}
add(sp, sp, argument_count);
@@ -1384,54 +1426,6 @@ void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d1);
}
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
-
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kSystemPointerSize to count the
- // receiver argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- ShiftLeftImm(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
- add(dst_reg, fp, dst_reg);
- AddS64(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize),
- scratch0);
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kSystemPointerSize is for the receiver.
- ShiftLeftImm(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
- add(src_reg, sp, src_reg);
- AddS64(src_reg, src_reg, Operand(kSystemPointerSize), scratch0);
-
- if (FLAG_debug_code) {
- CmpU64(src_reg, dst_reg);
- Check(lt, AbortReason::kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- RestoreFrameStateForTailCall();
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop;
- addi(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
- mtctr(tmp_reg);
- bind(&loop);
- LoadU64WithUpdate(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
- StoreU64WithUpdate(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
- bdnz(&loop);
-
- // Leave current frame.
- mr(sp, dst_reg);
-}
-
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
@@ -1457,7 +1451,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
// here which will cause scratch to become negative.
sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- ShiftLeftImm(r0, num_args, Operand(kSystemPointerSizeLog2));
+ ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
CmpS64(scratch, r0);
ble(stack_overflow); // Signed comparison.
}
@@ -1496,7 +1490,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Label copy;
Register src = r9, dest = r8;
addi(src, sp, Operand(-kSystemPointerSize));
- ShiftLeftImm(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
+ ShiftLeftU64(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
sub(sp, sp, r0);
// Update stack pointer.
addi(dest, sp, Operand(-kSystemPointerSize));
@@ -1815,6 +1809,10 @@ void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
Label check_zero, return_left, return_right, return_nan, done;
fcmpu(lhs, rhs);
bunordered(&return_nan);
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
+ xsmindp(dst, lhs, rhs);
+ b(&done);
+ }
beq(&check_zero);
ble(&return_left);
b(&return_right);
@@ -1859,6 +1857,10 @@ void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
Label check_zero, return_left, return_right, return_nan, done;
fcmpu(lhs, rhs);
bunordered(&return_nan);
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
+ xsmaxdp(dst, lhs, rhs);
+ b(&done);
+ }
beq(&check_zero);
bge(&return_left);
b(&return_right);
@@ -2389,8 +2391,9 @@ void TurboAssembler::CheckPageFlag(
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
+ DCHECK(scratch != r0);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset), r0);
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
@@ -2449,7 +2452,7 @@ void TurboAssembler::LoadDoubleLiteral(DoubleRegister result,
litVal.dval = value.AsUint64();
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mov(scratch, Operand(litVal.ival));
mtfprd(result, scratch);
return;
@@ -2475,7 +2478,7 @@ void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// sign-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mtfprwa(dst, src);
return;
}
@@ -2500,7 +2503,7 @@ void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// zero-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mtfprwz(dst, src);
return;
}
@@ -2527,7 +2530,7 @@ void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
#endif
Register src) {
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mtfprd(dst, src);
return;
}
@@ -2550,8 +2553,8 @@ void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
Register src_hi,
Register src_lo,
Register scratch) {
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
- sldi(scratch, src_hi, Operand(32));
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ ShiftLeftU64(scratch, src_hi, Operand(32));
rldimi(scratch, src_lo, 0, 32);
mtfprd(dst, scratch);
return;
@@ -2569,7 +2572,7 @@ void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprd(scratch, dst);
rldimi(scratch, src, 0, 32);
mtfprd(dst, scratch);
@@ -2588,7 +2591,7 @@ void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprd(scratch, dst);
rldimi(scratch, src, 32, 0);
mtfprd(dst, scratch);
@@ -2606,7 +2609,7 @@ void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprwz(dst, src);
return;
}
@@ -2621,7 +2624,7 @@ void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprd(dst, src);
srdi(dst, dst, Operand(32));
return;
@@ -2641,7 +2644,7 @@ void TurboAssembler::MovDoubleToInt64(
#endif
Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
- if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprd(dst, src);
return;
}
@@ -2705,6 +2708,198 @@ void TurboAssembler::SubS64(Register dst, Register src, const Operand& value,
}
}
+void TurboAssembler::AddS32(Register dst, Register src, Register value,
+ RCBit r) {
+ AddS64(dst, src, value, LeaveOE, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::AddS32(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ AddS64(dst, src, value, scratch, LeaveOE, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::SubS32(Register dst, Register src, Register value,
+ RCBit r) {
+ SubS64(dst, src, value, LeaveOE, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::SubS32(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ SubS64(dst, src, value, scratch, LeaveOE, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::MulS64(Register dst, Register src, const Operand& value,
+ Register scratch, OEBit s, RCBit r) {
+ if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
+ mulli(dst, src, value);
+ } else {
+ mov(scratch, value);
+ mulld(dst, src, scratch, s, r);
+ }
+}
+
+void TurboAssembler::MulS64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ mulld(dst, src, value, s, r);
+}
+
+void TurboAssembler::MulS32(Register dst, Register src, const Operand& value,
+ Register scratch, OEBit s, RCBit r) {
+ MulS64(dst, src, value, scratch, s, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ MulS64(dst, src, value, s, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ if (is_uint16(value.immediate()) && r == SetRC) {
+ andi(dst, src, value);
+ } else {
+ mov(scratch, value);
+ and_(dst, src, scratch, r);
+ }
+}
+
+void TurboAssembler::AndU64(Register dst, Register src, Register value,
+ RCBit r) {
+ and_(dst, src, value, r);
+}
+
+void TurboAssembler::OrU64(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ if (is_int16(value.immediate()) && r == LeaveRC) {
+ ori(dst, src, value);
+ } else {
+ mov(scratch, value);
+ orx(dst, src, scratch, r);
+ }
+}
+
+void TurboAssembler::OrU64(Register dst, Register src, Register value,
+ RCBit r) {
+ orx(dst, src, value, r);
+}
+
+void TurboAssembler::XorU64(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ if (is_int16(value.immediate()) && r == LeaveRC) {
+ xori(dst, src, value);
+ } else {
+ mov(scratch, value);
+ xor_(dst, src, scratch, r);
+ }
+}
+
+void TurboAssembler::XorU64(Register dst, Register src, Register value,
+ RCBit r) {
+ xor_(dst, src, value, r);
+}
+
+void TurboAssembler::AndU32(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ AndU64(dst, src, value, scratch, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::AndU32(Register dst, Register src, Register value,
+ RCBit r) {
+ AndU64(dst, src, value, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::OrU32(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ OrU64(dst, src, value, scratch, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::OrU32(Register dst, Register src, Register value,
+ RCBit r) {
+ OrU64(dst, src, value, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::XorU32(Register dst, Register src, const Operand& value,
+ Register scratch, RCBit r) {
+ XorU64(dst, src, value, scratch, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::XorU32(Register dst, Register src, Register value,
+ RCBit r) {
+ XorU64(dst, src, value, r);
+ extsw(dst, dst, r);
+}
+
+void TurboAssembler::ShiftLeftU64(Register dst, Register src,
+ const Operand& value, RCBit r) {
+ sldi(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightU64(Register dst, Register src,
+ const Operand& value, RCBit r) {
+ srdi(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightS64(Register dst, Register src,
+ const Operand& value, RCBit r) {
+ sradi(dst, src, value.immediate(), r);
+}
+
+void TurboAssembler::ShiftLeftU32(Register dst, Register src,
+ const Operand& value, RCBit r) {
+ slwi(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightU32(Register dst, Register src,
+ const Operand& value, RCBit r) {
+ srwi(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightS32(Register dst, Register src,
+ const Operand& value, RCBit r) {
+ srawi(dst, src, value.immediate(), r);
+}
+
+void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register value,
+ RCBit r) {
+ sld(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightU64(Register dst, Register src, Register value,
+ RCBit r) {
+ srd(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightS64(Register dst, Register src, Register value,
+ RCBit r) {
+ srad(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register value,
+ RCBit r) {
+ slw(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightU32(Register dst, Register src, Register value,
+ RCBit r) {
+ srw(dst, src, value, r);
+}
+
+void TurboAssembler::ShiftRightS32(Register dst, Register src, Register value,
+ RCBit r) {
+ sraw(dst, src, value, r);
+}
+
void TurboAssembler::CmpS64(Register src1, Register src2, CRegister cr) {
cmp(src1, src2, cr);
}
@@ -2765,54 +2960,48 @@ void TurboAssembler::CmpU32(Register src1, Register src2, CRegister cr) {
cmplw(src1, src2, cr);
}
-void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
- RCBit rc) {
- if (rb.is_reg()) {
- and_(ra, rs, rb.rm(), rc);
- } else {
- if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
- rc == SetRC) {
- andi(ra, rs, rb);
- } else {
- // mov handles the relocation.
- DCHECK(rs != r0);
- mov(r0, rb);
- and_(ra, rs, r0, rc);
- }
- }
+void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fadd(dst, lhs, rhs, r);
}
-void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
- if (rb.is_reg()) {
- orx(ra, rs, rb.rm(), rc);
- } else {
- if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
- rc == LeaveRC) {
- ori(ra, rs, rb);
- } else {
- // mov handles the relocation.
- DCHECK(rs != r0);
- mov(r0, rb);
- orx(ra, rs, r0, rc);
- }
- }
+void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fsub(dst, lhs, rhs, r);
}
-void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
- RCBit rc) {
- if (rb.is_reg()) {
- xor_(ra, rs, rb.rm(), rc);
- } else {
- if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
- rc == LeaveRC) {
- xori(ra, rs, rb);
- } else {
- // mov handles the relocation.
- DCHECK(rs != r0);
- mov(r0, rb);
- xor_(ra, rs, r0, rc);
- }
- }
+void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fmul(dst, lhs, rhs, r);
+}
+
+void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fdiv(dst, lhs, rhs, r);
+}
+
+void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fadd(dst, lhs, rhs, r);
+ frsp(dst, dst, r);
+}
+
+void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fsub(dst, lhs, rhs, r);
+ frsp(dst, dst, r);
+}
+
+void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fmul(dst, lhs, rhs, r);
+ frsp(dst, dst, r);
+}
+
+void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fdiv(dst, lhs, rhs, r);
+ frsp(dst, dst, r);
}
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
@@ -2858,7 +3047,7 @@ void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
Register scratch, RCBit rc) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- And(dst, src, Operand(smi), rc);
+ AndU64(dst, src, Operand(smi), scratch, rc);
#else
LoadSmiLiteral(scratch, smi);
and_(dst, src, scratch, rc);
@@ -3285,11 +3474,11 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
// The builtin_index register contains the builtin index as a Smi.
if (SmiValuesAre32Bits()) {
- ShiftRightArithImm(builtin_index, builtin_index,
- kSmiShift - kSystemPointerSizeLog2);
+ ShiftRightS64(builtin_index, builtin_index,
+ Operand(kSmiShift - kSystemPointerSizeLog2));
} else {
DCHECK(SmiValuesAre31Bits());
- ShiftLeftImm(builtin_index, builtin_index,
+ ShiftLeftU64(builtin_index, builtin_index,
Operand(kSystemPointerSizeLog2 - kSmiShift));
}
AddS64(builtin_index, builtin_index,
@@ -3340,7 +3529,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
bind(&if_code_is_off_heap);
LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
r0);
- ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
+ ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
LoadU64(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()),
@@ -3434,6 +3623,24 @@ void TurboAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); }
void TurboAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); }
+void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) {
+ cntlzw(dst, src, r);
+}
+
+void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
+ cntlzd(dst, src, r);
+}
+
+void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
+ RCBit r) {
+ cnttzw(dst, src, r);
+}
+
+void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
+ RCBit r) {
+ cnttzd(dst, src, r);
+}
+
} // namespace internal
} // namespace v8
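All of the new 32-bit helpers above (AddS32, SubS32, MulS32, and the U32 logical forms) follow one pattern: perform the 64-bit operation, then extsw the destination so the register holds the sign-extended low 32 bits. A portable sketch of that semantic model (purely illustrative; the function below is not part of V8):

#include <cstdint>
#include <cassert>

// A 64-bit GPR after an "S32" helper: compute in 64 bits, then
// sign-extend the low word, like mulld/add followed by extsw.
int64_t S32Result(int64_t lhs, int64_t rhs, char op) {
  int64_t wide = 0;
  switch (op) {
    case '+': wide = lhs + rhs; break;
    case '-': wide = lhs - rhs; break;
    case '*': wide = lhs * rhs; break;
  }
  return static_cast<int64_t>(static_cast<int32_t>(wide));  // extsw
}

int main() {
  // 32-bit overflow wraps and the wrapped value is sign-extended.
  assert(S32Result(0x7FFFFFFF, 1, '+') == -0x80000000LL);
  assert(S32Result(1000000, 1000000, '*') ==
         static_cast<int32_t>(int64_t{1000000} * 1000000));
}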
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index bae3b4732c..035c29b1e5 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -38,23 +38,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
// These exist to provide portability between 32 and 64bit
#if V8_TARGET_ARCH_PPC64
-#define ShiftLeftImm sldi
-#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
-#define ShiftRightArithImm sradi
-#define ShiftLeft_ sld
-#define ShiftRight_ srd
-#define ShiftRightArith srad
#else
-#define ShiftLeftImm slwi
-#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
-#define ShiftRightArithImm srawi
-#define ShiftLeft_ slw
-#define ShiftRight_ srw
-#define ShiftRightArith sraw
#endif
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
@@ -124,6 +112,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StubPrologue(StackFrame::Type type);
void Prologue();
+ enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
+ enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
+ void DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
void PushStandardFrame(Register function_reg);
@@ -191,6 +187,86 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
void SubS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
RCBit r = LeaveRC);
+ void AddS32(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = LeaveRC);
+ void AddS32(Register dst, Register src, Register value, RCBit r = LeaveRC);
+ void SubS32(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = LeaveRC);
+ void SubS32(Register dst, Register src, Register value, RCBit r = LeaveRC);
+ void MulS64(Register dst, Register src, const Operand& value,
+ Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
+ void MulS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void MulS32(Register dst, Register src, const Operand& value,
+ Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
+ void MulS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void AndU64(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = SetRC);
+ void AndU64(Register dst, Register src, Register value, RCBit r = SetRC);
+ void OrU64(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = SetRC);
+ void OrU64(Register dst, Register src, Register value, RCBit r = LeaveRC);
+ void XorU64(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = SetRC);
+ void XorU64(Register dst, Register src, Register value, RCBit r = LeaveRC);
+ void AndU32(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = SetRC);
+ void AndU32(Register dst, Register src, Register value, RCBit r = SetRC);
+ void OrU32(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = SetRC);
+ void OrU32(Register dst, Register src, Register value, RCBit r = LeaveRC);
+ void XorU32(Register dst, Register src, const Operand& value,
+ Register scratch = r0, RCBit r = SetRC);
+ void XorU32(Register dst, Register src, Register value, RCBit r = LeaveRC);
+
+ void ShiftLeftU64(Register dst, Register src, const Operand& value,
+ RCBit r = LeaveRC);
+ void ShiftRightU64(Register dst, Register src, const Operand& value,
+ RCBit r = LeaveRC);
+ void ShiftRightS64(Register dst, Register src, const Operand& value,
+ RCBit r = LeaveRC);
+ void ShiftLeftU32(Register dst, Register src, const Operand& value,
+ RCBit r = LeaveRC);
+ void ShiftRightU32(Register dst, Register src, const Operand& value,
+ RCBit r = LeaveRC);
+ void ShiftRightS32(Register dst, Register src, const Operand& value,
+ RCBit r = LeaveRC);
+ void ShiftLeftU64(Register dst, Register src, Register value,
+ RCBit r = LeaveRC);
+ void ShiftRightU64(Register dst, Register src, Register value,
+ RCBit r = LeaveRC);
+ void ShiftRightS64(Register dst, Register src, Register value,
+ RCBit r = LeaveRC);
+ void ShiftLeftU32(Register dst, Register src, Register value,
+ RCBit r = LeaveRC);
+ void ShiftRightU32(Register dst, Register src, Register value,
+ RCBit r = LeaveRC);
+ void ShiftRightS32(Register dst, Register src, Register value,
+ RCBit r = LeaveRC);
+
+ void CountLeadingZerosU32(Register dst, Register src, RCBit r = LeaveRC);
+ void CountLeadingZerosU64(Register dst, Register src, RCBit r = LeaveRC);
+ void CountTrailingZerosU32(Register dst, Register src, RCBit r = LeaveRC);
+ void CountTrailingZerosU64(Register dst, Register src, RCBit r = LeaveRC);
+
+ void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+ void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
void Push(Register src) { push(src); }
// Push a handle.
@@ -356,10 +432,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
@@ -507,7 +579,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
if (COMPRESS_POINTERS_BOOL) {
srawi(dst, src, kSmiShift, rc);
} else {
- ShiftRightArithImm(dst, src, kSmiShift, rc);
+ ShiftRightS64(dst, src, Operand(kSmiShift), rc);
}
}
@@ -678,6 +750,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support
+ void SmiToPtrArrayOffset(Register dst, Register src) {
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
+ ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
+ ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
+#endif
+ }
+
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
@@ -836,10 +918,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// load a literal double value <value> to FPR <result>
- void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
- void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
- void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
-
void AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
void SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
void CmpSmiLiteral(Register src1, Smi smi, Register scratch,
@@ -1011,17 +1089,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Shift left by kSmiShift
void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
- ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
- }
-
- void SmiToPtrArrayOffset(Register dst, Register src) {
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
- ShiftLeftImm(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
-#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
- ShiftRightArithImm(dst, src, kSmiShift - kSystemPointerSizeLog2);
-#endif
+ ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
}
// Jump if either of the registers contain a non-smi.
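SmiToPtrArrayOffset (moved into TurboAssembler above) turns a tagged Smi count straight into a byte offset by shifting by the difference between the pointer-size log and the Smi shift. With 31-bit Smis on a 64-bit target, kSmiShift is 1 and kSystemPointerSizeLog2 is 3 (those values are assumptions of this sketch), so a Smi holding n is the bit pattern n << 1 and two more left shifts give n * 8 bytes:

#include <cstdint>
#include <cassert>

// Assumed constants for this sketch only.
constexpr int kSmiShiftDemo = 1;
constexpr int kSystemPointerSizeLog2Demo = 3;

// Matches ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift))
// in the branch where kSmiShift < kSystemPointerSizeLog2.
int64_t SmiToPtrArrayOffsetDemo(int64_t tagged_smi) {
  return tagged_smi << (kSystemPointerSizeLog2Demo - kSmiShiftDemo);
}

int main() {
  int64_t n = 5;                        // untagged argument count
  int64_t tagged = n << kSmiShiftDemo;  // Smi encoding of 5
  assert(SmiToPtrArrayOffsetDemo(tagged) == n * 8);  // 40 bytes
}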
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 14011fb9e3..0693d32459 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -157,7 +157,8 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteShortData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode)) {
+ RelocInfo::IsDeoptPosition(rmode) ||
+ RelocInfo::IsDeoptNodeId(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@@ -244,7 +245,8 @@ void RelocIterator::next() {
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode)) {
+ RelocInfo::IsDeoptPosition(rmode) ||
+ RelocInfo::IsDeoptNodeId(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -422,6 +424,10 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "deopt reason";
case DEOPT_ID:
return "deopt index";
+ case LITERAL_CONSTANT:
+ return "literal constant";
+ case DEOPT_NODE_ID:
+ return "deopt node id";
case CONST_POOL:
return "constant pool";
case VENEER_POOL:
@@ -525,6 +531,8 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_INLINING_ID:
case DEOPT_REASON:
case DEOPT_ID:
+ case LITERAL_CONSTANT:
+ case DEOPT_NODE_ID:
case CONST_POOL:
case VENEER_POOL:
case WASM_CALL:
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index f2a2d04523..918c93b13f 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -88,6 +88,10 @@ class RelocInfo {
DEOPT_INLINING_ID, // Deoptimization source position.
DEOPT_REASON, // Deoptimization reason index.
DEOPT_ID, // Deoptimization inlining id.
+ DEOPT_NODE_ID, // Id of the node that caused deoptimization. This
+ // information is only recorded in debug builds.
+
+  LITERAL_CONSTANT,  // A constant embedded in the instruction stream.
// This is not an actual reloc mode, but used to encode a long pc jump that
// cannot be encoded as part of another record.
@@ -169,6 +173,12 @@ class RelocInfo {
return mode == DEOPT_REASON;
}
static constexpr bool IsDeoptId(Mode mode) { return mode == DEOPT_ID; }
+ static constexpr bool IsLiteralConstant(Mode mode) {
+ return mode == LITERAL_CONSTANT;
+ }
+ static constexpr bool IsDeoptNodeId(Mode mode) {
+ return mode == DEOPT_NODE_ID;
+ }
static constexpr bool IsExternalReference(Mode mode) {
return mode == EXTERNAL_REFERENCE;
}
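The two new modes slot into the existing predicate pattern: each RelocInfo::Mode gets a constexpr classifier, and the writer/iterator changes earlier in this diff add DEOPT_NODE_ID to the group whose payload is serialized as a plain int. A reduced sketch of that dispatch shape (simplified types; not V8's actual classes):

#include <cstdint>
#include <vector>

enum class DemoMode : uint8_t { kDeoptReason, kDeoptId, kDeoptNodeId, kLiteralConstant };

constexpr bool IsDeoptNodeIdDemo(DemoMode m) { return m == DemoMode::kDeoptNodeId; }

// Mirrors the "|| RelocInfo::IsDeoptNodeId(rmode)" additions: these record
// kinds carry their data as an int in the reloc stream.
constexpr bool CarriesIntDataDemo(DemoMode m) {
  return m == DemoMode::kDeoptId || IsDeoptNodeIdDemo(m);
}

void WriteRecordDemo(std::vector<int>* stream, DemoMode mode, int data) {
  stream->push_back(static_cast<int>(mode));
  if (CarriesIntDataDemo(mode)) stream->push_back(data);  // WriteIntData analogue
}

int main() {
  std::vector<int> stream;
  WriteRecordDemo(&stream, DemoMode::kDeoptNodeId, 42);
  return stream.size() == 2 ? 0 : 1;
}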
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 3875a93158..0c322542a9 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -227,6 +227,9 @@ Assembler::Assembler(const AssemblerOptions& options,
block_buffer_growth_ = false;
}
+void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
+Assembler::~Assembler() { CHECK(constpool_.IsEmpty()); }
+
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
@@ -272,7 +275,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
- nop();
+ NOP();
}
}
@@ -1126,9 +1129,9 @@ void Assembler::GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1,
}
void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode,
- Register rs1, uint8_t uimm6) {
- DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_uint6(uimm6));
- ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) | ((uimm6 & 0x20) << 7) |
+ Register rs1, int8_t imm6) {
+ DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) |
((rs1.code() & 0x7) << kRvcRs1sShift) |
(funct3 << kRvcFunct3Shift) | (funct2 << 10);
emit(instr);
@@ -1264,7 +1267,10 @@ uint64_t Assembler::jump_address(Label* L) {
}
}
uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
- DCHECK_EQ(imm & 3, 0);
+ if (FLAG_riscv_c_extension)
+ DCHECK_EQ(imm & 1, 0);
+ else
+ DCHECK_EQ(imm & 3, 0);
return imm;
}
@@ -1292,7 +1298,10 @@ uint64_t Assembler::branch_long_offset(Label* L) {
}
}
int64_t offset = target_pos - pc_offset();
- DCHECK_EQ(offset & 3, 0);
+ if (FLAG_riscv_c_extension)
+ DCHECK_EQ(offset & 1, 0);
+ else
+ DCHECK_EQ(offset & 3, 0);
return static_cast<uint64_t>(offset);
}
@@ -2104,9 +2113,9 @@ void Assembler::c_lui(Register rd, int8_t imm6) {
GenInstrCI(0b011, C1, rd, imm6);
}
-void Assembler::c_slli(Register rd, uint8_t uimm6) {
- DCHECK(rd != zero_reg && uimm6 != 0);
- GenInstrCIU(0b000, C2, rd, uimm6);
+void Assembler::c_slli(Register rd, uint8_t shamt6) {
+ DCHECK(rd != zero_reg && shamt6 != 0);
+ GenInstrCIU(0b000, C2, rd, shamt6);
}
void Assembler::c_fldsp(FPURegister rd, uint16_t uimm9) {
@@ -2210,7 +2219,8 @@ void Assembler::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
void Assembler::c_lw(Register rd, Register rs1, uint16_t uimm7) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
- ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7));
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
uint8_t uimm5 =
((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
GenInstrCL(0b010, C0, rd, rs1, uimm5);
@@ -2218,14 +2228,16 @@ void Assembler::c_lw(Register rd, Register rs1, uint16_t uimm7) {
void Assembler::c_ld(Register rd, Register rs1, uint16_t uimm8) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
- ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCL(0b011, C0, rd, rs1, uimm5);
}
void Assembler::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
- ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCL(0b001, C0, rd, rs1, uimm5);
}
@@ -2234,7 +2246,8 @@ void Assembler::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
void Assembler::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
- ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7));
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
uint8_t uimm5 =
((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
GenInstrCS(0b110, C0, rs2, rs1, uimm5);
@@ -2242,14 +2255,16 @@ void Assembler::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
void Assembler::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
- ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCS(0b111, C0, rs2, rs1, uimm5);
}
void Assembler::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
- ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCS(0b101, C0, rs2, rs1, uimm5);
}
@@ -2282,19 +2297,35 @@ void Assembler::c_beqz(Register rs1, int16_t imm9) {
GenInstrCB(0b110, C1, rs1, uimm8);
}
-void Assembler::c_srli(Register rs1, uint8_t uimm6) {
- DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
- GenInstrCBA(0b100, 0b00, C1, rs1, uimm6);
+void Assembler::c_srli(Register rs1, int8_t shamt6) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b00, C1, rs1, shamt6);
+}
+
+void Assembler::c_srai(Register rs1, int8_t shamt6) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b01, C1, rs1, shamt6);
+}
+
+void Assembler::c_andi(Register rs1, int8_t imm6) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
+ GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
}
-void Assembler::c_srai(Register rs1, uint8_t uimm6) {
- DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
- GenInstrCBA(0b100, 0b01, C1, rs1, uimm6);
+// Definitions for choosing between compressed and non-compressed instructions
+
+void Assembler::NOP() {
+ if (FLAG_riscv_c_extension)
+ c_nop();
+ else
+ nop();
}
-void Assembler::c_andi(Register rs1, uint8_t uimm6) {
- DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
- GenInstrCBA(0b100, 0b10, C1, rs1, uimm6);
+void Assembler::EBREAK() {
+ if (FLAG_riscv_c_extension)
+ c_ebreak();
+ else
+ ebreak();
}
// Privileged
@@ -2755,8 +2786,28 @@ void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ set_target_value_at(address, object->ptr());
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ set_target_value_at(address, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
void Assembler::GrowBuffer() {
DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
+ bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -2798,6 +2849,16 @@ void Assembler::GrowBuffer() {
RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
}
}
+
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
+ }
+ }
+
DCHECK(!overflow());
}
@@ -2809,7 +2870,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
if (!is_buffer_growth_blocked()) CheckBuffer();
@@ -2819,7 +2881,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
if (!is_buffer_growth_blocked()) CheckBuffer();
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 720a654c58..88e403d366 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -158,9 +158,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler() { CHECK(constpool_.IsEmpty()); }
-
- void AbortedCodeGeneration() { constpool_.Clear(); }
+ virtual ~Assembler();
+ void AbortedCodeGeneration();
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
@@ -355,6 +354,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// ---------------------------------------------------------------------------
// Code generation.
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+  // This function is called when we fall back from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
@@ -364,6 +372,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
@@ -621,7 +630,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void c_addi4spn(Register rd, int16_t uimm10);
void c_li(Register rd, int8_t imm6);
void c_lui(Register rd, int8_t imm6);
- void c_slli(Register rd, uint8_t uimm6);
+ void c_slli(Register rd, uint8_t shamt6);
void c_fldsp(FPURegister rd, uint16_t uimm9);
void c_lwsp(Register rd, uint16_t uimm8);
void c_ldsp(Register rd, uint16_t uimm9);
@@ -651,9 +660,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void c_bnez(Register rs1, Label* L) { c_bnez(rs1, branch_offset(L)); }
void c_beqz(Register rs1, int16_t imm9);
inline void c_beqz(Register rs1, Label* L) { c_beqz(rs1, branch_offset(L)); }
- void c_srli(Register rs1, uint8_t uimm6);
- void c_srai(Register rs1, uint8_t uimm6);
- void c_andi(Register rs1, uint8_t uimm6);
+ void c_srli(Register rs1, int8_t shamt6);
+ void c_srai(Register rs1, int8_t shamt6);
+ void c_andi(Register rs1, int8_t imm6);
+ void NOP();
+ void EBREAK();
// Privileged
void uret();
@@ -826,8 +837,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
@@ -1017,6 +1028,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return target_address_at(
+ reinterpret_cast<Address>(buffer_->start() + pc_offset)) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1135,7 +1154,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11);
void GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1, uint8_t uimm8);
void GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode, Register rs1,
- uint8_t uimm6);
+ int8_t imm6);
// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index bd1f63b673..c9cb7687fd 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -1033,8 +1033,6 @@ class InstructionGetters : public T {
DCHECK(this->IsShortInstruction());
// | funct3 | imm[5] | rs1/rd | imm[4:0] | opcode |
// 15 12 6 2
- // | funct3 | nzimm[17] | rs1/rd | nzimm[16:12] | opcode |
- // 15 12 6 2
uint32_t Bits = this->InstructionBits();
int32_t imm6 = ((Bits & 0x1000) >> 7) | ((Bits & 0x7c) >> 2);
return imm6 << 26 >> 26;
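The GenInstrCBA change from an unsigned uimm6 to a signed imm6 lines up with the decoder directly above: the six bits are packed into instruction bit 12 and bits [6:2], and the getter sign-extends from bit 5 (the `imm6 << 26 >> 26` idiom). A small round-trip sketch of that packing, with the sign-extension written in a fully defined form (assumption: only the 6-bit field matters, other instruction bits are ignored here):

#include <cstdint>
#include <cassert>

// Pack a signed 6-bit immediate the way GenInstrCBA does:
// bits [4:0] go to instruction bits [6:2], bit 5 goes to bit 12.
uint32_t PackImm6(int8_t imm6) {
  return ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7);
}

// Recover it the way the getter above does, avoiding shifts of negative values.
int32_t UnpackImm6(uint32_t bits) {
  int32_t imm6 = ((bits & 0x1000) >> 7) | ((bits & 0x7c) >> 2);
  return (imm6 & 0x20) ? imm6 - 64 : imm6;  // sign-extend from bit 5
}

int main() {
  for (int v = -32; v <= 31; ++v) {
    assert(UnpackImm6(PackImm6(static_cast<int8_t>(v))) == v);
  }
}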
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index d94352951d..3baa71d1a2 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -16,6 +16,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -141,7 +142,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
Add64(fp, sp, Operand(kSystemPointerSize));
} else {
Push(ra, fp);
- mv(fp, sp);
+ Mv(fp, sp);
}
}
@@ -377,9 +378,19 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- addw(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) &&
+ ((rt.rm().code() & 0b11000) == 0b01000)) {
+ c_addw(rd, rt.rm());
+ } else {
+ addw(rd, rs, rt.rm());
+ }
} else {
- if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
+ (rd.code() == rs.code()) && (rd != zero_reg) &&
+ !MustUseReg(rt.rmode())) {
+ c_addiw(rd, static_cast<int8_t>(rt.immediate()));
+ } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
} else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
(2048 <= rt.immediate() && rt.immediate() <= 4094)) {
@@ -389,7 +400,7 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
addw(rd, rs, scratch);
}
}
@@ -397,9 +408,27 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- add(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ (rt.rm() != zero_reg) && (rs != zero_reg)) {
+ c_add(rd, rt.rm());
+ } else {
+ add(rd, rs, rt.rm());
+ }
} else {
- if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
+ (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
+ !MustUseReg(rt.rmode())) {
+ c_addi(rd, static_cast<int8_t>(rt.immediate()));
+ } else if (FLAG_riscv_c_extension && is_int10(rt.immediate()) &&
+ (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
+ (rd.code() == rs.code()) && (rd == sp) &&
+ !MustUseReg(rt.rmode())) {
+ c_addi16sp(static_cast<int16_t>(rt.immediate()));
+ } else if (FLAG_riscv_c_extension && ((rd.code() & 0b11000) == 0b01000) &&
+ (rs == sp) && is_uint10(rt.immediate()) &&
+ (rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
+ c_addi4spn(rd, static_cast<uint16_t>(rt.immediate()));
+ } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs, static_cast<int32_t>(rt.immediate()));
} else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
(2048 <= rt.immediate() && rt.immediate() <= 4094)) {
@@ -410,7 +439,7 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
add(rd, rs, scratch);
}
}
@@ -418,10 +447,23 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- subw(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) &&
+ ((rt.rm().code() & 0b11000) == 0b01000)) {
+ c_subw(rd, rt.rm());
+ } else {
+ subw(rd, rs, rt.rm());
+ }
} else {
DCHECK(is_int32(rt.immediate()));
- if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ (rd != zero_reg) && is_int6(-rt.immediate()) &&
+ !MustUseReg(rt.rmode())) {
+ c_addiw(
+ rd,
+ static_cast<int8_t>(
+              -rt.immediate()));  // No c_subiw instr, use c_addiw(x, -imm).
+ } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
@@ -434,11 +476,11 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
Register scratch = temps.Acquire();
if (-rt.immediate() >> 12 == 0 && !MustUseReg(rt.rmode())) {
// Use load -imm and addu when loading -imm generates one instruction.
- RV_li(scratch, -rt.immediate());
+ Li(scratch, -rt.immediate());
addw(rd, rs, scratch);
} else {
// li handles the relocation.
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
subw(rd, rs, scratch);
}
}
@@ -447,7 +489,25 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- sub(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) &&
+ ((rt.rm().code() & 0b11000) == 0b01000)) {
+ c_sub(rd, rt.rm());
+ } else {
+ sub(rd, rs, rt.rm());
+ }
+ } else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ (rd != zero_reg) && is_int6(-rt.immediate()) &&
+ (rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
+ c_addi(rd,
+ static_cast<int8_t>(
+               -rt.immediate()));  // No c_subi instr, use c_addi(x, -imm).
+
+ } else if (FLAG_riscv_c_extension && is_int10(-rt.immediate()) &&
+ (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
+ (rd.code() == rs.code()) && (rd == sp) &&
+ !MustUseReg(rt.rmode())) {
+ c_addi16sp(static_cast<int16_t>(-rt.immediate()));
} else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs,
static_cast<int32_t>(
@@ -464,13 +524,13 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, -rt.immediate());
+ Li(scratch, -rt.immediate());
add(rd, rs, scratch);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
sub(rd, rs, scratch);
}
}
@@ -483,7 +543,7 @@ void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
mulw(rd, rs, scratch);
}
}
@@ -495,7 +555,7 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
mul(rd, rs, scratch);
}
srai(rd, rd, 32);
@@ -504,10 +564,11 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
Register rsz, Register rtz) {
slli(rsz, rs, 32);
- if (rt.is_reg())
+ if (rt.is_reg()) {
slli(rtz, rt.rm(), 32);
- else
- RV_li(rtz, rt.immediate() << 32);
+ } else {
+ Li(rtz, rt.immediate() << 32);
+ }
mulhu(rd, rsz, rtz);
srai(rd, rd, 32);
}
@@ -519,7 +580,7 @@ void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
mul(rd, rs, scratch);
}
}
@@ -531,7 +592,7 @@ void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
mulh(rd, rs, scratch);
}
}
@@ -543,7 +604,7 @@ void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
divw(res, rs, scratch);
}
}
@@ -555,7 +616,7 @@ void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
remw(rd, rs, scratch);
}
}
@@ -567,7 +628,7 @@ void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
remuw(rd, rs, scratch);
}
}
@@ -579,7 +640,7 @@ void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
div(rd, rs, scratch);
}
}
@@ -591,7 +652,7 @@ void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
divuw(res, rs, scratch);
}
}
@@ -603,7 +664,7 @@ void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
divu(res, rs, scratch);
}
}
@@ -615,7 +676,7 @@ void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
rem(rd, rs, scratch);
}
}
@@ -627,22 +688,32 @@ void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
remu(rd, rs, scratch);
}
}
void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- and_(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) &&
+ ((rt.rm().code() & 0b11000) == 0b01000)) {
+ c_and(rd, rt.rm());
+ } else {
+ and_(rd, rs, rt.rm());
+ }
} else {
- if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
+ !MustUseReg(rt.rmode()) && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000)) {
+ c_andi(rd, static_cast<int8_t>(rt.immediate()));
+ } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
andi(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
and_(rd, rs, scratch);
}
}
@@ -650,7 +721,13 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- or_(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) &&
+ ((rt.rm().code() & 0b11000) == 0b01000)) {
+ c_or(rd, rt.rm());
+ } else {
+ or_(rd, rs, rt.rm());
+ }
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
ori(rd, rs, static_cast<int32_t>(rt.immediate()));
@@ -658,7 +735,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
or_(rd, rs, scratch);
}
}
@@ -666,7 +743,13 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- xor_(rd, rs, rt.rm());
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) &&
+ ((rt.rm().code() & 0b11000) == 0b01000)) {
+ c_xor(rd, rt.rm());
+ } else {
+ xor_(rd, rs, rt.rm());
+ }
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
xori(rd, rs, static_cast<int32_t>(rt.immediate()));
@@ -674,7 +757,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
xor_(rd, rs, scratch);
}
}
@@ -744,7 +827,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
slt(rd, rs, scratch);
}
}
@@ -761,7 +844,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
sltu(rd, rs, scratch);
}
}
@@ -775,7 +858,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
slt(rd, scratch, rs);
}
xori(rd, rd, 1);
@@ -789,7 +872,7 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
sltu(rd, scratch, rs);
}
xori(rd, rd, 1);
@@ -813,7 +896,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
slt(rd, scratch, rs);
}
}
@@ -826,7 +909,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- RV_li(scratch, rt.immediate());
+ Li(scratch, rt.immediate());
sltu(rd, scratch, rs);
}
}
@@ -861,6 +944,10 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sra(rd, rs, rt.rm());
+ } else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ c_srai(rd, shamt);
} else {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
srai(rd, rs, shamt);
@@ -870,6 +957,10 @@ void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srl(rd, rs, rt.rm());
+ } else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ c_srli(rd, shamt);
} else {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
srli(rd, rs, shamt);
@@ -881,7 +972,28 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
sll(rd, rs, rt.rm());
} else {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
- slli(rd, rs, shamt);
+ if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
+ (rd != zero_reg) && (shamt != 0) && is_uint6(shamt)) {
+ c_slli(rd, shamt);
+ } else {
+ slli(rd, rs, shamt);
+ }
+ }
+}
+
+void TurboAssembler::Li(Register rd, int64_t imm) {
+ if (FLAG_riscv_c_extension && (rd != zero_reg) && is_int6(imm)) {
+ c_li(rd, imm);
+ } else {
+ RV_li(rd, imm);
+ }
+}
+
+void TurboAssembler::Mv(Register rd, const Operand& rt) {
+ if (FLAG_riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {
+ c_mv(rd, rt.rm());
+ } else {
+ mv(rd, rt.rm());
}
}
@@ -898,7 +1010,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
} else {
int64_t ror_value = rt.immediate() % 32;
if (ror_value == 0) {
- mv(rd, rs);
+ Mv(rd, rs);
return;
} else if (ror_value < 0) {
ror_value += 32;
@@ -922,7 +1034,7 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
} else {
int64_t dror_value = rt.immediate() % 64;
if (dror_value == 0) {
- mv(rd, rs);
+ Mv(rd, rs);
return;
} else if (dror_value < 0) {
dror_value += 64;
@@ -1302,7 +1414,17 @@ void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
- this->lw(target, source.rm(), source.offset());
+ if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
+ ((source.rm().code() & 0b11000) == 0b01000) &&
+ is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
+ this->c_lw(target, source.rm(), source.offset());
+ } else if (FLAG_riscv_c_extension && (target != zero_reg) &&
+ is_uint8(source.offset()) && (source.rm() == sp) &&
+ ((source.offset() & 0x3) == 0)) {
+ this->c_lwsp(target, source.offset());
+ } else {
+ this->lw(target, source.rm(), source.offset());
+ }
};
AlignedLoadHelper(rd, rs, fn);
}
@@ -1316,21 +1438,49 @@ void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
- this->sw(value, source.rm(), source.offset());
+ if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
+ ((source.rm().code() & 0b11000) == 0b01000) &&
+ is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
+ this->c_sw(value, source.rm(), source.offset());
+ } else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
+ is_uint8(source.offset()) && (((source.offset() & 0x3) == 0))) {
+ this->c_swsp(value, source.offset());
+ } else {
+ this->sw(value, source.rm(), source.offset());
+ }
};
AlignedStoreHelper(rd, rs, fn);
}
void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
- this->ld(target, source.rm(), source.offset());
+ if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
+ ((source.rm().code() & 0b11000) == 0b01000) &&
+ is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_ld(target, source.rm(), source.offset());
+ } else if (FLAG_riscv_c_extension && (target != zero_reg) &&
+ is_uint9(source.offset()) && (source.rm() == sp) &&
+ ((source.offset() & 0x7) == 0)) {
+ this->c_ldsp(target, source.offset());
+ } else {
+ this->ld(target, source.rm(), source.offset());
+ }
};
AlignedLoadHelper(rd, rs, fn);
}
void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
- this->sd(value, source.rm(), source.offset());
+ if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
+ ((source.rm().code() & 0b11000) == 0b01000) &&
+ is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_sd(value, source.rm(), source.offset());
+ } else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
+ is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_sdsp(value, source.offset());
+ } else {
+ this->sd(value, source.rm(), source.offset());
+ }
};
AlignedStoreHelper(rd, rs, fn);
}
@@ -1351,14 +1501,32 @@ void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
auto fn = [this](FPURegister target, const MemOperand& source) {
- this->fld(target, source.rm(), source.offset());
+ if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
+ ((source.rm().code() & 0b11000) == 0b01000) &&
+ is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_fld(target, source.rm(), source.offset());
+ } else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
+ is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_fldsp(target, source.offset());
+ } else {
+ this->fld(target, source.rm(), source.offset());
+ }
};
AlignedLoadHelper(fd, src, fn);
}
void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
auto fn = [this](FPURegister value, const MemOperand& source) {
- this->fsd(value, source.rm(), source.offset());
+ if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
+ ((source.rm().code() & 0b11000) == 0b01000) &&
+ is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_fsd(value, source.rm(), source.offset());
+ } else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
+ is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
+ this->c_fsdsp(value, source.offset());
+ } else {
+ this->fsd(value, source.rm(), source.offset());
+ }
};
AlignedStoreHelper(fs, src, fn);
}
@@ -1468,7 +1636,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
- RV_li(rd, j.immediate());
+ Li(rd, j.immediate());
}
void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
@@ -1486,12 +1654,23 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
ld(rd, rd, 0);
} else {
if ((count - reverse_count) > 1) {
- RV_li(rd, ~j.immediate());
+ Li(rd, ~j.immediate());
not_(rd, rd);
} else {
- RV_li(rd, j.immediate());
+ Li(rd, j.immediate());
}
}
+ } else if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(j.rmode())) {
+ BlockGrowBufferScope block_growbuffer(this);
+ int offset = pc_offset();
+ Address address = j.immediate();
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(offset, address));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(address));
+ int64_t immediate = object->ptr();
+ RecordRelocInfo(j.rmode(), immediate);
+ li_ptr(rd, immediate);
+ DCHECK(EmbeddedObjectMatches(offset, object));
} else if (MustUseReg(j.rmode())) {
int64_t immediate;
if (j.IsHeapObjectRequest()) {
@@ -2613,7 +2792,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
}
}
} else {
- if (is_trampoline_emitted()) {
+ if (is_trampoline_emitted() && near_jump == Label::Distance::kFar) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
@@ -3190,13 +3369,14 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// Note that this assumes the caller code (i.e. the Code object currently
// being generated) is immovable or that the callee function cannot trigger
// GC, since the callee function will return to it.
-
+ //
// Compute the return address in lr to return to after the jump below. The
// pc is already at '+ 8' from the current instruction; but return is after
// three instructions, so add another 4 to pc to get the return address.
-
+ //
Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
- static constexpr int kNumInstructionsToJump = 5;
+ int kNumInstructionsToJump = 5;
+ if (FLAG_riscv_c_extension) kNumInstructionsToJump = 4;
Label find_ra;
// Adjust the value in ra to point to the correct return location, one
// instruction past the real call into C code (the jalr(t6)), and push it.
@@ -3213,7 +3393,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// Stack is still aligned.
// Call the C routine.
- mv(t6,
+ Mv(t6,
target); // Function pointer to t6 to conform to ABI for PIC.
jalr(t6);
// Make sure the stored 'ra' points to this position.
@@ -3227,21 +3407,6 @@ void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::GenPCRelativeJump(Register rd, int64_t imm32) {
- DCHECK(is_int32(imm32));
- int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
- auipc(rd, Hi20); // Read PC + Hi20 into scratch.
- jr(rd, Lo12); // jump PC + Hi20 + Lo12
-}
-
-void TurboAssembler::GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
- DCHECK(is_int32(imm32));
- int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
- auipc(rd, Hi20); // Read PC + Hi20 into scratch.
- jalr(rd, Lo12); // jump PC + Hi20 + Lo12
-}
void TurboAssembler::BranchLong(Label* L) {
// Generate position independent long branch.
@@ -3306,9 +3471,9 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
Xor(reg2, reg2, Operand(reg1));
Xor(reg1, reg1, Operand(reg2));
} else {
- mv(scratch, reg1);
- mv(reg1, reg2);
- mv(reg2, scratch);
+ Mv(scratch, reg1);
+ Mv(reg1, reg2);
+ Mv(reg2, scratch);
}
}
@@ -3342,7 +3507,7 @@ void TurboAssembler::PushArray(Register array, Register size,
Register scratch2 = temps.Acquire();
Label loop, entry;
if (order == PushArrayOrder::kReverse) {
- mv(scratch, zero_reg);
+ Mv(scratch, zero_reg);
jmp(&entry);
bind(&loop);
CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -3352,7 +3517,7 @@ void TurboAssembler::PushArray(Register array, Register size,
bind(&entry);
Branch(&loop, less, scratch, Operand(size));
} else {
- mv(scratch, size);
+ Mv(scratch, size);
jmp(&entry);
bind(&loop);
CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -3443,52 +3608,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kSystemPointerSize to count the
- // receiver argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- CalcScaledAddress(dst_reg, fp, caller_args_count, kSystemPointerSizeLog2);
- Add64(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kSystemPointerSize is for the receiver.
- CalcScaledAddress(src_reg, sp, callee_args_count, kSystemPointerSizeLog2);
- Add64(src_reg, src_reg, Operand(kSystemPointerSize));
-
- if (FLAG_debug_code) {
- Check(Uless, AbortReason::kStackAccessBelowStackPointer, src_reg,
- Operand(dst_reg));
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop, entry;
- Branch(&entry);
- bind(&loop);
- Sub64(src_reg, src_reg, Operand(kSystemPointerSize));
- Sub64(dst_reg, dst_reg, Operand(kSystemPointerSize));
- Ld(tmp_reg, MemOperand(src_reg));
- Sd(tmp_reg, MemOperand(dst_reg));
- bind(&entry);
- Branch(&loop, ne, sp, Operand(src_reg));
-
- // Leave current frame.
- mv(sp, dst_reg);
-}
-
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
@@ -3767,7 +3886,7 @@ void TurboAssembler::AddOverflow64(Register dst, Register left,
xor_(overflow, scratch2, left);
xor_(scratch, scratch2, right_reg);
and_(overflow, overflow, scratch);
- mv(dst, scratch2);
+ Mv(dst, scratch2);
} else {
add(dst, left, right_reg);
xor_(overflow, dst, left);
@@ -3799,7 +3918,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left,
xor_(overflow, left, scratch2);
xor_(scratch, left, right_reg);
and_(overflow, overflow, scratch);
- mv(dst, scratch2);
+ Mv(dst, scratch2);
} else {
sub(dst, left, right_reg);
xor_(overflow, left, dst);
@@ -3934,7 +4053,7 @@ void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
- Branch(&L, cc, rs, rt);
+ BranchShort(&L, cc, rs, rt);
Abort(reason);
// Will not return here.
bind(&L);
@@ -4023,6 +4142,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
li(scratch, Operand(StackFrame::TypeToMarker(type)));
Push(scratch);
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -4149,7 +4271,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
// Pop the arguments, restore registers, and return.
- mv(sp, fp); // Respect ABI stack constraint.
+ Mv(sp, fp); // Respect ABI stack constraint.
Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
@@ -4196,7 +4318,7 @@ void MacroAssembler::AssertStackIsAligned() {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, sp, frame_alignment_mask);
- Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ BranchShort(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort re-entering here.
ebreak();
@@ -4317,13 +4439,13 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Label done;
// Check if JSGeneratorObject
- Branch(&done, eq, kScratchReg, Operand(JS_GENERATOR_OBJECT_TYPE));
+ BranchShort(&done, eq, kScratchReg, Operand(JS_GENERATOR_OBJECT_TYPE));
// Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
- Branch(&done, eq, kScratchReg, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+ BranchShort(&done, eq, kScratchReg, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
// Check if JSAsyncGeneratorObject
- Branch(&done, eq, kScratchReg, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
+ BranchShort(&done, eq, kScratchReg, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
Abort(AbortReason::kOperandIsNotAGeneratorObject);
@@ -4336,7 +4458,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
- Branch(&done_checking, eq, object, Operand(scratch));
+ BranchShort(&done_checking, eq, object, Operand(scratch));
GetObjectType(object, scratch, scratch);
Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
Operand(ALLOCATION_SITE_TYPE));
@@ -4451,7 +4573,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments and the
// original value of sp.
- mv(scratch, sp);
+ Mv(scratch, sp);
Sub64(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
@@ -4510,7 +4632,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
And(scratch, sp, Operand(frame_alignment_mask));
- Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ BranchShort(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
@@ -4525,7 +4647,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// stays correct.
{
if (function != t6) {
- mv(t6, function);
+ Mv(t6, function);
function = t6;
}
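Aside (not part of the patch): the test "(code() & 0b11000) == 0b01000" that recurs throughout the hunks above selects the registers that have a 3-bit encoding in the RISC-V C extension, i.e. x8..x15 (and f8..f15 for the floating-point variants). A self-contained sketch of that predicate follows; IsRvcCompatibleReg is a hypothetical name used only for illustration.

// rvc_reg_check.cc -- standalone check of the compressed-register predicate.
#include <cassert>
#include <cstdint>

// True when a 5-bit register code falls in the range 8..15, the only registers
// addressable by the 3-bit register fields of compressed (RVC) encodings.
static bool IsRvcCompatibleReg(uint32_t code) {
  return (code & 0b11000) == 0b01000;
}

int main() {
  for (uint32_t code = 0; code < 32; ++code) {
    assert(IsRvcCompatibleReg(code) == (code >= 8 && code <= 15));
  }
  return 0;
}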
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index d06b4ce176..04285916bc 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -209,8 +209,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
void LoadRootRelative(Register destination, int32_t offset) final;
- inline void GenPCRelativeJump(Register rd, int64_t imm32);
- inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32);
+ inline void GenPCRelativeJump(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jr(rd, Lo12); // jump PC + Hi20 + Lo12
+ }
+
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jalr(rd, Lo12); // jump PC + Hi20 + Lo12
+ }
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
@@ -407,6 +420,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
+#define DEFINE_INSTRUCTION3(instr) void instr(Register rd, int64_t imm);
+
DEFINE_INSTRUCTION(Add32)
DEFINE_INSTRUCTION(Add64)
DEFINE_INSTRUCTION(Div32)
@@ -457,6 +472,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DEFINE_INSTRUCTION(Ror)
DEFINE_INSTRUCTION(Dror)
+
+ DEFINE_INSTRUCTION3(Li)
+ DEFINE_INSTRUCTION2(Mv)
+
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
@@ -473,15 +492,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count| do not include
- // receiver. |callee_args_count| is not modified. |caller_args_count|
- // is trashed.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);
// Before calling a C-function from generated code, align arguments on stack.
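Aside (not part of the patch): GenPCRelativeJump and GenPCRelativeJumpAndLink above split a 32-bit PC-relative offset into an auipc upper part and a 12-bit lower part, rounding the upper part with +0x800 so that the sign-extended low bits recombine exactly. A small standalone check of that split, under the assumption that the offset fits in 32 bits:

// hi20_lo12.cc -- standalone check of the auipc/jr offset split.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t cases[] = {0, 1, -1, 2047, 2048, -2048, 4096, 123456789, -123456789};
  for (int64_t imm32 : cases) {
    int32_t hi20 = (static_cast<int32_t>(imm32) + 0x800) >> 12;
    // Sign-extend the low 12 bits (same result as the "<< 20 >> 20" idiom above).
    int32_t lo12 = static_cast<int32_t>(imm32 & 0xFFF);
    if (lo12 >= 0x800) lo12 -= 0x1000;
    assert(static_cast<int64_t>(hi20) * 4096 + lo12 == imm32);
  }
  return 0;
}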
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 99d95c7ede..511096e0db 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -750,6 +750,18 @@ void Assembler::dumy(int r1, int x2, int b2, int d2) {
#endif
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ // TODO(v8:11872) This function should never be called if Sparkplug on heap
+ // compilation is not supported.
+ UNREACHABLE();
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ // TODO(v8:11872) This function should never be called if Sparkplug on heap
+ // compilation is not supported.
+ UNREACHABLE();
+}
+
void Assembler::GrowBuffer(int needed) {
DCHECK_EQ(buffer_start_, buffer_->start());
@@ -799,7 +811,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -809,7 +822,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uint64_t*>(pc_) = value;
@@ -819,7 +833,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
CheckBuffer();
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
*reinterpret_cast<uintptr_t*>(pc_) = data;
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 7cd0d46778..704dead070 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -241,6 +241,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+  // This function is called when we fall back from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
@@ -1055,6 +1064,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
void breakpoint(bool do_print) {
if (do_print) {
@@ -1304,8 +1314,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index cb67d8d9f9..4de7f2cf4b 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -16,6 +16,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -1399,6 +1400,47 @@ void TurboAssembler::Prologue(Register base, int prologue_offset) {
PushStandardFrame(r3);
}
+void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ int receiver_bytes =
+ (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
+ switch (type) {
+ case kCountIsInteger: {
+ ShiftLeftU64(ip, count, Operand(kSystemPointerSizeLog2));
+ lay(sp, MemOperand(sp, ip));
+ break;
+ }
+ case kCountIsSmi: {
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ SmiToPtrArrayOffset(count, count);
+ AddS64(sp, sp, count);
+ break;
+ }
+ case kCountIsBytes: {
+ AddS64(sp, sp, count);
+ break;
+ }
+ }
+ if (receiver_bytes != 0) {
+ AddS64(sp, sp, Operand(receiver_bytes));
+ }
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Register receiver,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, receiver));
+ if (mode == kCountExcludesReceiver) {
+ // Drop arguments without receiver and override old receiver.
+ DropArguments(argc, type, kCountIncludesReceiver);
+ StoreU64(receiver, MemOperand(sp));
+ } else {
+ DropArguments(argc, type, mode);
+ push(receiver);
+ }
+}
+
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// We create a stack frame with:
@@ -1410,6 +1452,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
@@ -1574,55 +1619,6 @@ void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d0);
}
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
-
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We AddS64 kSystemPointerSize to count the
- // receiver argument which is not included into formal parameters count.
- Register dst_reg = scratch0;
- ShiftLeftU64(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
- AddS64(dst_reg, fp, dst_reg);
- AddS64(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-
- Register src_reg = caller_args_count;
- // Calculate the end of source area. +kSystemPointerSize is for the receiver.
- ShiftLeftU64(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
- AddS64(src_reg, sp, src_reg);
- AddS64(src_reg, src_reg, Operand(kSystemPointerSize));
-
- if (FLAG_debug_code) {
- CmpU64(src_reg, dst_reg);
- Check(lt, AbortReason::kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- RestoreFrameStateForTailCall();
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch1;
- Label loop;
- AddS64(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
- mov(r1, tmp_reg);
- bind(&loop);
- LoadU64(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
- StoreU64(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
- lay(src_reg, MemOperand(src_reg, -kSystemPointerSize));
- lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize));
- BranchOnCount(r1, &loop);
-
- // Leave current frame.
- mov(sp, dst_reg);
-}
-
MemOperand MacroAssembler::StackLimitAsMemOperand(StackLimitKind kind) {
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
@@ -5086,6 +5082,322 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
bind(&done);
}
+// Simd Support.
+#define kScratchDoubleReg d13
+
+void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
+ vrep(dst, src, Operand(0), Condition(3));
+}
+
+void TurboAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) {
+ vrep(dst, src, Operand(0), Condition(2));
+}
+
+void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) {
+ vlvg(dst, src, MemOperand(r0, 0), Condition(3));
+ vrep(dst, dst, Operand(0), Condition(3));
+}
+
+void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) {
+ vlvg(dst, src, MemOperand(r0, 0), Condition(2));
+ vrep(dst, dst, Operand(0), Condition(2));
+}
+
+void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) {
+ vlvg(dst, src, MemOperand(r0, 0), Condition(1));
+ vrep(dst, dst, Operand(0), Condition(1));
+}
+
+void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) {
+ vlvg(dst, src, MemOperand(r0, 0), Condition(0));
+ vrep(dst, dst, Operand(0), Condition(0));
+}
+
+void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3));
+}
+
+void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2));
+}
+
+void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
+}
+
+void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
+}
+
+void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
+}
+
+void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vlgv(r0, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
+ lghr(dst, r0);
+}
+
+void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
+}
+
+void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx) {
+ vlgv(r0, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
+ lgbr(dst, r0);
+}
+
+void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+ DoubleRegister src2,
+ uint8_t imm_lane_idx) {
+ vlgv(r0, src2, MemOperand(r0, 0), Condition(3));
+ if (src1 != dst) {
+ vlr(dst, src1, Condition(0), Condition(0), Condition(0));
+ }
+ vlvg(dst, r0, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
+}
+
+void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+ DoubleRegister src2,
+ uint8_t imm_lane_idx) {
+ vlgv(r0, src2, MemOperand(r0, 0), Condition(2));
+ if (src1 != dst) {
+ vlr(dst, src1, Condition(0), Condition(0), Condition(0));
+ }
+ vlvg(dst, r0, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
+}
+
+void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx) {
+ if (src1 != dst) {
+ vlr(dst, src1, Condition(0), Condition(0), Condition(0));
+ }
+ vlvg(dst, src2, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
+}
+
+void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx) {
+ if (src1 != dst) {
+ vlr(dst, src1, Condition(0), Condition(0), Condition(0));
+ }
+ vlvg(dst, src2, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
+}
+
+void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx) {
+ if (src1 != dst) {
+ vlr(dst, src1, Condition(0), Condition(0), Condition(0));
+ }
+ vlvg(dst, src2, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
+}
+
+void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx) {
+ if (src1 != dst) {
+ vlr(dst, src1, Condition(0), Condition(0), Condition(0));
+ }
+ vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
+}
+
+#define SIMD_BINOP_LIST_VRR_B(V) \
+ V(I64x2Eq, vceq, 0, 3) \
+ V(I64x2GtS, vch, 0, 3) \
+ V(I32x4Eq, vceq, 0, 2) \
+ V(I32x4GtS, vch, 0, 2) \
+ V(I32x4GtU, vchl, 0, 2) \
+ V(I16x8Eq, vceq, 0, 1) \
+ V(I16x8GtS, vch, 0, 1) \
+ V(I16x8GtU, vchl, 0, 1) \
+ V(I8x16Eq, vceq, 0, 0) \
+ V(I8x16GtS, vch, 0, 0) \
+ V(I8x16GtU, vchl, 0, 0)
+
+#define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Simd128Register src2) { \
+ op(dst, src1, src2, Condition(c1), Condition(c2)); \
+ }
+SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
+#undef EMIT_SIMD_BINOP_VRR_B
+#undef SIMD_BINOP_LIST_VRR_B
+
+#define SIMD_BINOP_LIST_VRR_C(V) \
+ V(F64x2Add, vfa, 0, 0, 3) \
+ V(F64x2Sub, vfs, 0, 0, 3) \
+ V(F64x2Mul, vfm, 0, 0, 3) \
+ V(F64x2Div, vfd, 0, 0, 3) \
+ V(F64x2Min, vfmin, 1, 0, 3) \
+ V(F64x2Max, vfmax, 1, 0, 3) \
+ V(F64x2Eq, vfce, 0, 0, 3) \
+ V(F32x4Add, vfa, 0, 0, 2) \
+ V(F32x4Sub, vfs, 0, 0, 2) \
+ V(F32x4Mul, vfm, 0, 0, 2) \
+ V(F32x4Div, vfd, 0, 0, 2) \
+ V(F32x4Min, vfmin, 1, 0, 2) \
+ V(F32x4Max, vfmax, 1, 0, 2) \
+ V(F32x4Eq, vfce, 0, 0, 2) \
+ V(I64x2Add, va, 0, 0, 3) \
+ V(I64x2Sub, vs, 0, 0, 3) \
+ V(I32x4Add, va, 0, 0, 2) \
+ V(I32x4Sub, vs, 0, 0, 2) \
+ V(I32x4Mul, vml, 0, 0, 2) \
+ V(I32x4MinS, vmn, 0, 0, 2) \
+ V(I32x4MinU, vmnl, 0, 0, 2) \
+ V(I32x4MaxS, vmx, 0, 0, 2) \
+ V(I32x4MaxU, vmxl, 0, 0, 2) \
+ V(I16x8Add, va, 0, 0, 1) \
+ V(I16x8Sub, vs, 0, 0, 1) \
+ V(I16x8Mul, vml, 0, 0, 1) \
+ V(I16x8MinS, vmn, 0, 0, 1) \
+ V(I16x8MinU, vmnl, 0, 0, 1) \
+ V(I16x8MaxS, vmx, 0, 0, 1) \
+ V(I16x8MaxU, vmxl, 0, 0, 1) \
+ V(I8x16Add, va, 0, 0, 0) \
+ V(I8x16Sub, vs, 0, 0, 0) \
+ V(I8x16MinS, vmn, 0, 0, 0) \
+ V(I8x16MinU, vmnl, 0, 0, 0) \
+ V(I8x16MaxS, vmx, 0, 0, 0) \
+ V(I8x16MaxU, vmxl, 0, 0, 0)
+
+#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Simd128Register src2) { \
+ op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \
+ }
+SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
+#undef EMIT_SIMD_BINOP_VRR_C
+#undef SIMD_BINOP_LIST_VRR_C
+
+// Opcodes without a 1-1 match.
+void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ Register scratch_1 = r0;
+ Register scratch_2 = r1;
+ for (int i = 0; i < 2; i++) {
+ vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
+ vlgv(scratch_2, src2, MemOperand(r0, i), Condition(3));
+ MulS64(scratch_1, scratch_2);
+ scratch_1 = r1;
+ scratch_2 = ip;
+ }
+ vlvgp(dst, r0, r1);
+}
+
+void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
+}
+
+void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3));
+}
+
+void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3));
+}
+
+void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+}
+
+void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2));
+}
+
+void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2));
+}
+
+void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(dst, src1, src2, Condition(0), Condition(3));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
+}
+
+void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ // Compute !(B > A) which is equal to A >= B.
+ vch(dst, src2, src1, Condition(0), Condition(3));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
+}
+
+void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(dst, src1, src2, Condition(0), Condition(2));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+}
+
+void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ // Compute !(B > A) which is equal to A >= B.
+ vch(dst, src2, src1, Condition(0), Condition(2));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+}
+
+void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(2));
+ vchl(dst, src1, src2, Condition(0), Condition(2));
+ vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
+}
+
+void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(dst, src1, src2, Condition(0), Condition(1));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
+}
+
+void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ // Compute !(B > A) which is equal to A >= B.
+ vch(dst, src2, src1, Condition(0), Condition(1));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
+}
+
+void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(1));
+ vchl(dst, src1, src2, Condition(0), Condition(1));
+ vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
+}
+
+void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(dst, src1, src2, Condition(0), Condition(0));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+}
+
+void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ // Compute !(B > A) which is equal to A >= B.
+ vch(dst, src2, src1, Condition(0), Condition(0));
+ vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+}
+
+void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2) {
+ vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(0));
+ vchl(dst, src1, src2, Condition(0), Condition(0));
+ vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
+}
+
+#undef kScratchDoubleReg
+
} // namespace internal
} // namespace v8
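Aside (not part of the patch): the lane arithmetic in the new S390 SIMD helpers above (1 - imm_lane_idx, 3 - imm_lane_idx, 7 - imm_lane_idx, 15 - imm_lane_idx) reverses the lane index, which matches z/Architecture vectors numbering elements from the most significant end while WebAssembly lanes count from the least significant end; that rationale is an assumption worth checking against the ISA manual. A trivial sketch of the mapping; LaneToElement is a hypothetical name.

// lane_flip.cc -- standalone sketch of the lane-index reversal.
#include <cassert>

// Map a WebAssembly lane index to the corresponding vector element index when
// elements are numbered from the opposite end of the register.
static int LaneToElement(int num_lanes, int lane) { return num_lanes - 1 - lane; }

int main() {
  assert(LaneToElement(2, 0) == 1);    // F64x2 / I64x2: "1 - imm_lane_idx"
  assert(LaneToElement(4, 1) == 2);    // F32x4 / I32x4: "3 - imm_lane_idx"
  assert(LaneToElement(8, 7) == 0);    // I16x8:         "7 - imm_lane_idx"
  assert(LaneToElement(16, 15) == 0);  // I8x16:         "15 - imm_lane_idx"
  return 0;
}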
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index fbf2ad0510..51cdb48326 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -735,6 +735,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
int prologue_offset = 0);
void Prologue(Register base, int prologue_offset = 0);
+ enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
+ enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
+ void DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
// ----------------------------------------------------------------
@@ -812,10 +820,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1031,8 +1035,128 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreReturnAddressAndCall(Register target);
// ---------------------------------------------------------------------------
+ // Simd Support.
+ void F64x2Splat(Simd128Register dst, Simd128Register src);
+ void F32x4Splat(Simd128Register dst, Simd128Register src);
+ void I64x2Splat(Simd128Register dst, Register src);
+ void I32x4Splat(Simd128Register dst, Register src);
+ void I16x8Splat(Simd128Register dst, Register src);
+ void I8x16Splat(Simd128Register dst, Register src);
+ void F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void I64x2ExtractLane(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void I32x4ExtractLane(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void I16x8ExtractLaneU(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void I16x8ExtractLaneS(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void I8x16ExtractLaneU(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void I8x16ExtractLaneS(Register dst, Simd128Register src,
+ uint8_t imm_lane_idx);
+ void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+ DoubleRegister src2, uint8_t imm_lane_idx);
+ void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+ DoubleRegister src2, uint8_t imm_lane_idx);
+ void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx);
+ void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx);
+ void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx);
+ void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
+ Register src2, uint8_t imm_lane_idx);
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I16x8Add) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU)
+
+#define PROTOTYPE_SIMD_BINOP(name) \
+ void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
+ SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
+#undef PROTOTYPE_SIMD_BINOP
+#undef SIMD_BINOP_LIST
+
+ // ---------------------------------------------------------------------------
// Pointer compression Support
+ void SmiToPtrArrayOffset(Register dst, Register src) {
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
+ ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
+ ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
+#endif
+ }
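A scalar sketch of the arithmetic behind SmiToPtrArrayOffset, assuming 8-byte system pointers and the two common Smi layouts (kSmiShift == 1 for compressed/31-bit Smis, kSmiShift == 32 for full 64-bit Smis); the constants here are illustrative, not taken from any particular build configuration:

#include <cassert>
#include <cstdint>

// Untag a Smi (>> kSmiShift) and scale it to a byte offset into an array of
// 8-byte pointers (<< 3), folded into a single shift as in the helper above.
int64_t SmiToPtrArrayOffset(int64_t tagged_smi, int smi_shift) {
  const int kSystemPointerSizeLog2 = 3;
  if (smi_shift < kSystemPointerSizeLog2) {
    return tagged_smi << (kSystemPointerSizeLog2 - smi_shift);
  }
  return tagged_smi >> (smi_shift - kSystemPointerSizeLog2);
}

int main() {
  // Smi value 5 should map to byte offset 5 * 8 = 40 under both layouts.
  assert(SmiToPtrArrayOffset(int64_t{5} << 1, 1) == 40);
  assert(SmiToPtrArrayOffset(int64_t{5} << 32, 32) == 40);
}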
+
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
@@ -1286,16 +1410,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
ShiftLeftU64(dst, src, Operand(kSmiShift));
}
- void SmiToPtrArrayOffset(Register dst, Register src) {
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
- ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
-#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
- ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
-#endif
- }
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
diff --git a/deps/v8/src/codegen/script-details.h b/deps/v8/src/codegen/script-details.h
new file mode 100644
index 0000000000..a0a364c6b5
--- /dev/null
+++ b/deps/v8/src/codegen/script-details.h
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_SCRIPT_DETAILS_H_
+#define V8_CODEGEN_SCRIPT_DETAILS_H_
+
+#include "src/common/globals.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+struct ScriptDetails {
+ ScriptDetails()
+ : line_offset(0), column_offset(0), repl_mode(REPLMode::kNo) {}
+ explicit ScriptDetails(
+ Handle<Object> script_name,
+ ScriptOriginOptions origin_options = v8::ScriptOriginOptions())
+ : line_offset(0),
+ column_offset(0),
+ name_obj(script_name),
+ repl_mode(REPLMode::kNo),
+ origin_options(origin_options) {}
+
+ int line_offset;
+ int column_offset;
+ MaybeHandle<Object> name_obj;
+ MaybeHandle<Object> source_map_url;
+ MaybeHandle<FixedArray> host_defined_options;
+ REPLMode repl_mode;
+ const ScriptOriginOptions origin_options;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_SCRIPT_DETAILS_H_
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 3a73ae09f8..edd1a977e6 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -18,6 +18,30 @@
namespace v8 {
namespace internal {
+void SharedTurboAssembler::Move(Register dst, Register src) {
+ // Helper to paper over the different assembler function names.
+ if (dst != src) {
+#if V8_TARGET_ARCH_IA32
+ mov(dst, src);
+#elif V8_TARGET_ARCH_X64
+ movq(dst, src);
+#else
+#error Unsupported target architecture.
+#endif
+ }
+}
+
+void SharedTurboAssembler::And(Register dst, Immediate src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ and_(dst, src);
+#elif V8_TARGET_ARCH_X64
+ andq(dst, src);
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -496,6 +520,67 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
}
}
+void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
+ uint8_t shift, XMMRegister xmm_tmp) {
+ DCHECK_GT(64, shift);
+ DCHECK_NE(xmm_tmp, dst);
+ DCHECK_NE(xmm_tmp, src);
+ // Use logical right shift to emulate arithmetic right shifts:
+ // Given:
+ // signed >> c
+ // == (signed + 2^63 - 2^63) >> c
+ // == ((signed + 2^63) >> c) - (2^63 >> c)
+ // ^^^^^^^^^
+ // xmm_tmp
+ // signed + 2^63 is an unsigned number, so we can use logical right shifts.
+
+ // xmm_tmp = wasm_i64x2_const(0x80000000'00000000).
+ Pcmpeqd(xmm_tmp, xmm_tmp);
+ Psllq(xmm_tmp, byte{63});
+
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ Movapd(dst, src);
+ src = dst;
+ }
+ // Add a bias of 2^63 to convert signed to unsigned.
+ // Since only highest bit changes, use pxor instead of paddq.
+ Pxor(dst, src, xmm_tmp);
+ // Logically shift both value and bias.
+ Psrlq(dst, shift);
+ Psrlq(xmm_tmp, shift);
+ // Subtract shifted bias to convert back to signed value.
+ Psubq(dst, xmm_tmp);
+}
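The comment in I64x2ShrS above describes emulating an arithmetic right shift with logical shifts. A scalar sketch of that identity in plain C++ (no SIMD), useful for convincing oneself the bias trick is correct:

#include <cassert>
#include <cstdint>

// Emulate (value >> shift) on a signed 64-bit value using only logical shifts:
//   signed >> c == ((signed + 2^63) >> c) - (2^63 >> c)
int64_t ArithmeticShiftRightViaLogical(int64_t value, unsigned shift) {
  const uint64_t kBias = uint64_t{1} << 63;
  // Adding the bias only flips the sign bit, so XOR is equivalent (this is
  // why Pxor can stand in for Paddq above).
  uint64_t biased = static_cast<uint64_t>(value) ^ kBias;
  return static_cast<int64_t>((biased >> shift) - (kBias >> shift));
}

int main() {
  const int64_t samples[] = {-1000, -1, 0, 1, 1000};
  for (int64_t v : samples) {
    for (unsigned shift = 0; shift < 64; ++shift) {
      assert(ArithmeticShiftRightViaLogical(v, shift) == (v >> shift));
    }
  }
}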
+
+void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
+ Register shift, XMMRegister xmm_tmp,
+ XMMRegister xmm_shift,
+ Register tmp_shift) {
+ DCHECK_NE(xmm_tmp, dst);
+ DCHECK_NE(xmm_tmp, src);
+ DCHECK_NE(xmm_shift, dst);
+ DCHECK_NE(xmm_shift, src);
+ // tmp_shift can alias shift since we don't use shift after masking it.
+
+ // See I64x2ShrS with constant shift for explanation of this algorithm.
+ Pcmpeqd(xmm_tmp, xmm_tmp);
+ Psllq(xmm_tmp, byte{63});
+
+ // Shift modulo 64.
+ Move(tmp_shift, shift);
+ And(tmp_shift, Immediate(0x3F));
+ Movd(xmm_shift, tmp_shift);
+
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ Movapd(dst, src);
+ src = dst;
+ }
+ Pxor(dst, src, xmm_tmp);
+ Psrlq(dst, xmm_shift);
+ Psrlq(xmm_tmp, xmm_shift);
+ Psubq(dst, xmm_tmp);
+}
+
// 1. Unpack src0, src1 into even-number elements of scratch.
// 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2.
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index add62a5a49..7c6f7185b9 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -33,6 +33,10 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ // Move if registers are not identical.
+ void Move(Register dst, Register src);
+ void And(Register dst, Immediate src);
+
void Movapd(XMMRegister dst, XMMRegister src);
template <typename Dst, typename Src>
@@ -315,6 +319,11 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
XMMRegister scratch);
void I64x2GeS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
XMMRegister scratch);
+ void I64x2ShrS(XMMRegister dst, XMMRegister src, uint8_t shift,
+ XMMRegister xmm_tmp);
+ void I64x2ShrS(XMMRegister dst, XMMRegister src, Register shift,
+ XMMRegister xmm_tmp, XMMRegister xmm_shift,
+ Register tmp_shift);
void I64x2ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool low, bool is_signed);
void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index a617391372..4d30f01c08 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -64,15 +64,12 @@ void Assembler::emit(Immediate64 x) {
if (!RelocInfo::IsNone(x.rmode_)) {
RecordRelocInfo(x.rmode_);
if (x.rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
- Address handle_address = reinterpret_cast<Address>(&x.value_);
- Handle<HeapObject> object = Handle<HeapObject>::cast(
- ReadUnalignedValue<Handle<Object>>(handle_address));
+ int offset = pc_offset();
+ Handle<HeapObject> object(reinterpret_cast<Address*>(x.value_));
saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(pc_offset(), x.value_));
+ std::make_pair(offset, x.value_));
emitq(static_cast<uint64_t>(object->ptr()));
- // We must ensure that `emitq` is not growing the assembler buffer
- // and falling back to off-heap compilation.
- DCHECK(IsOnHeap());
+ DCHECK(EmbeddedObjectMatches(offset, object));
return;
}
}
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1a99afa8dd..1e66311d95 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -443,6 +443,10 @@ void Assembler::CodeTargetAlign() {
Align(16); // Preferred alignment of jump targets on x64.
}
+void Assembler::LoopHeaderAlign() {
+ Align(64); // Preferred alignment of loop header on x64.
+}
+
bool Assembler::IsNop(Address addr) {
byte* a = reinterpret_cast<byte*>(addr);
while (*a == 0x66) a++;
@@ -533,10 +537,38 @@ bool Assembler::is_optimizable_farjmp(int idx) {
return !!(bitmap[idx / 32] & (1 << (idx & 31)));
}
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ if (update_embedded_objects) {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ WriteUnalignedValue(base + p.first, *object);
+ }
+ }
+ for (auto p : saved_offsets_for_runtime_entries_) {
+ Address pc = base + p.first;
+ Address target = p.second + options().code_range_start;
+ WriteUnalignedValue<uint32_t>(pc, relative_target_offset(target, pc));
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+ for (auto p : saved_offsets_for_runtime_entries_) {
+ WriteUnalignedValue<uint32_t>(base + p.first, p.second);
+ }
+ saved_offsets_for_runtime_entries_.clear();
+}
+
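FixOnHeapReferences and FixOnHeapReferencesToHandles both walk a list of (pc offset, value) pairs and rewrite the corresponding bytes in the code buffer. A minimal sketch of that patching pattern, using a plain byte buffer and a memcpy-based unaligned write standing in for V8's WriteUnalignedValue helper:

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Typed unaligned store: the same idea as WriteUnalignedValue.
template <typename T>
void WriteUnaligned(uint8_t* buffer, size_t offset, T value) {
  std::memcpy(buffer + offset, &value, sizeof(value));
}

int main() {
  std::vector<uint8_t> code(64, 0x90);  // pretend code buffer (nops)
  // Recorded while emitting: at these pc offsets a 64-bit value must be patched.
  std::vector<std::pair<int, uint64_t>> saved = {{8, 0x1122334455667788ull},
                                                 {24, 42}};
  for (const auto& p : saved) WriteUnaligned(code.data(), p.first, p.second);

  uint64_t readback = 0;
  std::memcpy(&readback, code.data() + 8, sizeof(readback));
  return readback == 0x1122334455667788ull ? 0 : 1;
}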
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
// Compute new buffer size.
DCHECK_EQ(buffer_start_, buffer_->start());
@@ -575,14 +607,12 @@ void Assembler::GrowBuffer() {
WriteUnalignedValue(p, ReadUnalignedValue<intptr_t>(p) + pc_delta);
}
- // Patch on-heap references to handles.
- if (previously_on_heap && !buffer_->IsOnHeap()) {
- Address base = reinterpret_cast<Address>(buffer_->start());
- for (auto p : saved_handles_for_raw_object_ptr_) {
- WriteUnalignedValue(base + p.first, p.second);
- }
- for (auto p : saved_offsets_for_runtime_entries_) {
- WriteUnalignedValue<uint32_t>(base + p.first, p.second);
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
}
}
@@ -4302,7 +4332,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
emitl(data);
@@ -4311,7 +4342,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
if (!RelocInfo::IsNone(rmode)) {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
emitq(data);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 6c64f8ded9..c3d3af100b 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -421,6 +421,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
}
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fall back from on-heap to off-heap

+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
void FinalizeJumpOptimizationInfo();
// Unused on this architecture.
@@ -551,6 +560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
+ void LoopHeaderAlign();
// Stack
void pushfq();
@@ -1854,8 +1864,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
@@ -1867,6 +1877,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void dq(Label* label);
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return *reinterpret_cast<uint64_t*>(buffer_->start() + pc_offset) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
// Patch entries for partial constant pool.
void PatchConstPool();
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index e670da113e..5a8dc356b8 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -21,6 +21,7 @@
#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
@@ -46,7 +47,7 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && options().enable_root_array_delta_access) {
+ if (root_array_available_ && options().enable_root_relative_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
if (is_int32(delta)) {
movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
@@ -62,7 +63,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
}
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && options().enable_root_array_delta_access) {
+ if (root_array_available_ && options().enable_root_relative_access) {
intptr_t delta =
RootRegisterOffsetForExternalReference(isolate(), destination);
if (is_int32(delta)) {
@@ -103,7 +104,7 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void TurboAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && options().enable_root_array_delta_access) {
+ if (root_array_available_ && options().enable_root_relative_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
if (is_int32(delta)) {
leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
@@ -123,7 +124,7 @@ void TurboAssembler::LoadAddress(Register destination,
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
- if (root_array_available_ && options().enable_root_array_delta_access) {
+ if (root_array_available_ && options().enable_root_relative_access) {
int64_t delta =
RootRegisterOffsetForExternalReference(isolate(), reference);
if (is_int32(delta)) {
@@ -155,26 +156,29 @@ void MacroAssembler::PushAddress(ExternalReference source) {
Push(kScratchRegister);
}
+Operand TurboAssembler::RootAsOperand(RootIndex index) {
+ DCHECK(root_array_available());
+ return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
+}
+
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
DCHECK(root_array_available_);
- movq(destination,
- Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ movq(destination, RootAsOperand(index));
}
void MacroAssembler::PushRoot(RootIndex index) {
DCHECK(root_array_available_);
- Push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ Push(RootAsOperand(index));
}
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
DCHECK(root_array_available_);
if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
RootIndex::kLastStrongOrReadOnlyRoot)) {
- cmp_tagged(with,
- Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ cmp_tagged(with, RootAsOperand(index));
} else {
// Some smi roots contain system pointer size values like stack limits.
- cmpq(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ cmpq(with, RootAsOperand(index));
}
}
@@ -1192,7 +1196,7 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
return kScratchRegister;
}
-void MacroAssembler::Cmp(Register dst, int32_t src) {
+void TurboAssembler::Cmp(Register dst, int32_t src) {
if (src == 0) {
testl(dst, dst);
} else {
@@ -1200,7 +1204,7 @@ void MacroAssembler::Cmp(Register dst, int32_t src) {
}
}
-void MacroAssembler::SmiTag(Register reg) {
+void TurboAssembler::SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
@@ -1210,7 +1214,7 @@ void MacroAssembler::SmiTag(Register reg) {
}
}
-void MacroAssembler::SmiTag(Register dst, Register src) {
+void TurboAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst != src);
if (COMPRESS_POINTERS_BOOL) {
movl(dst, src);
@@ -1261,18 +1265,18 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
}
}
-void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
+void TurboAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
cmp_tagged(smi1, smi2);
}
-void MacroAssembler::SmiCompare(Register dst, Smi src) {
+void TurboAssembler::SmiCompare(Register dst, Smi src) {
AssertSmi(dst);
Cmp(dst, src);
}
-void MacroAssembler::Cmp(Register dst, Smi src) {
+void TurboAssembler::Cmp(Register dst, Smi src) {
if (src.value() == 0) {
test_tagged(dst, dst);
} else {
@@ -1282,19 +1286,19 @@ void MacroAssembler::Cmp(Register dst, Smi src) {
}
}
-void MacroAssembler::SmiCompare(Register dst, Operand src) {
+void TurboAssembler::SmiCompare(Register dst, Operand src) {
AssertSmi(dst);
AssertSmi(src);
cmp_tagged(dst, src);
}
-void MacroAssembler::SmiCompare(Operand dst, Register src) {
+void TurboAssembler::SmiCompare(Operand dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
cmp_tagged(dst, src);
}
-void MacroAssembler::SmiCompare(Operand dst, Smi src) {
+void TurboAssembler::SmiCompare(Operand dst, Smi src) {
AssertSmi(dst);
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value()));
@@ -1304,7 +1308,7 @@ void MacroAssembler::SmiCompare(Operand dst, Smi src) {
}
}
-void MacroAssembler::Cmp(Operand dst, Smi src) {
+void TurboAssembler::Cmp(Operand dst, Smi src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
DCHECK(!dst.AddressUsesRegister(smi_reg));
@@ -1329,19 +1333,19 @@ void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
j(smi, on_smi, near_jump);
}
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
+void TurboAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
+void TurboAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
+void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) {
if (constant.value() != 0) {
if (SmiValuesAre32Bits()) {
addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value()));
@@ -1361,7 +1365,7 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
}
}
-SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) {
if (SmiValuesAre32Bits()) {
DCHECK(is_uint6(shift));
// There is a possible optimization if shift is in the range 60-63, but that
@@ -1667,6 +1671,65 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements,
PushReturnAddressFrom(scratch);
}
+void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ int receiver_bytes =
+ (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
+ switch (type) {
+ case kCountIsInteger: {
+ leaq(rsp, Operand(rsp, count, times_system_pointer_size, receiver_bytes));
+ break;
+ }
+ case kCountIsSmi: {
+ SmiIndex index = SmiToIndex(count, count, kSystemPointerSizeLog2);
+ leaq(rsp, Operand(rsp, index.reg, index.scale, receiver_bytes));
+ break;
+ }
+ case kCountIsBytes: {
+ if (receiver_bytes == 0) {
+ addq(rsp, count);
+ } else {
+ leaq(rsp, Operand(rsp, count, times_1, receiver_bytes));
+ }
+ break;
+ }
+ }
+}
+
+void TurboAssembler::DropArguments(Register count, Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(count, scratch));
+ PopReturnAddressTo(scratch);
+ DropArguments(count, type, mode);
+ PushReturnAddressFrom(scratch);
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Register receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, receiver, scratch));
+ PopReturnAddressTo(scratch);
+ DropArguments(argc, type, mode);
+ Push(receiver);
+ PushReturnAddressFrom(scratch);
+}
+
+void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+ Operand receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode) {
+ DCHECK(!AreAliased(argc, scratch));
+ DCHECK(!receiver.AddressUsesRegister(scratch));
+ PopReturnAddressTo(scratch);
+ DropArguments(argc, type, mode);
+ Push(receiver);
+ PushReturnAddressFrom(scratch);
+}
+
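A scalar sketch of the stack-pointer arithmetic performed by DropArguments above, assuming 8-byte stack slots and the full 64-bit Smi layout (kSmiShift == 32); the enum names mirror the ones declared in the header, but the function itself only illustrates how many bytes end up being added to rsp:

#include <cassert>
#include <cstdint>

enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };

// How many bytes the stack pointer is advanced to drop the arguments
// (plus the receiver slot, when the count excludes it).
uint64_t DropBytes(uint64_t count, ArgumentsCountType type,
                   ArgumentsCountMode mode) {
  const int kSystemPointerSize = 8;
  const int kSmiShift = 32;  // full 64-bit Smi layout assumed
  uint64_t receiver_bytes =
      (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
  switch (type) {
    case kCountIsInteger:
      return count * kSystemPointerSize + receiver_bytes;
    case kCountIsSmi:
      return (count >> kSmiShift) * kSystemPointerSize + receiver_bytes;
    case kCountIsBytes:
      return count + receiver_bytes;
  }
  return 0;
}

int main() {
  assert(DropBytes(3, kCountIsInteger, kCountExcludesReceiver) == 32);
  assert(DropBytes(uint64_t{3} << 32, kCountIsSmi, kCountIncludesReceiver) == 24);
  assert(DropBytes(24, kCountIsBytes, kCountExcludesReceiver) == 32);
}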
void TurboAssembler::Push(Register src) { pushq(src); }
void TurboAssembler::Push(Operand src) { pushq(src); }
@@ -2694,21 +2757,21 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
-void MacroAssembler::AssertSmi(Operand object) {
+void TurboAssembler::AssertSmi(Operand object) {
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
@@ -2859,55 +2922,6 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
}
}
-void TurboAssembler::PrepareForTailCall(Register callee_args_count,
- Register caller_args_count,
- Register scratch0, Register scratch1) {
- ASM_CODE_COMMENT(this);
- DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
-
- // Calculate the destination address where we will put the return address
- // after we drop current frame.
- Register new_sp_reg = scratch0;
- subq(caller_args_count, callee_args_count);
- leaq(new_sp_reg, Operand(rbp, caller_args_count, times_system_pointer_size,
- StandardFrameConstants::kCallerPCOffset));
-
- if (FLAG_debug_code) {
- cmpq(rsp, new_sp_reg);
- Check(below, AbortReason::kStackAccessBelowStackPointer);
- }
-
- // Copy return address from caller's frame to current frame's return address
- // to avoid its trashing and let the following loop copy it to the right
- // place.
- Register tmp_reg = scratch1;
- movq(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- movq(Operand(rsp, 0), tmp_reg);
-
- // Restore caller's frame pointer now as it could be overwritten by
- // the copying loop.
- movq(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // +2 here is to copy both receiver and return address.
- Register count_reg = caller_args_count;
- leaq(count_reg, Operand(callee_args_count, 2));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
- Label loop, entry;
- jmp(&entry, Label::kNear);
- bind(&loop);
- decq(count_reg);
- movq(tmp_reg, Operand(rsp, count_reg, times_system_pointer_size, 0));
- movq(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
- bind(&entry);
- cmpq(count_reg, Immediate(0));
- j(not_equal, &loop, Label::kNear);
-
- // Leave current frame.
- movq(rsp, new_sp_reg);
-}
-
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
@@ -3000,23 +3014,22 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
}
void MacroAssembler::StackOverflowCheck(
- Register num_args, Register scratch, Label* stack_overflow,
+ Register num_args, Label* stack_overflow,
Label::Distance stack_overflow_distance) {
ASM_CODE_COMMENT(this);
- DCHECK_NE(num_args, scratch);
+ DCHECK_NE(num_args, kScratchRegister);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- movq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
- movq(scratch, rsp);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- subq(scratch, kScratchRegister);
+ movq(kScratchRegister, rsp);
+ // Make kScratchRegister the space we have left. The stack might already be
+ // overflowed here which will cause kScratchRegister to become negative.
+ subq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
// TODO(victorgomes): Use ia32 approach with leaq, since it requires fewer
// instructions.
- sarq(scratch, Immediate(kSystemPointerSizeLog2));
+ sarq(kScratchRegister, Immediate(kSystemPointerSizeLog2));
// Check if the arguments will overflow the stack.
- cmpq(scratch, num_args);
+ cmpq(kScratchRegister, num_args);
// Signed comparison.
// TODO(victorgomes): Save some bytes in the builtins that use stack checks
// by jumping to a builtin that throws the exception.
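A scalar sketch of the overflow check above, assuming 8-byte stack slots and that the branch following the comparison treats "slots left <= argument count" as overflow, as the signed-comparison comment suggests (the signed arithmetic also covers the case where the stack pointer is already below the real limit):

#include <cassert>
#include <cstdint>

bool WouldOverflowStack(uint64_t rsp, uint64_t real_stack_limit,
                        int64_t num_args) {
  const int kSystemPointerSizeLog2 = 3;
  // Space left in bytes; negative if the stack is already overflowed.
  int64_t space = static_cast<int64_t>(rsp - real_stack_limit);
  int64_t slots_left = space >> kSystemPointerSizeLog2;  // signed shift
  return slots_left <= num_args;
}

int main() {
  assert(!WouldOverflowStack(0x8000, 0x4000, 100));  // plenty of room
  assert(WouldOverflowStack(0x4010, 0x4000, 100));   // only two slots left
  assert(WouldOverflowStack(0x3000, 0x4000, 0));     // already below the limit
}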
@@ -3043,7 +3056,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
j(less_equal, &regular_invoke, Label::kFar);
Label stack_overflow;
- StackOverflowCheck(expected_parameter_count, rcx, &stack_overflow);
+ StackOverflowCheck(expected_parameter_count, &stack_overflow);
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
@@ -3147,6 +3160,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
if (!StackFrame::IsJavaScript(type)) {
Push(Immediate(StackFrame::TypeToMarker(type)));
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index c303eed9e2..02b9eb410e 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -123,6 +123,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Ret(int bytes_dropped, Register scratch);
// Operations on roots in the root-array.
+ Operand RootAsOperand(RootIndex index);
void LoadRoot(Register destination, RootIndex index) final;
void LoadRoot(Operand destination, RootIndex index) {
LoadRoot(kScratchRegister, index);
@@ -224,14 +225,74 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Popcntq(Register dst, Register src);
void Popcntq(Register dst, Operand src);
- // Is the value a tagged smi.
+ void Cmp(Register dst, Smi src);
+ void Cmp(Operand dst, Smi src);
+ void Cmp(Register dst, int32_t src);
+
+ // ---------------------------------------------------------------------------
+ // Conversions between tagged smi values and non-tagged integer values.
+
+ // Tag a word-size value. The result must be known to be a valid smi value.
+ void SmiTag(Register reg);
+ // Requires dst != src
+ void SmiTag(Register dst, Register src);
+
+ // Simple comparison of smis. Both sides must be known smis to use these,
+ // otherwise use Cmp.
+ void SmiCompare(Register smi1, Register smi2);
+ void SmiCompare(Register dst, Smi src);
+ void SmiCompare(Register dst, Operand src);
+ void SmiCompare(Operand dst, Register src);
+ void SmiCompare(Operand dst, Smi src);
+
+ // Functions performing a check on a known or potential smi. Returns
+ // a condition that is satisfied if the check is successful.
Condition CheckSmi(Register src);
Condition CheckSmi(Operand src);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
+ void AssertSmi(Operand object);
+
+ // Test-and-jump functions. Typically combines a check function
+ // above with a conditional jump.
+
// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src, Label* on_smi,
Label::Distance near_jump = Label::kFar);
+ // Jump to label if the value is not a tagged smi.
+ void JumpIfNotSmi(Register src, Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
+
+ // Jump to label if the value is not a tagged smi.
+ void JumpIfNotSmi(Operand src, Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
+
+ // Operations on tagged smi values.
+
+ // Smis represent a subset of integers. The subset is always equivalent to
+ // a two's complement interpretation of a fixed number of bits.
+
+ // Add an integer constant to a tagged smi, giving a tagged smi as result.
+ // No overflow testing on the result is done.
+ void SmiAddConstant(Operand dst, Smi constant);
+
+ // Specialized operations
+
+ // Converts, if necessary, a smi to a combination of number and
+ // multiplier to be used as a scaled index.
+ // The src register contains a *positive* smi value. The shift is the
+ // power of two to multiply the index value by (e.g. to index by
+ // smi-value * kSystemPointerSize, pass the smi and kSystemPointerSizeLog2).
+ // The returned index register may be either src or dst, depending
+ // on what is most efficient. If src and dst are different registers,
+ // src is always unchanged.
+ SmiIndex SmiToIndex(Register dst, Register src, int shift);
+
void JumpIfEqual(Register a, int32_t b, Label* dest) {
cmpl(a, Immediate(b));
j(equal, dest);
@@ -449,6 +510,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void StubPrologue(StackFrame::Type type);
void Prologue();
+ // Helpers for argument handling
+ enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
+ enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
+ void DropArguments(Register count, Register scratch, ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+ void DropArgumentsAndPushNewReceiver(Register argc, Operand receiver,
+ Register scratch,
+ ArgumentsCountType type,
+ ArgumentsCountMode mode);
+
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, AbortReason reason);
@@ -495,14 +570,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
}
#endif
- // Removes current frame and its arguments from the stack preserving the
- // arguments and a return address pushed to the stack for the next call. Both
- // |callee_args_count| and |caller_args_count| do not include receiver.
- // |callee_args_count| is not modified. |caller_args_count| is trashed.
- void PrepareForTailCall(Register callee_args_count,
- Register caller_args_count, Register scratch0,
- Register scratch1);
-
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, isolate_root);
@@ -633,6 +700,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi value);
+
+ // Drops arguments assuming that the return address was already popped.
+ void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+ ArgumentsCountMode mode = kCountExcludesReceiver);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -754,64 +825,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, InvokeType type);
// ---------------------------------------------------------------------------
- // Conversions between tagged smi values and non-tagged integer values.
-
- // Tag an word-size value. The result must be known to be a valid smi value.
- void SmiTag(Register reg);
- // Requires dst != src
- void SmiTag(Register dst, Register src);
-
- // Simple comparison of smis. Both sides must be known smis to use these,
- // otherwise use Cmp.
- void SmiCompare(Register smi1, Register smi2);
- void SmiCompare(Register dst, Smi src);
- void SmiCompare(Register dst, Operand src);
- void SmiCompare(Operand dst, Register src);
- void SmiCompare(Operand dst, Smi src);
-
- // Functions performing a check on a known or potential smi. Returns
- // a condition that is satisfied if the check is successful.
-
- // Test-and-jump functions. Typically combines a check function
- // above with a conditional jump.
-
- // Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src, Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Operand src, Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Operations on tagged smi values.
-
- // Smis represent a subset of integers. The subset is always equivalent to
- // a two's complement interpretation of a fixed number of bits.
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(Operand dst, Smi constant);
-
- // Specialized operations
-
- // Converts, if necessary, a smi to a combination of number and
- // multiplier to be used as a scaled index.
- // The src register contains a *positive* smi value. The shift is the
- // power of two to multiply the index value by (e.g. to index by
- // smi-value * kSystemPointerSize, pass the smi and kSystemPointerSizeLog2).
- // The returned index register may be either src or dst, depending
- // on what is most efficient. If src and dst are different registers,
- // src is always unchanged.
- SmiIndex SmiToIndex(Register dst, Register src, int shift);
-
- // ---------------------------------------------------------------------------
// Macro instructions.
+ using TurboAssembler::Cmp;
void Cmp(Register dst, Handle<Object> source);
void Cmp(Operand dst, Handle<Object> source);
- void Cmp(Register dst, Smi src);
- void Cmp(Operand dst, Smi src);
- void Cmp(Register dst, int32_t src);
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
@@ -827,7 +845,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// clobbering the rsp register.
void DropUnderReturnAddress(int stack_elements,
Register scratch = kScratchRegister);
-
void PushQuad(Operand src);
void PushImm32(int32_t imm32);
void Pop(Register dst);
@@ -865,13 +882,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
andq(reg, Immediate(mask));
}
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
-
- // Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object);
- void AssertSmi(Operand object);
-
// Abort execution if argument is not a CodeT, enabled via --debug-code.
void AssertCodeT(Register object);
@@ -957,7 +967,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Stack limit utilities
Operand StackLimitAsOperand(StackLimitKind kind);
void StackOverflowCheck(
- Register num_args, Register scratch, Label* stack_overflow,
+ Register num_args, Label* stack_overflow,
Label::Distance stack_overflow_distance = Label::kFar);
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 9a06221e82..6aee59eb83 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -14,6 +14,7 @@
#include "include/v8-internal.h"
#include "src/base/atomic-utils.h"
#include "src/base/build_config.h"
+#include "src/base/enum-set.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -750,11 +751,15 @@ struct SlotTraits {
using TMaybeObjectSlot = CompressedMaybeObjectSlot;
using THeapObjectSlot = CompressedHeapObjectSlot;
using TOffHeapObjectSlot = OffHeapCompressedObjectSlot;
+ // TODO(v8:11880): switch to OffHeapCompressedObjectSlot.
+ using TCodeObjectSlot = CompressedObjectSlot;
#else
using TObjectSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
using THeapObjectSlot = FullHeapObjectSlot;
using TOffHeapObjectSlot = OffHeapFullObjectSlot;
+ // TODO(v8:11880): switch to OffHeapFullObjectSlot.
+ using TCodeObjectSlot = FullObjectSlot;
#endif
};
@@ -776,6 +781,12 @@ using HeapObjectSlot = SlotTraits::THeapObjectSlot;
// off-heap.
using OffHeapObjectSlot = SlotTraits::TOffHeapObjectSlot;
+// A CodeObjectSlot instance describes a kTaggedSize-sized field ("slot")
+// holding a strong pointer to a Code object. The Code object slots might be
+// compressed, and since code space might be allocated off the main heap,
+// the load operations require an explicit cage base value for code space.
+using CodeObjectSlot = SlotTraits::TCodeObjectSlot;
+
using WeakSlotCallback = bool (*)(FullObjectSlot pointer);
using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer);
@@ -869,12 +880,28 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum class BytecodeFlushMode {
- kDoNotFlushBytecode,
+enum class CodeFlushMode {
kFlushBytecode,
- kStressFlushBytecode,
+ kFlushBaselineCode,
+ kStressFlushCode,
};
+bool inline IsBaselineCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
+ return mode.contains(CodeFlushMode::kFlushBaselineCode);
+}
+
+bool inline IsByteCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
+ return mode.contains(CodeFlushMode::kFlushBytecode);
+}
+
+bool inline IsStressFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
+ return mode.contains(CodeFlushMode::kStressFlushCode);
+}
+
+bool inline IsFlushingDisabled(base::EnumSet<CodeFlushMode> mode) {
+ return mode.empty();
+}
+
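The helpers above query a base::EnumSet<CodeFlushMode>. A self-contained sketch of how callers might build and test such a set; the EnumSet class below is a minimal stand-in modelling only contains() and empty(), not V8's real implementation:

#include <cassert>

enum class CodeFlushMode { kFlushBytecode, kFlushBaselineCode, kStressFlushCode };

// Minimal stand-in for base::EnumSet<CodeFlushMode>: a bitmask keyed by the
// enumerator's ordinal value.
class EnumSet {
 public:
  void Add(CodeFlushMode mode) { bits_ |= 1u << static_cast<int>(mode); }
  bool contains(CodeFlushMode mode) const {
    return (bits_ & (1u << static_cast<int>(mode))) != 0;
  }
  bool empty() const { return bits_ == 0; }

 private:
  unsigned bits_ = 0;
};

bool IsByteCodeFlushingEnabled(EnumSet mode) {
  return mode.contains(CodeFlushMode::kFlushBytecode);
}
bool IsFlushingDisabled(EnumSet mode) { return mode.empty(); }

int main() {
  EnumSet mode;
  assert(IsFlushingDisabled(mode));
  mode.Add(CodeFlushMode::kFlushBytecode);
  assert(IsByteCodeFlushingEnabled(mode));
  assert(!mode.contains(CodeFlushMode::kFlushBaselineCode));
}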
// Indicates whether a script should be parsed and compiled in REPL mode.
enum class REPLMode {
kYes,
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
index 6b3eadf801..f08a549385 100644
--- a/deps/v8/src/compiler-dispatcher/OWNERS
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -1,3 +1,4 @@
jkummerow@chromium.org
leszeks@chromium.org
rmcilroy@chromium.org
+victorgomes@chromium.org
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
index ab7d5086c1..e57463d404 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/ast/ast.h"
#include "src/base/platform/time.h"
@@ -21,13 +21,14 @@
namespace v8 {
namespace internal {
-CompilerDispatcher::Job::Job(BackgroundCompileTask* task_arg)
+LazyCompileDispatcher::Job::Job(BackgroundCompileTask* task_arg)
: task(task_arg), has_run(false), aborted(false) {}
-CompilerDispatcher::Job::~Job() = default;
+LazyCompileDispatcher::Job::~Job() = default;
-CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
- size_t max_stack_size)
+LazyCompileDispatcher::LazyCompileDispatcher(Isolate* isolate,
+ Platform* platform,
+ size_t max_stack_size)
: isolate_(isolate),
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
@@ -47,20 +48,20 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
block_for_testing_(false),
semaphore_for_testing_(0) {
if (trace_compiler_dispatcher_ && !IsEnabled()) {
- PrintF("CompilerDispatcher: dispatcher is disabled\n");
+ PrintF("LazyCompileDispatcher: dispatcher is disabled\n");
}
}
-CompilerDispatcher::~CompilerDispatcher() {
- // AbortAll must be called before CompilerDispatcher is destroyed.
+LazyCompileDispatcher::~LazyCompileDispatcher() {
+ // AbortAll must be called before LazyCompileDispatcher is destroyed.
CHECK(task_manager_->canceled());
}
-base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
+base::Optional<LazyCompileDispatcher::JobId> LazyCompileDispatcher::Enqueue(
const ParseInfo* outer_parse_info, const AstRawString* function_name,
const FunctionLiteral* function_literal) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherEnqueue");
+ "V8.LazyCompilerDispatcherEnqueue");
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
if (!IsEnabled()) return base::nullopt;
@@ -72,8 +73,9 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
JobMap::const_iterator it = InsertJob(std::move(job));
JobId id = it->first;
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: enqueued job %zu for function literal id %d\n",
- id, function_literal->function_literal_id());
+ PrintF(
+ "LazyCompileDispatcher: enqueued job %zu for function literal id %d\n",
+ id, function_literal->function_literal_id());
}
// Post a background worker task to perform the compilation on the worker
@@ -86,23 +88,26 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
return base::make_optional(id);
}
-bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
+bool LazyCompileDispatcher::IsEnabled() const {
+ return FLAG_lazy_compile_dispatcher;
+}
-bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
+bool LazyCompileDispatcher::IsEnqueued(
+ Handle<SharedFunctionInfo> function) const {
if (jobs_.empty()) return false;
return GetJobFor(function) != jobs_.end();
}
-bool CompilerDispatcher::IsEnqueued(JobId job_id) const {
+bool LazyCompileDispatcher::IsEnqueued(JobId job_id) const {
return jobs_.find(job_id) != jobs_.end();
}
-void CompilerDispatcher::RegisterSharedFunctionInfo(
+void LazyCompileDispatcher::RegisterSharedFunctionInfo(
JobId job_id, SharedFunctionInfo function) {
DCHECK_NE(jobs_.find(job_id), jobs_.end());
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: registering ");
+ PrintF("LazyCompileDispatcher: registering ");
function.ShortPrint();
PrintF(" with job id %zu\n", job_id);
}
@@ -127,9 +132,9 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
}
}
-void CompilerDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
+void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherWaitForBackgroundJob");
+ "V8.LazyCompilerDispatcherWaitForBackgroundJob");
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
base::MutexGuard lock(&mutex_);
@@ -146,12 +151,12 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
}
-bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
+bool LazyCompileDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherFinishNow");
+ "V8.LazyCompilerDispatcherFinishNow");
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileFinishNowOnDispatcher);
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: finishing ");
+ PrintF("LazyCompileDispatcher: finishing ");
function->ShortPrint();
PrintF(" now\n");
}
@@ -176,9 +181,9 @@ bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
return success;
}
-void CompilerDispatcher::AbortJob(JobId job_id) {
+void LazyCompileDispatcher::AbortJob(JobId job_id) {
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: aborted job %zu\n", job_id);
+ PrintF("LazyCompileDispatcher: aborted job %zu\n", job_id);
}
JobMap::const_iterator job_it = jobs_.find(job_id);
Job* job = job_it->second.get();
@@ -194,13 +199,13 @@ void CompilerDispatcher::AbortJob(JobId job_id) {
}
}
-void CompilerDispatcher::AbortAll() {
+void LazyCompileDispatcher::AbortAll() {
task_manager_->TryAbortAll();
for (auto& it : jobs_) {
WaitForJobIfRunningOnBackground(it.second.get());
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: aborted job %zu\n", it.first);
+ PrintF("LazyCompileDispatcher: aborted job %zu\n", it.first);
}
}
jobs_.clear();
@@ -214,7 +219,7 @@ void CompilerDispatcher::AbortAll() {
task_manager_->CancelAndWait();
}
-CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
+LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::GetJobFor(
Handle<SharedFunctionInfo> shared) const {
JobId* job_id_ptr = shared_to_unoptimized_job_id_.Find(shared);
JobMap::const_iterator job = jobs_.end();
@@ -224,7 +229,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
return job;
}
-void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
+void LazyCompileDispatcher::ScheduleIdleTaskFromAnyThread(
const base::MutexGuard&) {
if (!taskrunner_->IdleTasksEnabled()) return;
if (idle_task_scheduled_) return;
@@ -235,9 +240,9 @@ void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
[this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); }));
}
-void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
+void LazyCompileDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
+ "V8.LazyCompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
{
base::MutexGuard lock(&mutex_);
if (pending_background_jobs_.empty()) return;
@@ -250,9 +255,9 @@ void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
MakeCancelableTask(task_manager_.get(), [this] { DoBackgroundWork(); }));
}
-void CompilerDispatcher::DoBackgroundWork() {
+void LazyCompileDispatcher::DoBackgroundWork() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherDoBackgroundWork");
+ "V8.LazyCompilerDispatcherDoBackgroundWork");
for (;;) {
Job* job = nullptr;
{
@@ -272,7 +277,7 @@ void CompilerDispatcher::DoBackgroundWork() {
}
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: doing background work\n");
+ PrintF("LazyCompileDispatcher: doing background work\n");
}
job->task->Run();
@@ -303,22 +308,22 @@ void CompilerDispatcher::DoBackgroundWork() {
// deleted.
}
-void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
+void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherDoIdleWork");
+ "V8.LazyCompilerDispatcherDoIdleWork");
{
base::MutexGuard lock(&mutex_);
idle_task_scheduled_ = false;
}
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
+ PrintF("LazyCompileDispatcher: received %0.1lfms of idle time\n",
(deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) *
static_cast<double>(base::Time::kMillisecondsPerSecond));
}
while (deadline_in_seconds > platform_->MonotonicallyIncreasingTime()) {
// Find a job which is pending finalization and has a shared function info
- CompilerDispatcher::JobMap::const_iterator it;
+ LazyCompileDispatcher::JobMap::const_iterator it;
{
base::MutexGuard lock(&mutex_);
for (it = jobs_.cbegin(); it != jobs_.cend(); ++it) {
@@ -351,7 +356,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
}
}
-CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
+LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::InsertJob(
std::unique_ptr<Job> job) {
bool added;
JobMap::const_iterator it;
@@ -361,8 +366,8 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
return it;
}
-CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
- CompilerDispatcher::JobMap::const_iterator it) {
+LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::RemoveJob(
+ LazyCompileDispatcher::JobMap::const_iterator it) {
Job* job = it->second.get();
DCHECK_EQ(running_background_jobs_.find(job), running_background_jobs_.end());
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
index 501940314c..9690e2e6ff 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
-#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
+#ifndef V8_COMPILER_DISPATCHER_LAZY_COMPILE_DISPATCHER_H_
+#define V8_COMPILER_DISPATCHER_LAZY_COMPILE_DISPATCHER_H_
#include <cstdint>
#include <map>
@@ -34,7 +34,6 @@ class AstValueFactory;
class BackgroundCompileTask;
class CancelableTaskManager;
class UnoptimizedCompileJob;
-class CompilerDispatcherTracer;
class FunctionLiteral;
class Isolate;
class ParseInfo;
@@ -46,8 +45,8 @@ class Zone;
template <typename T>
class Handle;
-// The CompilerDispatcher uses a combination of idle tasks and background tasks
-// to parse and compile lazily parsed functions.
+// The LazyCompileDispatcher uses a combination of idle tasks and background
+// tasks to parse and compile lazily parsed functions.
//
// As both parsing and compilation currently require a preparation and
// finalization step that happens on the main thread, every task has to be
@@ -55,32 +54,32 @@ class Handle;
// can then be parsed or compiled on either background threads, or during idle
// time. Last, it has to be finalized during idle time again.
//
-// CompilerDispatcher::jobs_ maintains the list of all CompilerDispatcherJobs
-// the CompilerDispatcher knows about.
+// LazyCompileDispatcher::jobs_ maintains the list of all
+// LazyCompilerDispatcherJobs the LazyCompileDispatcher knows about.
//
-// CompilerDispatcher::pending_background_jobs_ contains the set of
-// CompilerDispatcherJobs that can be processed on a background thread.
+// LazyCompileDispatcher::pending_background_jobs_ contains the set of
+// LazyCompilerDispatcherJobs that can be processed on a background thread.
//
-// CompilerDispatcher::running_background_jobs_ contains the set of
-// CompilerDispatcherJobs that are currently being processed on a background
+// LazyCompileDispatcher::running_background_jobs_ contains the set of
+// LazyCompilerDispatcherJobs that are currently being processed on a background
// thread.
//
-// CompilerDispatcher::DoIdleWork tries to advance as many jobs out of jobs_ as
-// possible during idle time. If a job can't be advanced, but is suitable for
+// LazyCompileDispatcher::DoIdleWork tries to advance as many jobs out of jobs_
+// as possible during idle time. If a job can't be advanced, but is suitable for
// background processing, it fires off background threads.
//
-// CompilerDispatcher::DoBackgroundWork advances one of the pending jobs, and
-// then spins of another idle task to potentially do the final step on the main
-// thread.
-class V8_EXPORT_PRIVATE CompilerDispatcher {
+// LazyCompileDispatcher::DoBackgroundWork advances one of the pending jobs,
+// and then spins off another idle task to potentially do the final step on the
+// main thread.
+class V8_EXPORT_PRIVATE LazyCompileDispatcher {
public:
using JobId = uintptr_t;
- CompilerDispatcher(Isolate* isolate, Platform* platform,
- size_t max_stack_size);
- CompilerDispatcher(const CompilerDispatcher&) = delete;
- CompilerDispatcher& operator=(const CompilerDispatcher&) = delete;
- ~CompilerDispatcher();
+ LazyCompileDispatcher(Isolate* isolate, Platform* platform,
+ size_t max_stack_size);
+ LazyCompileDispatcher(const LazyCompileDispatcher&) = delete;
+ LazyCompileDispatcher& operator=(const LazyCompileDispatcher&) = delete;
+ ~LazyCompileDispatcher();
// Returns true if the compiler dispatcher is enabled.
bool IsEnabled() const;
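The class comment above describes jobs moving from jobs_ through pending background processing to idle-time finalization. A toy, single-threaded sketch of that bookkeeping (no real threading, mutexes, or compilation; purely illustrative, not the actual dispatcher):

#include <cstdint>
#include <cstdio>
#include <map>
#include <memory>
#include <set>

struct Job {
  bool has_run = false;    // background step completed
  bool finalized = false;  // idle-time step completed
};

using JobId = uintptr_t;
std::map<JobId, std::unique_ptr<Job>> jobs_;
std::set<Job*> pending_background_jobs_;

void Enqueue(JobId id) {
  auto it = jobs_.emplace(id, std::make_unique<Job>()).first;
  pending_background_jobs_.insert(it->second.get());
}

void DoBackgroundWork() {
  // In the real dispatcher this runs on worker threads; here we just drain.
  while (!pending_background_jobs_.empty()) {
    Job* job = *pending_background_jobs_.begin();
    pending_background_jobs_.erase(job);
    job->has_run = true;  // parsing/compilation would happen here
  }
}

void DoIdleWork() {
  for (auto& entry : jobs_) {
    if (entry.second->has_run) entry.second->finalized = true;
  }
}

int main() {
  Enqueue(1);
  Enqueue(2);
  DoBackgroundWork();
  DoIdleWork();
  std::printf("job 1 finalized: %d\n", jobs_[1]->finalized ? 1 : 0);
}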
@@ -109,14 +108,14 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
void AbortAll();
private:
- FRIEND_TEST(CompilerDispatcherTest, IdleTaskNoIdleTime);
- FRIEND_TEST(CompilerDispatcherTest, IdleTaskSmallIdleTime);
- FRIEND_TEST(CompilerDispatcherTest, FinishNowWithWorkerTask);
- FRIEND_TEST(CompilerDispatcherTest, AbortJobNotStarted);
- FRIEND_TEST(CompilerDispatcherTest, AbortJobAlreadyStarted);
- FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
- FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
- FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread);
+ FRIEND_TEST(LazyCompilerDispatcherTest, IdleTaskNoIdleTime);
+ FRIEND_TEST(LazyCompilerDispatcherTest, IdleTaskSmallIdleTime);
+ FRIEND_TEST(LazyCompilerDispatcherTest, FinishNowWithWorkerTask);
+ FRIEND_TEST(LazyCompilerDispatcherTest, AbortJobNotStarted);
+ FRIEND_TEST(LazyCompilerDispatcherTest, AbortJobAlreadyStarted);
+ FRIEND_TEST(LazyCompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
+ FRIEND_TEST(LazyCompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
+ FRIEND_TEST(LazyCompilerDispatcherTest, CompileMultipleOnBackgroundThread);
struct Job {
explicit Job(BackgroundCompileTask* task_arg);
@@ -202,4 +201,4 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
+#endif // V8_COMPILER_DISPATCHER_LAZY_COMPILE_DISPATCHER_H_
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index ac395aac8a..f8a7fa8814 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -131,7 +131,7 @@ void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
output_queue_.push(job);
}
- isolate_->stack_guard()->RequestInstallCode();
+ if (finalize()) isolate_->stack_guard()->RequestInstallCode();
}
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
@@ -159,6 +159,18 @@ void OptimizingCompileDispatcher::FlushInputQueue() {
}
}
+void OptimizingCompileDispatcher::AwaitCompileTasks() {
+ {
+ base::MutexGuard lock_guard(&ref_count_mutex_);
+ while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
+ }
+
+#ifdef DEBUG
+ base::MutexGuard access_input_queue(&input_queue_mutex_);
+ CHECK_EQ(input_queue_length_, 0);
+#endif // DEBUG
+}
+
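AwaitCompileTasks above blocks until the dispatcher's job reference count drops to zero. A minimal standalone sketch of that wait-for-refcount-zero pattern, using std::mutex and std::condition_variable in place of V8's base primitives:

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

std::mutex ref_count_mutex;
std::condition_variable ref_count_zero;
int ref_count = 0;

void RunCompileTask() {
  // ... background compilation work would happen here ...
  std::lock_guard<std::mutex> lock(ref_count_mutex);
  if (--ref_count == 0) ref_count_zero.notify_all();
}

void AwaitCompileTasks() {
  std::unique_lock<std::mutex> lock(ref_count_mutex);
  ref_count_zero.wait(lock, [] { return ref_count == 0; });
}

int main() {
  const int kTasks = 4;
  {
    std::lock_guard<std::mutex> lock(ref_count_mutex);
    ref_count = kTasks;
  }
  std::vector<std::thread> workers;
  for (int i = 0; i < kTasks; ++i) workers.emplace_back(RunCompileTask);
  AwaitCompileTasks();  // returns only after every worker has decremented
  for (auto& t : workers) t.join();
}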
void OptimizingCompileDispatcher::FlushQueues(
BlockingBehavior blocking_behavior, bool restore_function_code) {
if (FLAG_block_concurrent_recompilation) Unblock();
@@ -219,7 +231,7 @@ bool OptimizingCompileDispatcher::HasJobs() {
// Note: This relies on {output_queue_} being mutated by a background thread
// only when {ref_count_} is not zero. Also, {ref_count_} is never incremented
// by a background thread.
- return !(ref_count_ == 0 && output_queue_.empty());
+ return ref_count_ != 0 || !output_queue_.empty() || blocked_jobs_ != 0;
}
void OptimizingCompileDispatcher::QueueForOptimization(
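[Editor's note] The new AwaitCompileTasks() above blocks the main thread on a condition variable until the background reference count drops to zero. Below is a minimal standalone sketch of that wait pattern; it uses std::mutex and std::condition_variable instead of V8's base:: primitives purely for illustration.

#include <condition_variable>
#include <mutex>

// Hedged sketch: wait until a counter guarded by a mutex reaches zero,
// mirroring the ref_count_ / ref_count_zero_ wait in AwaitCompileTasks().
class TaskTracker {
 public:
  void TaskStarted() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++ref_count_;
  }
  void TaskFinished() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (--ref_count_ == 0) ref_count_zero_.notify_all();
  }
  void AwaitAll() {
    std::unique_lock<std::mutex> lock(mutex_);
    ref_count_zero_.wait(lock, [this] { return ref_count_ == 0; });
  }
 private:
  std::mutex mutex_;
  std::condition_variable ref_count_zero_;
  int ref_count_ = 0;
};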
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 4ae966192f..56592ed9b4 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// Takes ownership of |job|.
void QueueForOptimization(OptimizedCompilationJob* job);
void Unblock();
+ void AwaitCompileTasks();
void InstallOptimizedFunctions();
inline bool IsQueueAvailable() {
@@ -55,6 +56,15 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// This method must be called on the main thread.
bool HasJobs();
+ // Whether to finalize and thus install the optimized code. Defaults to true.
+ // Only set to false for testing (where finalization is then manually
+ // requested using %FinalizeOptimization).
+ bool finalize() const { return finalize_; }
+ void set_finalize(bool finalize) {
+ CHECK(!HasJobs());
+ finalize_ = finalize;
+ }
+
private:
class CompileTask;
@@ -101,6 +111,8 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// Since flags might get modified while the background thread is running, it
// is not safe to access them directly.
int recompilation_delay_;
+
+ bool finalize_ = true;
};
} // namespace internal
} // namespace v8
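[Editor's note] The finalize_ flag added above is a test-only switch: CompileNext() now requests code installation only when finalize() is true, and set_finalize() asserts that no jobs are in flight. A minimal standalone sketch of this guarded-toggle pattern follows; the class and member names are simplified and are not V8's.

#include <cassert>

// Hedged sketch of a test-only toggle that may only be flipped while the
// dispatcher is idle, mirroring set_finalize()'s CHECK(!HasJobs()).
class ToyDispatcher {
 public:
  bool HasJobs() const { return pending_jobs_ > 0; }
  bool finalize() const { return finalize_; }
  void set_finalize(bool finalize) {
    assert(!HasJobs());  // only safe while nothing is queued or running
    finalize_ = finalize;
  }
  void QueueJob() { ++pending_jobs_; }
  void OnJobCompiled() {
    --pending_jobs_;
    if (finalize()) InstallCode();  // matches the guarded RequestInstallCode()
  }
 private:
  void InstallCode() {}
  int pending_jobs_ = 0;
  bool finalize_ = true;  // default: install optimized code automatically
};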
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index b9e9293235..21f453f4d8 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -28,7 +28,7 @@ namespace compiler {
namespace {
-bool CanInlinePropertyAccess(Handle<Map> map, AccessMode access_mode) {
+bool CanInlinePropertyAccess(MapRef map, AccessMode access_mode) {
// We can inline property access to prototypes of all primitives, except
// the special Oddball ones that have no wrapper counterparts (i.e. Null,
// Undefined and TheHole).
@@ -37,16 +37,17 @@ bool CanInlinePropertyAccess(Handle<Map> map, AccessMode access_mode) {
// relationship between the map and the object (and therefore the property
// dictionary).
STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
- if (map->IsBooleanMap()) return true;
- if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
- if (map->IsJSObjectMap()) {
- if (map->is_dictionary_map()) {
+ if (map.object()->IsBooleanMap()) return true;
+ if (map.instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
+ if (map.object()->IsJSObjectMap()) {
+ if (map.is_dictionary_map()) {
if (!V8_DICT_PROPERTY_CONST_TRACKING_BOOL) return false;
- return access_mode == AccessMode::kLoad && map->is_prototype_map();
+ return access_mode == AccessMode::kLoad &&
+ map.object()->is_prototype_map();
}
- return !map->has_named_interceptor() &&
+ return !map.object()->has_named_interceptor() &&
// TODO(verwaest): Allowlist contexts to which we have access.
- !map->is_access_check_needed();
+ !map.is_access_check_needed();
}
return false;
}
@@ -82,8 +83,8 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
}
ElementAccessInfo::ElementAccessInfo(
- ZoneVector<Handle<Map>>&& lookup_start_object_maps,
- ElementsKind elements_kind, Zone* zone)
+ ZoneVector<MapRef>&& lookup_start_object_maps, ElementsKind elements_kind,
+ Zone* zone)
: elements_kind_(elements_kind),
lookup_start_object_maps_(lookup_start_object_maps),
transition_sources_(zone) {
@@ -96,22 +97,25 @@ PropertyAccessInfo PropertyAccessInfo::Invalid(Zone* zone) {
}
// static
-PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone,
- Handle<Map> receiver_map,
- MaybeHandle<JSObject> holder) {
+PropertyAccessInfo PropertyAccessInfo::NotFound(
+ Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder) {
return PropertyAccessInfo(zone, kNotFound, holder, {{receiver_map}, zone});
}
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- Zone* zone, Handle<Map> receiver_map,
+ Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+ Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+ base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
DCHECK_IMPLIES(
field_representation.IsDouble(),
- HasFieldRepresentationDependenciesOnMap(dependencies, field_owner_map));
+ HasFieldRepresentationDependenciesOnMap(
+ dependencies, transition_map.has_value()
+ ? transition_map->object()
+ : holder.has_value() ? holder->map().object()
+ : receiver_map.object()));
return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
field_representation, field_type, field_owner_map,
field_map, {{receiver_map}, zone},
@@ -120,11 +124,11 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
// static
PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
- Zone* zone, Handle<Map> receiver_map,
+ Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+ Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+ base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
field_index, field_representation, field_type,
field_owner_map, field_map, {{receiver_map}, zone},
@@ -133,39 +137,38 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
// static
PropertyAccessInfo PropertyAccessInfo::FastAccessorConstant(
- Zone* zone, Handle<Map> receiver_map, Handle<Object> constant,
- MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(zone, kFastAccessorConstant, holder, constant,
- MaybeHandle<Name>(), {{receiver_map}, zone});
+ Zone* zone, MapRef receiver_map, base::Optional<ObjectRef> constant,
+ base::Optional<JSObjectRef> holder) {
+ return PropertyAccessInfo(zone, kFastAccessorConstant, holder, constant, {},
+ {{receiver_map}, zone});
}
// static
PropertyAccessInfo PropertyAccessInfo::ModuleExport(Zone* zone,
- Handle<Map> receiver_map,
- Handle<Cell> cell) {
- return PropertyAccessInfo(zone, kModuleExport, MaybeHandle<JSObject>(), cell,
- MaybeHandle<Name>{}, {{receiver_map}, zone});
+ MapRef receiver_map,
+ CellRef cell) {
+ return PropertyAccessInfo(zone, kModuleExport, {}, cell, {},
+ {{receiver_map}, zone});
}
// static
PropertyAccessInfo PropertyAccessInfo::StringLength(Zone* zone,
- Handle<Map> receiver_map) {
- return PropertyAccessInfo(zone, kStringLength, MaybeHandle<JSObject>(),
- {{receiver_map}, zone});
+ MapRef receiver_map) {
+ return PropertyAccessInfo(zone, kStringLength, {}, {{receiver_map}, zone});
}
// static
PropertyAccessInfo PropertyAccessInfo::DictionaryProtoDataConstant(
- Zone* zone, Handle<Map> receiver_map, Handle<JSObject> holder,
- InternalIndex dictionary_index, Handle<Name> name) {
+ Zone* zone, MapRef receiver_map, JSObjectRef holder,
+ InternalIndex dictionary_index, NameRef name) {
return PropertyAccessInfo(zone, kDictionaryProtoDataConstant, holder,
{{receiver_map}, zone}, dictionary_index, name);
}
// static
PropertyAccessInfo PropertyAccessInfo::DictionaryProtoAccessorConstant(
- Zone* zone, Handle<Map> receiver_map, MaybeHandle<JSObject> holder,
- Handle<Object> constant, Handle<Name> property_name) {
+ Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
+ ObjectRef constant, NameRef property_name) {
return PropertyAccessInfo(zone, kDictionaryProtoAccessorConstant, holder,
constant, property_name, {{receiver_map}, zone});
}
@@ -193,8 +196,8 @@ PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
dictionary_index_(InternalIndex::NotFound()) {}
PropertyAccessInfo::PropertyAccessInfo(
- Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps)
+ Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ ZoneVector<MapRef>&& lookup_start_object_maps)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
holder_(holder),
@@ -204,9 +207,9 @@ PropertyAccessInfo::PropertyAccessInfo(
dictionary_index_(InternalIndex::NotFound()) {}
PropertyAccessInfo::PropertyAccessInfo(
- Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant, MaybeHandle<Name> property_name,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps)
+ Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ base::Optional<ObjectRef> constant, base::Optional<NameRef> name,
+ ZoneVector<MapRef>&& lookup_start_object_maps)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
constant_(constant),
@@ -215,15 +218,16 @@ PropertyAccessInfo::PropertyAccessInfo(
field_representation_(Representation::None()),
field_type_(Type::Any()),
dictionary_index_(InternalIndex::NotFound()),
- name_(property_name) {
- DCHECK_IMPLIES(kind == kDictionaryProtoAccessorConstant,
- !property_name.is_null());
+ name_(name) {
+ DCHECK_IMPLIES(kind == kDictionaryProtoAccessorConstant, name.has_value());
}
+
PropertyAccessInfo::PropertyAccessInfo(
- Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
- FieldIndex field_index, Representation field_representation,
- Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ Kind kind, base::Optional<JSObjectRef> holder,
+ base::Optional<MapRef> transition_map, FieldIndex field_index,
+ Representation field_representation, Type field_type,
+ MapRef field_owner_map, base::Optional<MapRef> field_map,
+ ZoneVector<MapRef>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
@@ -236,14 +240,14 @@ PropertyAccessInfo::PropertyAccessInfo(
field_owner_map_(field_owner_map),
field_map_(field_map),
dictionary_index_(InternalIndex::NotFound()) {
- DCHECK_IMPLIES(!transition_map.is_null(),
- field_owner_map.address() == transition_map.address());
+ DCHECK_IMPLIES(transition_map.has_value(),
+ field_owner_map.equals(transition_map.value()));
}
PropertyAccessInfo::PropertyAccessInfo(
- Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps,
- InternalIndex dictionary_index, Handle<Name> name)
+ Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ ZoneVector<MapRef>&& lookup_start_object_maps,
+ InternalIndex dictionary_index, NameRef name)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
holder_(holder),
@@ -262,14 +266,31 @@ MinimorphicLoadPropertyAccessInfo::MinimorphicLoadPropertyAccessInfo(
field_representation_(field_representation),
field_type_(field_type) {}
+namespace {
+
+template <class RefT>
+bool OptionalRefEquals(base::Optional<RefT> lhs, base::Optional<RefT> rhs) {
+ if (!lhs.has_value()) return !rhs.has_value();
+ if (!rhs.has_value()) return false;
+ return lhs->equals(rhs.value());
+}
+
+template <class T>
+void AppendVector(ZoneVector<T>* dst, const ZoneVector<T>& src) {
+ dst->insert(dst->end(), src.begin(), src.end());
+}
+
+} // namespace
+
bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
AccessMode access_mode, Zone* zone) {
- if (this->kind_ != that->kind_) return false;
- if (this->holder_.address() != that->holder_.address()) return false;
+ if (kind_ != that->kind_) return false;
+ if (!OptionalRefEquals(holder_, that->holder_)) return false;
- switch (this->kind_) {
+ switch (kind_) {
case kInvalid:
- return that->kind_ == kInvalid;
+ DCHECK_EQ(that->kind_, kInvalid);
+ return true;
case kDataField:
case kFastDataConstant: {
@@ -277,90 +298,70 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
// GetFieldAccessStubKey method here just like the ICs do
// since that way we only compare the relevant bits of the
// field indices).
- if (this->field_index_.GetFieldAccessStubKey() ==
+ if (field_index_.GetFieldAccessStubKey() !=
that->field_index_.GetFieldAccessStubKey()) {
- switch (access_mode) {
- case AccessMode::kHas:
- case AccessMode::kLoad: {
- if (!this->field_representation_.Equals(
- that->field_representation_)) {
- if (this->field_representation_.IsDouble() ||
- that->field_representation_.IsDouble()) {
- return false;
- }
- this->field_representation_ = Representation::Tagged();
- }
- if (this->field_map_.address() != that->field_map_.address()) {
- this->field_map_ = MaybeHandle<Map>();
- }
- break;
- }
- case AccessMode::kStore:
- case AccessMode::kStoreInLiteral: {
- // For stores, the field map and field representation information
- // must match exactly, otherwise we cannot merge the stores. We
- // also need to make sure that in case of transitioning stores,
- // the transition targets match.
- if (this->field_map_.address() != that->field_map_.address() ||
- !this->field_representation_.Equals(
- that->field_representation_) ||
- this->transition_map_.address() !=
- that->transition_map_.address()) {
+ return false;
+ }
+
+ switch (access_mode) {
+ case AccessMode::kHas:
+ case AccessMode::kLoad: {
+ if (!field_representation_.Equals(that->field_representation_)) {
+ if (field_representation_.IsDouble() ||
+ that->field_representation_.IsDouble()) {
return false;
}
- break;
+ field_representation_ = Representation::Tagged();
+ }
+ if (!OptionalRefEquals(field_map_, that->field_map_)) {
+ field_map_ = {};
+ }
+ break;
+ }
+ case AccessMode::kStore:
+ case AccessMode::kStoreInLiteral: {
+ // For stores, the field map and field representation information
+ // must match exactly, otherwise we cannot merge the stores. We
+ // also need to make sure that in case of transitioning stores,
+ // the transition targets match.
+ if (!OptionalRefEquals(field_map_, that->field_map_) ||
+ !field_representation_.Equals(that->field_representation_) ||
+ !OptionalRefEquals(transition_map_, that->transition_map_)) {
+ return false;
}
+ break;
}
- this->field_type_ =
- Type::Union(this->field_type_, that->field_type_, zone);
- this->lookup_start_object_maps_.insert(
- this->lookup_start_object_maps_.end(),
- that->lookup_start_object_maps_.begin(),
- that->lookup_start_object_maps_.end());
- this->unrecorded_dependencies_.insert(
- this->unrecorded_dependencies_.end(),
- that->unrecorded_dependencies_.begin(),
- that->unrecorded_dependencies_.end());
- return true;
}
- return false;
+
+ field_type_ = Type::Union(field_type_, that->field_type_, zone);
+ AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
+ AppendVector(&unrecorded_dependencies_, that->unrecorded_dependencies_);
+ return true;
}
case kDictionaryProtoAccessorConstant:
case kFastAccessorConstant: {
// Check if we actually access the same constant.
- if (this->constant_.address() == that->constant_.address()) {
- DCHECK(this->unrecorded_dependencies_.empty());
- DCHECK(that->unrecorded_dependencies_.empty());
- this->lookup_start_object_maps_.insert(
- this->lookup_start_object_maps_.end(),
- that->lookup_start_object_maps_.begin(),
- that->lookup_start_object_maps_.end());
- return true;
- }
- return false;
+ if (!OptionalRefEquals(constant_, that->constant_)) return false;
+
+ DCHECK(unrecorded_dependencies_.empty());
+ DCHECK(that->unrecorded_dependencies_.empty());
+ AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
+ return true;
}
case kDictionaryProtoDataConstant: {
DCHECK_EQ(AccessMode::kLoad, access_mode);
- if (this->dictionary_index_ == that->dictionary_index_) {
- this->lookup_start_object_maps_.insert(
- this->lookup_start_object_maps_.end(),
- that->lookup_start_object_maps_.begin(),
- that->lookup_start_object_maps_.end());
- return true;
- }
- return false;
+ if (dictionary_index_ != that->dictionary_index_) return false;
+ AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
+ return true;
}
case kNotFound:
case kStringLength: {
- DCHECK(this->unrecorded_dependencies_.empty());
+ DCHECK(unrecorded_dependencies_.empty());
DCHECK(that->unrecorded_dependencies_.empty());
- this->lookup_start_object_maps_.insert(
- this->lookup_start_object_maps_.end(),
- that->lookup_start_object_maps_.begin(),
- that->lookup_start_object_maps_.end());
+ AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
return true;
}
case kModuleExport:
@@ -369,10 +370,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
ConstFieldInfo PropertyAccessInfo::GetConstFieldInfo() const {
- if (IsFastDataConstant()) {
- return ConstFieldInfo(field_owner_map_.ToHandleChecked());
- }
- return ConstFieldInfo::None();
+ return IsFastDataConstant() ? ConstFieldInfo(field_owner_map_->object())
+ : ConstFieldInfo::None();
}
AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
@@ -384,13 +383,9 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
zone_(zone) {}
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
- Handle<Map> map, AccessMode access_mode) const {
- // Check if it is safe to inline element access for the {map}.
- base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
- if (!map_ref.has_value()) return {};
- if (!CanInlineElementAccess(*map_ref)) return base::nullopt;
- ElementsKind const elements_kind = map_ref->elements_kind();
- return ElementAccessInfo({{map}, zone()}, elements_kind, zone());
+ MapRef map, AccessMode access_mode) const {
+ if (!CanInlineElementAccess(map)) return {};
+ return ElementAccessInfo({{map}, zone()}, map.elements_kind(), zone());
}
bool AccessInfoFactory::ComputeElementAccessInfos(
@@ -412,13 +407,17 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
for (auto const& group : feedback.transition_groups()) {
DCHECK(!group.empty());
- Handle<Map> target = group.front();
+ base::Optional<MapRef> target =
+ MakeRefAssumeMemoryFence(broker(), group.front());
base::Optional<ElementAccessInfo> access_info =
- ComputeElementAccessInfo(target, access_mode);
+ ComputeElementAccessInfo(target.value(), access_mode);
if (!access_info.has_value()) return false;
for (size_t i = 1; i < group.size(); ++i) {
- access_info->AddTransitionSource(group[i]);
+ base::Optional<MapRef> map_ref =
+ MakeRefAssumeMemoryFence(broker(), group[i]);
+ if (!map_ref.has_value()) continue;
+ access_info->AddTransitionSource(map_ref.value());
}
access_infos->push_back(*access_info);
}
@@ -426,11 +425,11 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
}
PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
- Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
+ MapRef receiver_map, MapRef map, base::Optional<JSObjectRef> holder,
InternalIndex descriptor, AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
- map->instance_descriptors(kAcquireLoad));
+ // TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
+ Handle<DescriptorArray> descriptors = map.instance_descriptors().object();
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
Representation details_representation = details.representation();
@@ -442,34 +441,31 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// here and fall back to use the regular IC logic instead.
return Invalid();
}
- FieldIndex field_index =
- FieldIndex::ForPropertyIndex(*map, index, details_representation);
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(*map.object(), index,
+ details_representation);
Type field_type = Type::NonInternal();
- MaybeHandle<Map> field_map;
-
- base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
- if (!map_ref.has_value()) return Invalid();
+ base::Optional<MapRef> field_map;
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
- if (!map_ref->TrySerializeOwnDescriptor(descriptor)) {
- return Invalid();
- }
+
+ Handle<FieldType> descriptors_field_type =
+ broker()->CanonicalPersistentHandle(
+ descriptors->GetFieldType(descriptor));
+ base::Optional<ObjectRef> descriptors_field_type_ref =
+ TryMakeRef<Object>(broker(), descriptors_field_type);
+ if (!descriptors_field_type_ref.has_value()) return Invalid();
+
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
- descriptor));
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ map, descriptor, details_representation));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
- descriptor));
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ map, descriptor, details_representation));
} else if (details_representation.IsHeapObject()) {
- // Extract the field type from the property details (make sure its
- // representation is TaggedPointer to reflect the heap object case).
- Handle<FieldType> descriptors_field_type =
- broker()->CanonicalPersistentHandle(
- descriptors->GetFieldType(descriptor));
if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
if (access_mode == AccessMode::kStore) {
@@ -480,16 +476,15 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// about the contents now.
}
unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
- descriptor));
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ map, descriptor, details_representation));
if (descriptors_field_type->IsClass()) {
// Remember the field map, and try to infer a useful type.
- Handle<Map> map = broker()->CanonicalPersistentHandle(
- descriptors_field_type->AsClass());
- base::Optional<MapRef> maybe_ref = TryMakeRef(broker(), map);
- if (!maybe_ref.has_value()) return Invalid();
- field_type = Type::For(*maybe_ref);
- field_map = MaybeHandle<Map>(map);
+ base::Optional<MapRef> maybe_field_map =
+ TryMakeRef(broker(), descriptors_field_type->AsClass());
+ if (!maybe_field_map.has_value()) return Invalid();
+ field_type = Type::For(maybe_field_map.value());
+ field_map = maybe_field_map;
}
} else {
CHECK(details_representation.IsTagged());
@@ -497,63 +492,74 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// TODO(turbofan): We may want to do this only depending on the use
// of the access info.
unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(*map_ref, descriptor));
+ dependencies()->FieldTypeDependencyOffTheRecord(
+ map, descriptor, descriptors_field_type_ref.value()));
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
} else {
- constness = dependencies()->DependOnFieldConstness(*map_ref, descriptor);
+ constness = dependencies()->DependOnFieldConstness(map, descriptor);
}
- // TODO(v8:11670): Make FindFieldOwner and friends robust wrt concurrency.
- Handle<Map> field_owner_map = broker()->CanonicalPersistentHandle(
- map->FindFieldOwner(isolate(), descriptor));
+
+ // Note: FindFieldOwner may be called multiple times throughout one
+ // compilation. This is safe since its result is fixed for a given map and
+ // descriptor.
+ MapRef field_owner_map = map.FindFieldOwner(descriptor);
+
switch (constness) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
details_representation, field_type, field_owner_map, field_map,
- holder);
+ holder, {});
+
case PropertyConstness::kConst:
return PropertyAccessInfo::FastDataConstant(
zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
details_representation, field_type, field_owner_map, field_map,
- holder);
+ holder, {});
}
UNREACHABLE();
}
namespace {
+
using AccessorsObjectGetter = std::function<Handle<Object>()>;
PropertyAccessInfo AccessorAccessInfoHelper(
Isolate* isolate, Zone* zone, JSHeapBroker* broker,
- const AccessInfoFactory* ai_factory, Handle<Map> receiver_map,
- Handle<Name> name, Handle<Map> map, MaybeHandle<JSObject> holder,
- AccessMode access_mode, AccessorsObjectGetter get_accessors) {
- if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
- DCHECK(map->is_prototype_map());
+ const AccessInfoFactory* ai_factory, MapRef receiver_map, NameRef name,
+ MapRef map, base::Optional<JSObjectRef> holder, AccessMode access_mode,
+ AccessorsObjectGetter get_accessors) {
+ if (map.instance_type() == JS_MODULE_NAMESPACE_TYPE) {
+ DCHECK(map.object()->is_prototype_map());
Handle<PrototypeInfo> proto_info = broker->CanonicalPersistentHandle(
- PrototypeInfo::cast(map->prototype_info()));
+ PrototypeInfo::cast(map.object()->prototype_info()));
Handle<JSModuleNamespace> module_namespace =
broker->CanonicalPersistentHandle(
JSModuleNamespace::cast(proto_info->module_namespace()));
Handle<Cell> cell = broker->CanonicalPersistentHandle(
Cell::cast(module_namespace->module().exports().Lookup(
- isolate, name, Smi::ToInt(name->GetHash()))));
+ isolate, name.object(), Smi::ToInt(name.object()->GetHash()))));
if (cell->value().IsTheHole(isolate)) {
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
}
- return PropertyAccessInfo::ModuleExport(zone, receiver_map, cell);
+ base::Optional<CellRef> cell_ref = TryMakeRef(broker, cell);
+ if (!cell_ref.has_value()) {
+ return PropertyAccessInfo::Invalid(zone);
+ }
+ return PropertyAccessInfo::ModuleExport(zone, receiver_map,
+ cell_ref.value());
}
if (access_mode == AccessMode::kHas) {
// kHas is not supported for dictionary mode objects.
- DCHECK(!map->is_dictionary_map());
+ DCHECK(!map.is_dictionary_map());
// HasProperty checks don't call getter/setters, existence is sufficient.
- return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
- Handle<Object>(), holder);
+ return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map, {},
+ holder);
}
Handle<Object> maybe_accessors = get_accessors();
if (!maybe_accessors->IsAccessorPair()) {
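[Editor's note] The hunk above pushes field-representation and field-type dependencies into unrecorded_dependencies rather than installing them immediately; PropertyAccessInfo::RecordDependencies (later in this diff) commits them only once the access info is actually used. A minimal collect-now/record-later sketch follows, with illustrative class names that are not V8's.

#include <vector>

// Hedged sketch of the "off the record" dependency pattern.
class ToyDependency {
 public:
  virtual ~ToyDependency() = default;
  virtual void Install() = 0;
};

class ToyDependencyLog {
 public:
  // Collect off the record: nothing is committed yet.
  void Collect(ToyDependency* dep) { unrecorded_.push_back(dep); }
  // Record everything once the compilation decides to use the access info.
  void RecordAll() {
    for (ToyDependency* dep : unrecorded_) dep->Install();
    unrecorded_.clear();
  }
 private:
  std::vector<ToyDependency*> unrecorded_;
};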
@@ -561,61 +567,74 @@ PropertyAccessInfo AccessorAccessInfoHelper(
}
Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(maybe_accessors);
Handle<Object> accessor = broker->CanonicalPersistentHandle(
- access_mode == AccessMode::kLoad ? accessors->getter()
- : accessors->setter());
+ access_mode == AccessMode::kLoad ? accessors->getter(kAcquireLoad)
+ : accessors->setter(kAcquireLoad));
- ObjectData* data = broker->TryGetOrCreateData(accessor);
- if (data == nullptr) return PropertyAccessInfo::Invalid(zone);
+ base::Optional<ObjectRef> accessor_ref = TryMakeRef(broker, accessor);
+ if (!accessor_ref.has_value()) return PropertyAccessInfo::Invalid(zone);
if (!accessor->IsJSFunction()) {
CallOptimization optimization(broker->local_isolate_or_isolate(), accessor);
if (!optimization.is_simple_api_call() ||
optimization.IsCrossContextLazyAccessorPair(
- *broker->target_native_context().object(), *map)) {
+ *broker->target_native_context().object(), *map.object())) {
return PropertyAccessInfo::Invalid(zone);
}
CallOptimization::HolderLookup lookup;
- holder = broker->CanonicalPersistentHandle(
+ Handle<JSObject> holder_handle = broker->CanonicalPersistentHandle(
optimization.LookupHolderOfExpectedType(
- broker->local_isolate_or_isolate(), receiver_map, &lookup));
+ broker->local_isolate_or_isolate(), receiver_map.object(),
+ &lookup));
if (lookup == CallOptimization::kHolderNotFound) {
return PropertyAccessInfo::Invalid(zone);
}
DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
- holder.is_null());
- DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
+ holder_handle.is_null());
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound,
+ !holder_handle.is_null());
+
+ if (holder_handle.is_null()) {
+ holder = {};
+ } else {
+ holder = TryMakeRef(broker, holder_handle);
+ if (!holder.has_value()) return PropertyAccessInfo::Invalid(zone);
+ }
}
if (access_mode == AccessMode::kLoad) {
- base::Optional<Name> maybe_cached_property_name =
+ base::Optional<Name> cached_property_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate, *accessor);
- if (maybe_cached_property_name.has_value()) {
- Handle<Name> cached_property_name =
- broker->CanonicalPersistentHandle(maybe_cached_property_name.value());
- PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
- map, cached_property_name, access_mode);
- if (!access_info.IsInvalid()) return access_info;
+ if (cached_property_name.has_value()) {
+ base::Optional<NameRef> cached_property_name_ref =
+ TryMakeRef(broker, cached_property_name.value());
+ if (cached_property_name_ref.has_value()) {
+ PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
+ map, cached_property_name_ref.value(), access_mode);
+ if (!access_info.IsInvalid()) return access_info;
+ }
}
}
- if (map->is_dictionary_map()) {
+
+ if (map.is_dictionary_map()) {
return PropertyAccessInfo::DictionaryProtoAccessorConstant(
- zone, receiver_map, holder, accessor, name);
+ zone, receiver_map, holder, accessor_ref.value(), name);
} else {
- return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
- accessor, holder);
+ return PropertyAccessInfo::FastAccessorConstant(
+ zone, receiver_map, accessor_ref.value(), holder);
}
}
} // namespace
PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
- Handle<Map> receiver_map, Handle<Name> name, Handle<Map> holder_map,
- MaybeHandle<JSObject> holder, InternalIndex descriptor,
+ MapRef receiver_map, NameRef name, MapRef holder_map,
+ base::Optional<JSObjectRef> holder, InternalIndex descriptor,
AccessMode access_mode) const {
DCHECK(descriptor.is_found());
Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
- holder_map->instance_descriptors(kRelaxedLoad));
- SLOW_DCHECK(descriptor == descriptors->Search(*name, *holder_map));
+ holder_map.object()->instance_descriptors(kRelaxedLoad));
+ SLOW_DCHECK(descriptor ==
+ descriptors->Search(*name.object(), *holder_map.object()));
auto get_accessors = [&]() {
return broker()->CanonicalPersistentHandle(
@@ -627,11 +646,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
}
PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
- Handle<Map> receiver_map, Handle<Name> name, Handle<JSObject> holder,
+ MapRef receiver_map, NameRef name, JSObjectRef holder,
InternalIndex dictionary_index, AccessMode access_mode,
PropertyDetails details) const {
CHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
- DCHECK(holder->map().is_prototype_map());
+ DCHECK(holder.map().object()->is_prototype_map());
DCHECK_EQ(access_mode, AccessMode::kLoad);
// We can only inline accesses to constant properties.
@@ -645,11 +664,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
}
auto get_accessors = [&]() {
- return JSObject::DictionaryPropertyAt(isolate(), holder, dictionary_index);
+ return JSObject::DictionaryPropertyAt(isolate(), holder.object(),
+ dictionary_index);
};
- Handle<Map> holder_map = broker()->CanonicalPersistentHandle(holder->map());
return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
- receiver_map, name, holder_map, holder,
+ receiver_map, name, holder.map(), holder,
access_mode, get_accessors);
}
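[Editor's note] AccessorAccessInfoHelper above receives the accessor pair lazily through a std::function callback, so each caller (fast descriptor arrays vs. dictionary-mode holders) supplies its own lookup while sharing the rest of the logic. A minimal sketch of that callback shape, with simplified types that are not V8's:

#include <functional>
#include <string>

// Hedged sketch: a shared helper that only materializes its input via a
// caller-supplied getter, mirroring the AccessorsObjectGetter parameter above.
using AccessorsGetter = std::function<std::string()>;

std::string DescribeAccessors(const AccessorsGetter& get_accessors) {
  return "accessors: " + get_accessors();
}

// A caller captures its own lookup in a lambda, e.g.:
//   DescribeAccessors([&] { return LookupInDictionary(holder, index); });
// (LookupInDictionary is hypothetical, shown only to illustrate the call site.)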
@@ -668,15 +687,15 @@ MinimorphicLoadPropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
bool AccessInfoFactory::TryLoadPropertyDetails(
- Handle<Map> map, MaybeHandle<JSObject> maybe_holder, Handle<Name> name,
+ MapRef map, base::Optional<JSObjectRef> maybe_holder, NameRef name,
InternalIndex* index_out, PropertyDetails* details_out) const {
- if (map->is_dictionary_map()) {
+ if (map.is_dictionary_map()) {
DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
- DCHECK(map->is_prototype_map());
+ DCHECK(map.object()->is_prototype_map());
DisallowGarbageCollection no_gc;
- if (maybe_holder.is_null()) {
+ if (!maybe_holder.has_value()) {
// TODO(v8:11457) In this situation, we have a dictionary mode prototype
// as a receiver. Consider other means of obtaining the holder in this
// situation.
@@ -685,24 +704,24 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
return false;
}
- Handle<JSObject> holder = maybe_holder.ToHandleChecked();
+ Handle<JSObject> holder = maybe_holder->object();
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dict = holder->property_dictionary_swiss();
- *index_out = dict.FindEntry(isolate(), name);
+ *index_out = dict.FindEntry(isolate(), name.object());
if (index_out->is_found()) {
*details_out = dict.DetailsAt(*index_out);
}
} else {
NameDictionary dict = holder->property_dictionary();
- *index_out = dict.FindEntry(isolate(), name);
+ *index_out = dict.FindEntry(isolate(), name.object());
if (index_out->is_found()) {
*details_out = dict.DetailsAt(*index_out);
}
}
} else {
- DescriptorArray descriptors = map->instance_descriptors(kAcquireLoad);
- *index_out =
- descriptors.Search(*name, *map, broker()->is_concurrent_inlining());
+ DescriptorArray descriptors = *map.instance_descriptors().object();
+ *index_out = descriptors.Search(*name.object(), *map.object(),
+ broker()->is_concurrent_inlining());
if (index_out->is_found()) {
*details_out = descriptors.GetDetails(*index_out);
}
@@ -712,12 +731,17 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
}
PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
- Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
- CHECK(name->IsUniqueName());
+ MapRef map, NameRef name, AccessMode access_mode) const {
+ CHECK(name.IsUniqueName());
+
+ // Dictionary property const tracking is unsupported when concurrent inlining
+ // is enabled.
+ CHECK_IMPLIES(V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
+ !broker()->is_concurrent_inlining());
JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker());
- if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
+ if (access_mode == AccessMode::kHas && !map.object()->IsJSReceiverMap()) {
return Invalid();
}
@@ -737,8 +761,21 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
bool fast_mode_prototype_on_chain = false;
// Remember the receiver map. We use {map} as loop variable.
- Handle<Map> receiver_map = map;
- MaybeHandle<JSObject> holder;
+ MapRef receiver_map = map;
+ base::Optional<JSObjectRef> holder;
+
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ // Note: Keep sync'd with
+ // CompilationDependencies::DependOnStablePrototypeChains.
+ if (receiver_map.IsPrimitiveMap()) {
+ base::Optional<JSFunctionRef> constructor =
+ broker()->target_native_context().GetConstructorFunction(receiver_map);
+ if (!constructor.has_value()) return Invalid();
+ map = constructor->initial_map(broker()->dependencies());
+ DCHECK(!map.IsPrimitiveMap());
+ }
+
while (true) {
PropertyDetails details = PropertyDetails::Empty();
InternalIndex index = InternalIndex::NotFound();
@@ -749,13 +786,12 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (index.is_found()) {
if (access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral) {
- DCHECK(!map->is_dictionary_map());
+ DCHECK(!map.is_dictionary_map());
// Don't bother optimizing stores to read-only properties.
- if (details.IsReadOnly()) {
- return Invalid();
- }
- if (details.kind() == kData && !holder.is_null()) {
+ if (details.IsReadOnly()) return Invalid();
+
+ if (details.kind() == kData && holder.has_value()) {
// This is a store to a property not found on the receiver but on a
// prototype. According to ES6 section 9.1.9 [[Set]], we need to
// create a new data property on the receiver. We can still optimize
@@ -763,7 +799,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return LookupTransition(receiver_map, name, holder);
}
}
- if (map->is_dictionary_map()) {
+
+ if (map.is_dictionary_map()) {
DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
if (fast_mode_prototype_on_chain) {
@@ -776,10 +813,10 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// TryLoadPropertyDetails only succeeds if we know the holder.
- return ComputeDictionaryProtoAccessInfo(receiver_map, name,
- holder.ToHandleChecked(), index,
- access_mode, details);
+ return ComputeDictionaryProtoAccessInfo(
+ receiver_map, name, holder.value(), index, access_mode, details);
}
+
if (dictionary_prototype_on_chain) {
// If V8_DICT_PROPERTY_CONST_TRACKING_BOOL was disabled, then a
// dictionary prototype would have caused a bailout earlier.
@@ -817,12 +854,13 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// The property wasn't found on {map}. Look on the prototype if appropriate.
+ DCHECK(!index.is_found());
// Don't search on the prototype chain for special indices in case of
// integer indexed exotic objects (see ES6 section 9.4.5).
- if (map->IsJSTypedArrayMap() && name->IsString()) {
+ if (map.object()->IsJSTypedArrayMap() && name.IsString()) {
if (broker()->IsMainThread()) {
- if (IsSpecialIndex(String::cast(*name))) {
+ if (IsSpecialIndex(String::cast(*name.object()))) {
return Invalid();
}
} else {
@@ -839,72 +877,67 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// Don't lookup private symbols on the prototype chain.
- if (name->IsPrivate()) {
+ if (name.object()->IsPrivate()) {
return Invalid();
}
- if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !holder.is_null()) {
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && holder.has_value()) {
// At this point, we are past the first loop iteration.
- DCHECK(holder.ToHandleChecked()->map().is_prototype_map());
- DCHECK_NE(holder.ToHandleChecked()->map(), *receiver_map);
+ DCHECK(holder->object()->map().is_prototype_map());
+ DCHECK(!holder->map().equals(receiver_map));
fast_mode_prototype_on_chain =
- fast_mode_prototype_on_chain || !map->is_dictionary_map();
+ fast_mode_prototype_on_chain || !map.is_dictionary_map();
dictionary_prototype_on_chain =
- dictionary_prototype_on_chain || map->is_dictionary_map();
+ dictionary_prototype_on_chain || map.is_dictionary_map();
}
// Walk up the prototype chain.
- base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
- if (!map_ref.has_value()) return Invalid();
- if (!map_ref->TrySerializePrototype()) return Invalid();
-
- // Acquire synchronously the map's prototype's map to guarantee that every
- // time we use it, we use the same Map.
- Handle<Map> map_prototype_map =
- broker()->CanonicalPersistentHandle(map->prototype().map(kAcquireLoad));
- if (!map_prototype_map->IsJSObjectMap()) {
- // Perform the implicit ToObject for primitives here.
- // Implemented according to ES6 section 7.3.2 GetV (V, P).
- Handle<JSFunction> constructor;
- base::Optional<JSFunction> maybe_constructor =
- Map::GetConstructorFunction(
- *map, *broker()->target_native_context().object());
- if (maybe_constructor.has_value()) {
- map = broker()->CanonicalPersistentHandle(
- maybe_constructor->initial_map());
- map_prototype_map = broker()->CanonicalPersistentHandle(
- map->prototype().map(kAcquireLoad));
- DCHECK(map_prototype_map->IsJSObjectMap());
- } else if (map->prototype().IsNull()) {
- if (dictionary_prototype_on_chain) {
- // TODO(v8:11248) See earlier comment about
- // dictionary_prototype_on_chain. We don't support absent properties
- // with dictionary mode prototypes on the chain, either. This is again
- // just due to how we currently deal with dependencies for dictionary
- // properties during finalization.
- return Invalid();
- }
+ if (!broker()->is_concurrent_inlining()) {
+ if (!map.TrySerializePrototype(NotConcurrentInliningTag{broker()})) {
+ return Invalid();
+ }
+ }
- // Store to property not found on the receiver or any prototype, we need
- // to transition to a new data property.
- // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
- if (access_mode == AccessMode::kStore) {
- return LookupTransition(receiver_map, name, holder);
- }
- // The property was not found (access returns undefined or throws
- // depending on the language mode of the load operation.
- // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
- return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
- } else {
+ // Load the map's prototype's map to guarantee that every time we use it,
+ // we use the same Map.
+ base::Optional<HeapObjectRef> prototype = map.prototype();
+ if (!prototype.has_value()) return Invalid();
+
+ MapRef map_prototype_map = prototype->map();
+ if (!map_prototype_map.object()->IsJSObjectMap()) {
+ // Don't allow proxies on the prototype chain.
+ if (!prototype->IsNull()) {
+ DCHECK(prototype->object()->IsJSProxy());
+ return Invalid();
+ }
+
+ DCHECK(prototype->IsNull());
+
+ if (dictionary_prototype_on_chain) {
+ // TODO(v8:11248) See earlier comment about
+ // dictionary_prototype_on_chain. We don't support absent properties
+ // with dictionary mode prototypes on the chain, either. This is again
+ // just due to how we currently deal with dependencies for dictionary
+ // properties during finalization.
return Invalid();
}
+
+ // Store to property not found on the receiver or any prototype, we need
+ // to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ if (access_mode == AccessMode::kStore) {
+ return LookupTransition(receiver_map, name, holder);
+ }
+
+ // The property was not found (access returns undefined or throws
+ // depending on the language mode of the load operation.
+ // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
+ return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
}
- holder =
- broker()->CanonicalPersistentHandle(JSObject::cast(map->prototype()));
+ holder = prototype->AsJSObject();
map = map_prototype_map;
- CHECK(!map->is_deprecated());
if (!CanInlinePropertyAccess(map, access_mode)) {
return Invalid();
@@ -912,8 +945,12 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Successful lookup on prototype chain needs to guarantee that all the
// prototypes up to the holder have stable maps, except for dictionary-mode
- // prototypes.
- CHECK_IMPLIES(!map->is_dictionary_map(), map->is_stable());
+ // prototypes. We currently do this by taking a
+ // DependOnStablePrototypeChains dependency in the caller.
+ //
+ // TODO(jgruber): This is brittle and easy to miss. Consider a refactor
+ // that moves the responsibility of taking the dependency into
+ // AccessInfoFactory.
}
UNREACHABLE();
}
@@ -932,15 +969,6 @@ PropertyAccessInfo AccessInfoFactory::FinalizePropertyAccessInfosAsOne(
return Invalid();
}
-void AccessInfoFactory::ComputePropertyAccessInfos(
- MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
- ZoneVector<PropertyAccessInfo>* access_infos) const {
- DCHECK(access_infos->empty());
- for (Handle<Map> map : maps) {
- access_infos->push_back(ComputePropertyAccessInfo(map, name, access_mode));
- }
-}
-
void PropertyAccessInfo::RecordDependencies(
CompilationDependencies* dependencies) {
for (CompilationDependency const* d : unrecorded_dependencies_) {
@@ -1007,7 +1035,7 @@ Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
ElementAccessFeedback const& feedback) const {
- if (feedback.transition_groups().empty()) return base::nullopt;
+ if (feedback.transition_groups().empty()) return {};
DCHECK(!feedback.transition_groups().front().empty());
Handle<Map> first_map = feedback.transition_groups().front().front();
@@ -1016,20 +1044,20 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
InstanceType instance_type = first_map_ref->instance_type();
ElementsKind elements_kind = first_map_ref->elements_kind();
- ZoneVector<Handle<Map>> maps(zone());
+ ZoneVector<MapRef> maps(zone());
for (auto const& group : feedback.transition_groups()) {
for (Handle<Map> map_handle : group) {
base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
if (!map.has_value()) return {};
if (map->instance_type() != instance_type ||
!CanInlineElementAccess(*map)) {
- return base::nullopt;
+ return {};
}
if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
.To(&elements_kind)) {
- return base::nullopt;
+ return {};
}
- maps.push_back(map->object());
+ maps.push_back(map.value());
}
}
@@ -1037,31 +1065,33 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
}
PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
- Handle<Map> map, Handle<Name> name) const {
+ MapRef map, NameRef name) const {
// Check for String::length field accessor.
- if (map->IsStringMap()) {
- if (Name::Equals(isolate(), name, isolate()->factory()->length_string())) {
+ if (map.object()->IsStringMap()) {
+ if (Name::Equals(isolate(), name.object(),
+ isolate()->factory()->length_string())) {
return PropertyAccessInfo::StringLength(zone(), map);
}
return Invalid();
}
// Check for special JSObject field accessors.
FieldIndex field_index;
- if (Accessors::IsJSObjectFieldAccessor(isolate(), map, name, &field_index)) {
+ if (Accessors::IsJSObjectFieldAccessor(isolate(), map.object(), name.object(),
+ &field_index)) {
Type field_type = Type::NonInternal();
Representation field_representation = Representation::Tagged();
- if (map->IsJSArrayMap()) {
- DCHECK(
- Name::Equals(isolate(), isolate()->factory()->length_string(), name));
+ if (map.object()->IsJSArrayMap()) {
+ DCHECK(Name::Equals(isolate(), isolate()->factory()->length_string(),
+ name.object()));
// The JSArray::length property is a smi in the range
// [0, FixedDoubleArray::kMaxLength] in case of fast double
// elements, a smi in the range [0, FixedArray::kMaxLength]
// in case of other fast elements, and [0, kMaxUInt32] in
// case of other arrays.
- if (IsDoubleElementsKind(map->elements_kind())) {
+ if (IsDoubleElementsKind(map.elements_kind())) {
field_type = type_cache_->kFixedDoubleArrayLengthType;
field_representation = Representation::Smi();
- } else if (IsFastElementsKind(map->elements_kind())) {
+ } else if (IsFastElementsKind(map.elements_kind())) {
field_type = type_cache_->kFixedArrayLengthType;
field_representation = Representation::Smi();
} else {
@@ -1070,97 +1100,96 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
}
// Special fields are always mutable.
return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
- field_representation, field_type, map);
+ field_representation, field_type, map,
+ {}, {}, {});
}
return Invalid();
}
PropertyAccessInfo AccessInfoFactory::LookupTransition(
- Handle<Map> map, Handle<Name> name, MaybeHandle<JSObject> holder) const {
+ MapRef map, NameRef name, base::Optional<JSObjectRef> holder) const {
// Check if the {map} has a data transition with the given {name}.
- Map transition =
- TransitionsAccessor(isolate(), map, broker()->is_concurrent_inlining())
- .SearchTransition(*name, kData, NONE);
- if (transition.is_null()) {
- return Invalid();
- }
-
- Handle<Map> transition_map = broker()->CanonicalPersistentHandle(transition);
- InternalIndex const number = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
- transition_map->instance_descriptors(kAcquireLoad));
+ Map transition = TransitionsAccessor(isolate(), map.object(),
+ broker()->is_concurrent_inlining())
+ .SearchTransition(*name.object(), kData, NONE);
+ if (transition.is_null()) return Invalid();
+
+ base::Optional<MapRef> maybe_transition_map =
+ TryMakeRef(broker(), transition);
+ if (!maybe_transition_map.has_value()) return Invalid();
+ MapRef transition_map = maybe_transition_map.value();
+
+ InternalIndex const number = transition_map.object()->LastAdded();
+ Handle<DescriptorArray> descriptors =
+ transition_map.instance_descriptors().object();
PropertyDetails const details = descriptors->GetDetails(number);
+
// Don't bother optimizing stores to read-only properties.
- if (details.IsReadOnly()) {
- return Invalid();
- }
+ if (details.IsReadOnly()) return Invalid();
+
// TODO(bmeurer): Handle transition to data constant?
- if (details.location() != kField) {
- return Invalid();
- }
+ if (details.location() != kField) return Invalid();
+
int const index = details.field_index();
Representation details_representation = details.representation();
- FieldIndex field_index = FieldIndex::ForPropertyIndex(*transition_map, index,
- details_representation);
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *transition_map.object(), index, details_representation);
Type field_type = Type::NonInternal();
- MaybeHandle<Map> field_map;
-
- base::Optional<MapRef> transition_map_ref =
- TryMakeRef(broker(), transition_map);
- if (!transition_map_ref.has_value()) return Invalid();
+ base::Optional<MapRef> field_map;
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
- if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
- return Invalid();
- }
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(
- *transition_map_ref, number));
+ transition_map, number, details_representation));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
- return Invalid();
- }
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(
- *transition_map_ref, number));
+ transition_map, number, details_representation));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
+ // TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
Handle<FieldType> descriptors_field_type =
broker()->CanonicalPersistentHandle(descriptors->GetFieldType(number));
+ base::Optional<ObjectRef> descriptors_field_type_ref =
+ TryMakeRef<Object>(broker(), descriptors_field_type);
+ if (!descriptors_field_type_ref.has_value()) return Invalid();
+
if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
return Invalid();
}
- if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
- return Invalid();
- }
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(
- *transition_map_ref, number));
+ transition_map, number, details_representation));
if (descriptors_field_type->IsClass()) {
unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(*transition_map_ref,
- number));
+ dependencies()->FieldTypeDependencyOffTheRecord(
+ transition_map, number,
+ MakeRef<Object>(broker(), descriptors_field_type)));
// Remember the field map, and try to infer a useful type.
- Handle<Map> map = broker()->CanonicalPersistentHandle(
- descriptors_field_type->AsClass());
- base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
- if (!map_ref.has_value()) return Invalid();
- field_type = Type::For(*map_ref);
- field_map = map;
+ base::Optional<MapRef> maybe_field_map =
+ TryMakeRef(broker(), descriptors_field_type->AsClass());
+ if (!maybe_field_map.has_value()) return Invalid();
+ field_type = Type::For(maybe_field_map.value());
+ field_map = maybe_field_map;
}
}
+
unrecorded_dependencies.push_back(
- dependencies()->TransitionDependencyOffTheRecord(*transition_map_ref));
- transition_map_ref->SerializeBackPointer(); // For BuildPropertyStore.
+ dependencies()->TransitionDependencyOffTheRecord(transition_map));
+ if (!broker()->is_concurrent_inlining()) {
+ transition_map.SerializeBackPointer(
+ NotConcurrentInliningTag{broker()}); // For BuildPropertyStore.
+ }
+
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
- switch (dependencies()->DependOnFieldConstness(*transition_map_ref, number)) {
+ switch (dependencies()->DependOnFieldConstness(transition_map, number)) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
zone(), map, std::move(unrecorded_dependencies), field_index,
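[Editor's note] The access-info.cc changes above replace MaybeHandle<T> fields with base::Optional<*Ref> values and compare them via the OptionalRefEquals helper, which treats two empty optionals as equal and otherwise defers to the Ref's equals(). A standalone sketch of the same shape, using std::optional and std::vector instead of V8's base::Optional and ZoneVector (an assumption made only for illustration):

#include <optional>
#include <vector>

// Hedged sketch of the helpers introduced in the anonymous namespace above.
template <class RefT>
bool OptionalRefEquals(std::optional<RefT> lhs, std::optional<RefT> rhs) {
  if (!lhs.has_value()) return !rhs.has_value();  // both empty => equal
  if (!rhs.has_value()) return false;             // only one empty => unequal
  return lhs->equals(*rhs);  // otherwise compare the referenced objects
}

template <class T>
void AppendVector(std::vector<T>* dst, const std::vector<T>& src) {
  dst->insert(dst->end(), src.begin(), src.end());
}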
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 93215ea0a0..72757da5b7 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -5,14 +5,8 @@
#ifndef V8_COMPILER_ACCESS_INFO_H_
#define V8_COMPILER_ACCESS_INFO_H_
-#include <iosfwd>
-
-#include "src/codegen/machine-type.h"
+#include "src/compiler/heap-refs.h"
#include "src/compiler/types.h"
-#include "src/objects/feedback-vector.h"
-#include "src/objects/field-index.h"
-#include "src/objects/map.h"
-#include "src/objects/objects.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -37,26 +31,26 @@ std::ostream& operator<<(std::ostream&, AccessMode);
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
- ElementAccessInfo(ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ ElementAccessInfo(ZoneVector<MapRef>&& lookup_start_object_maps,
ElementsKind elements_kind, Zone* zone);
ElementsKind elements_kind() const { return elements_kind_; }
- ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+ ZoneVector<MapRef> const& lookup_start_object_maps() const {
return lookup_start_object_maps_;
}
- ZoneVector<Handle<Map>> const& transition_sources() const {
+ ZoneVector<MapRef> const& transition_sources() const {
return transition_sources_;
}
- void AddTransitionSource(Handle<Map> map) {
+ void AddTransitionSource(MapRef map) {
CHECK_EQ(lookup_start_object_maps_.size(), 1);
transition_sources_.push_back(map);
}
private:
ElementsKind elements_kind_;
- ZoneVector<Handle<Map>> lookup_start_object_maps_;
- ZoneVector<Handle<Map>> transition_sources_;
+ ZoneVector<MapRef> lookup_start_object_maps_;
+ ZoneVector<MapRef> transition_sources_;
};
// This class encapsulates all information required to access a certain
@@ -75,37 +69,35 @@ class PropertyAccessInfo final {
kStringLength
};
- static PropertyAccessInfo NotFound(Zone* zone, Handle<Map> receiver_map,
- MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo NotFound(Zone* zone, MapRef receiver_map,
+ base::Optional<JSObjectRef> holder);
static PropertyAccessInfo DataField(
- Zone* zone, Handle<Map> receiver_map,
+ Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, Handle<Map> field_owner_map,
- MaybeHandle<Map> field_map = MaybeHandle<Map>(),
- MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
- MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+ Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+ base::Optional<JSObjectRef> holder,
+ base::Optional<MapRef> transition_map);
static PropertyAccessInfo FastDataConstant(
- Zone* zone, Handle<Map> receiver_map,
+ Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- MaybeHandle<JSObject> holder,
- MaybeHandle<Map> transition_map = MaybeHandle<Map>());
- static PropertyAccessInfo FastAccessorConstant(Zone* zone,
- Handle<Map> receiver_map,
- Handle<Object> constant,
- MaybeHandle<JSObject> holder);
- static PropertyAccessInfo ModuleExport(Zone* zone, Handle<Map> receiver_map,
- Handle<Cell> cell);
- static PropertyAccessInfo StringLength(Zone* zone, Handle<Map> receiver_map);
+ Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+ base::Optional<JSObjectRef> holder,
+ base::Optional<MapRef> transition_map);
+ static PropertyAccessInfo FastAccessorConstant(
+ Zone* zone, MapRef receiver_map, base::Optional<ObjectRef> constant,
+ base::Optional<JSObjectRef> holder);
+ static PropertyAccessInfo ModuleExport(Zone* zone, MapRef receiver_map,
+ CellRef cell);
+ static PropertyAccessInfo StringLength(Zone* zone, MapRef receiver_map);
static PropertyAccessInfo Invalid(Zone* zone);
static PropertyAccessInfo DictionaryProtoDataConstant(
- Zone* zone, Handle<Map> receiver_map, Handle<JSObject> holder,
- InternalIndex dict_index, Handle<Name> name);
+ Zone* zone, MapRef receiver_map, JSObjectRef holder,
+ InternalIndex dict_index, NameRef name);
static PropertyAccessInfo DictionaryProtoAccessorConstant(
- Zone* zone, Handle<Map> receiver_map, MaybeHandle<JSObject> holder,
- Handle<Object> constant, Handle<Name> name);
+ Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
+ ObjectRef constant, NameRef name);
bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
Zone* zone) V8_WARN_UNUSED_RESULT;
@@ -128,7 +120,7 @@ class PropertyAccessInfo final {
return kind() == kDictionaryProtoAccessorConstant;
}
- bool HasTransitionMap() const { return !transition_map().is_null(); }
+ bool HasTransitionMap() const { return transition_map().has_value(); }
bool HasDictionaryHolder() const {
return kind_ == kDictionaryProtoDataConstant ||
kind_ == kDictionaryProtoAccessorConstant;
@@ -136,17 +128,22 @@ class PropertyAccessInfo final {
ConstFieldInfo GetConstFieldInfo() const;
Kind kind() const { return kind_; }
- MaybeHandle<JSObject> holder() const {
+ base::Optional<JSObjectRef> holder() const {
// TODO(neis): There was a CHECK here that tries to protect against
// using the access info without recording its dependencies first.
// Find a more suitable place for it.
return holder_;
}
- MaybeHandle<Map> transition_map() const {
+ base::Optional<MapRef> transition_map() const {
DCHECK(!HasDictionaryHolder());
return transition_map_;
}
- Handle<Object> constant() const { return constant_; }
+ base::Optional<ObjectRef> constant() const {
+ DCHECK_IMPLIES(constant_.has_value(),
+ IsModuleExport() || IsFastAccessorConstant() ||
+ IsDictionaryProtoAccessorConstant());
+ return constant_;
+ }
FieldIndex field_index() const {
DCHECK(!HasDictionaryHolder());
return field_index_;
@@ -160,11 +157,11 @@ class PropertyAccessInfo final {
DCHECK(!HasDictionaryHolder());
return field_representation_;
}
- MaybeHandle<Map> field_map() const {
+ base::Optional<MapRef> field_map() const {
DCHECK(!HasDictionaryHolder());
return field_map_;
}
- ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+ ZoneVector<MapRef> const& lookup_start_object_maps() const {
return lookup_start_object_maps_;
}
@@ -173,46 +170,48 @@ class PropertyAccessInfo final {
return dictionary_index_;
}
- Handle<Name> name() const {
+ NameRef name() const {
DCHECK(HasDictionaryHolder());
- return name_.ToHandleChecked();
+ return name_.value();
}
private:
explicit PropertyAccessInfo(Zone* zone);
- PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps);
- PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant, MaybeHandle<Name> name,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps);
- PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- MaybeHandle<Map> transition_map, FieldIndex field_index,
+ PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ ZoneVector<MapRef>&& lookup_start_object_maps);
+ PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ base::Optional<ObjectRef> constant,
+ base::Optional<NameRef> name,
+ ZoneVector<MapRef>&& lookup_start_object_maps);
+ PropertyAccessInfo(Kind kind, base::Optional<JSObjectRef> holder,
+ base::Optional<MapRef> transition_map,
+ FieldIndex field_index,
Representation field_representation, Type field_type,
- Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ MapRef field_owner_map, base::Optional<MapRef> field_map,
+ ZoneVector<MapRef>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& dependencies);
- PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& lookup_start_object_maps,
- InternalIndex dictionary_index, Handle<Name> name);
+ PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ ZoneVector<MapRef>&& lookup_start_object_maps,
+ InternalIndex dictionary_index, NameRef name);
// Members used for fast and dictionary mode holders:
Kind kind_;
- ZoneVector<Handle<Map>> lookup_start_object_maps_;
- Handle<Object> constant_;
- MaybeHandle<JSObject> holder_;
+ ZoneVector<MapRef> lookup_start_object_maps_;
+ base::Optional<ObjectRef> constant_;
+ base::Optional<JSObjectRef> holder_;
// Members only used for fast mode holders:
ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
- MaybeHandle<Map> transition_map_;
+ base::Optional<MapRef> transition_map_;
FieldIndex field_index_;
Representation field_representation_;
Type field_type_;
- MaybeHandle<Map> field_owner_map_;
- MaybeHandle<Map> field_map_;
+ base::Optional<MapRef> field_owner_map_;
+ base::Optional<MapRef> field_map_;
// Members only used for dictionary mode holders:
InternalIndex dictionary_index_;
- MaybeHandle<Name> name_;
+ base::Optional<NameRef> name_;
};
// This class encapsulates information required to generate load properties
@@ -252,28 +251,22 @@ class AccessInfoFactory final {
Zone* zone);
base::Optional<ElementAccessInfo> ComputeElementAccessInfo(
- Handle<Map> map, AccessMode access_mode) const;
+ MapRef map, AccessMode access_mode) const;
bool ComputeElementAccessInfos(
ElementAccessFeedback const& feedback,
ZoneVector<ElementAccessInfo>* access_infos) const;
- PropertyAccessInfo ComputePropertyAccessInfo(Handle<Map> map,
- Handle<Name> name,
+ PropertyAccessInfo ComputePropertyAccessInfo(MapRef map, NameRef name,
AccessMode access_mode) const;
PropertyAccessInfo ComputeDictionaryProtoAccessInfo(
- Handle<Map> receiver_map, Handle<Name> name, Handle<JSObject> holder,
+ MapRef receiver_map, NameRef name, JSObjectRef holder,
InternalIndex dict_index, AccessMode access_mode,
PropertyDetails details) const;
MinimorphicLoadPropertyAccessInfo ComputePropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback) const;
- // Convenience wrapper around {ComputePropertyAccessInfo} for multiple maps.
- void ComputePropertyAccessInfos(
- MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
- ZoneVector<PropertyAccessInfo>* access_infos) const;
-
// Merge as many of the given {infos} as possible and record any dependencies.
// Return false iff any of them was invalid, in which case no dependencies are
// recorded.
@@ -291,18 +284,15 @@ class AccessInfoFactory final {
private:
base::Optional<ElementAccessInfo> ConsolidateElementLoad(
ElementAccessFeedback const& feedback) const;
- PropertyAccessInfo LookupSpecialFieldAccessor(Handle<Map> map,
- Handle<Name> name) const;
- PropertyAccessInfo LookupTransition(Handle<Map> map, Handle<Name> name,
- MaybeHandle<JSObject> holder) const;
- PropertyAccessInfo ComputeDataFieldAccessInfo(Handle<Map> receiver_map,
- Handle<Map> map,
- MaybeHandle<JSObject> holder,
- InternalIndex descriptor,
- AccessMode access_mode) const;
+ PropertyAccessInfo LookupSpecialFieldAccessor(MapRef map, NameRef name) const;
+ PropertyAccessInfo LookupTransition(MapRef map, NameRef name,
+ base::Optional<JSObjectRef> holder) const;
+ PropertyAccessInfo ComputeDataFieldAccessInfo(
+ MapRef receiver_map, MapRef map, base::Optional<JSObjectRef> holder,
+ InternalIndex descriptor, AccessMode access_mode) const;
PropertyAccessInfo ComputeAccessorDescriptorAccessInfo(
- Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, InternalIndex descriptor,
+ MapRef receiver_map, NameRef name, MapRef map,
+ base::Optional<JSObjectRef> holder, InternalIndex descriptor,
AccessMode access_mode) const;
PropertyAccessInfo Invalid() const {
@@ -313,8 +303,9 @@ class AccessInfoFactory final {
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* result) const;
- bool TryLoadPropertyDetails(Handle<Map> map, MaybeHandle<JSObject> holder,
- Handle<Name> name, InternalIndex* index_out,
+ bool TryLoadPropertyDetails(MapRef map,
+ base::Optional<JSObjectRef> maybe_holder,
+ NameRef name, InternalIndex* index_out,
PropertyDetails* details_out) const;
CompilationDependencies* dependencies() const { return dependencies_; }
@@ -327,7 +318,6 @@ class AccessInfoFactory final {
TypeCache const* const type_cache_;
Zone* const zone_;
- // TODO(nicohartmann@): Move to public
AccessInfoFactory(const AccessInfoFactory&) = delete;
AccessInfoFactory& operator=(const AccessInfoFactory&) = delete;
};
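
// A minimal standalone sketch of the handle-to-optional migration visible in the
// access-info.h hunks above: "maybe" values whose absence used to be signalled by
// is_null() become base::Optional (std::optional here so the sketch compiles on
// its own), queried with has_value(). MapId and PropertyInfo are illustrative
// stand-ins, not V8 types.
#include <cassert>
#include <optional>

struct MapId { int id; };

class PropertyInfo {
 public:
  bool HasTransitionMap() const { return transition_map_.has_value(); }
  std::optional<MapId> transition_map() const { return transition_map_; }
  void set_transition_map(MapId map) { transition_map_ = map; }

 private:
  std::optional<MapId> transition_map_;  // empty means "no transition"
};

int main() {
  PropertyInfo info;
  assert(!info.HasTransitionMap());
  info.set_transition_map(MapId{7});
  assert(info.HasTransitionMap() && info.transition_map()->id == 7);
  return 0;
}
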
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index f39c9bb0d0..18651e26e1 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -17,6 +17,7 @@ namespace compiler {
void AllocationBuilder::Allocate(int size, AllocationType allocation,
Type type) {
+ CHECK_GT(size, 0);
DCHECK_LE(size, isolate()->heap()->MaxRegularHeapObjectSize(allocation));
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
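
// A small standalone sketch of the allocation-size guard added above: reject
// non-positive sizes before checking the upper bound. kMaxRegularObjectSize and
// Allocate() are illustrative; the real limit comes from the heap configuration.
#include <cassert>
#include <cstddef>
#include <cstdlib>

constexpr int kMaxRegularObjectSize = 128 * 1024;  // assumed limit, sketch only

void* Allocate(int size) {
  assert(size > 0);                       // mirrors the new CHECK_GT(size, 0)
  assert(size <= kMaxRegularObjectSize);  // mirrors the existing DCHECK_LE
  return std::malloc(static_cast<std::size_t>(size));
}
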
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index c1f0c62e25..29c7897ec9 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -3814,7 +3814,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -3824,7 +3824,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
__ ldr(scratch, FieldMemOperand(
@@ -3837,12 +3837,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
- // We come from WebAssembly, there are no references for the GC.
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
- if (FLAG_debug_code) {
- __ stop();
- }
+ if (FLAG_debug_code) __ stop();
__ bind(&done);
}
@@ -3950,15 +3949,18 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_slots).
- __ add(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
+ // We must pop all arguments from the stack (including the receiver).
+ // The number of arguments without the receiver is
+ // max(argc_reg, parameter_slots-1), and the receiver is added in
+ // DropArguments().
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
if (parameter_slots > 1) {
- __ cmp(argc_reg, Operand(parameter_slots));
- __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC, lt);
}
- __ Drop(argc_reg);
+ __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
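
// A standalone sketch of the reworked Wasm stack-check trigger above: the early
// overflow check now fires when the frame exceeds 4 KB of bytes rather than 128
// slots, so the threshold no longer depends on the pointer size. KB and the
// pointer sizes are spelled out here only to make the arithmetic visible.
constexpr int KB = 1024;

constexpr bool NeedsEarlyStackCheck(int required_slots, int pointer_size) {
  return required_slots * pointer_size > 4 * KB;
}

// 128 slots is 512 bytes on a 32-bit target and 1 KB on a 64-bit target, so the
// old "required_slots > 128" trigger fired well before the new 4 KB bound:
static_assert(!NeedsEarlyStackCheck(128, 4), "512 B frame: no early check");
static_assert(!NeedsEarlyStackCheck(128, 8), "1 KB frame: no early check");
static_assert(NeedsEarlyStackCheck(1025, 4), "just over 4 KB on 32-bit");
static_assert(NeedsEarlyStackCheck(513, 8), "just over 4 KB on 64-bit");
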
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c907e83c3f..c121383426 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -3147,7 +3147,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -3156,7 +3156,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
__ Ldr(scratch, FieldMemOperand(
@@ -3178,12 +3178,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
- // We come from WebAssembly, there are no references for the GC.
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
- if (FLAG_debug_code) {
- __ Brk(0);
- }
+ if (FLAG_debug_code) __ Brk(0);
__ Bind(&done);
}
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index bc5aa579d6..eaa39ccb82 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -189,7 +189,8 @@ class DeoptimizationExit : public ZoneObject {
public:
explicit DeoptimizationExit(SourcePosition pos, BytecodeOffset bailout_id,
int translation_id, int pc_offset,
- DeoptimizeKind kind, DeoptimizeReason reason)
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ NodeId node_id)
: deoptimization_id_(kNoDeoptIndex),
pos_(pos),
bailout_id_(bailout_id),
@@ -197,6 +198,7 @@ class DeoptimizationExit : public ZoneObject {
pc_offset_(pc_offset),
kind_(kind),
reason_(reason),
+ node_id_(node_id),
immediate_args_(nullptr),
emitted_(false) {}
@@ -220,6 +222,7 @@ class DeoptimizationExit : public ZoneObject {
int pc_offset() const { return pc_offset_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ NodeId node_id() const { return node_id_; }
const ZoneVector<ImmediateOperand*>* immediate_args() const {
return immediate_args_;
}
@@ -243,6 +246,7 @@ class DeoptimizationExit : public ZoneObject {
const int pc_offset_;
const DeoptimizeKind kind_;
const DeoptimizeReason reason_;
+ const NodeId node_id_;
ZoneVector<ImmediateOperand*>* immediate_args_;
bool emitted_;
};
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 3b2285a4c5..9e378b8458 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -171,7 +171,7 @@ void CodeGenerator::AssembleDeoptImmediateArgs(
switch (constant.type()) {
case Constant::kInt32:
- tasm()->dp(constant.ToInt32());
+ tasm()->dp(constant.ToInt32(), RelocInfo::LITERAL_CONSTANT);
break;
#ifdef V8_TARGET_ARCH_64_BIT
case Constant::kInt64:
@@ -181,7 +181,7 @@ void CodeGenerator::AssembleDeoptImmediateArgs(
case Constant::kFloat64: {
int smi;
CHECK(DoubleToSmiInteger(constant.ToFloat64().value(), &smi));
- tasm()->dp(Smi::FromInt(smi).ptr());
+ tasm()->dp(Smi::FromInt(smi).ptr(), RelocInfo::LITERAL_CONSTANT);
break;
}
case Constant::kCompressedHeapObject:
@@ -221,8 +221,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
}
if (info()->source_positions()) {
- tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
- deoptimization_id);
+ tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
+ exit->pos(), deoptimization_id);
}
if (deopt_kind == DeoptimizeKind::kLazy) {
@@ -320,8 +320,12 @@ void CodeGenerator::AssembleCode() {
offsets_info_.blocks_start = tasm()->pc_offset();
for (const InstructionBlock* block : instructions()->ao_blocks()) {
// Align loop headers on vendor recommended boundaries.
- if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
- tasm()->CodeTargetAlign();
+ if (!tasm()->jump_optimization_info()) {
+ if (block->ShouldAlignLoopHeader()) {
+ tasm()->LoopHeaderAlign();
+ } else if (block->ShouldAlignCodeTarget()) {
+ tasm()->CodeTargetAlign();
+ }
}
if (info->trace_turbo_json()) {
block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
@@ -597,9 +601,9 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
isolate()->counters()->total_compiled_code_size()->Increment(
code->raw_body_size());
- LOG_CODE_EVENT(isolate(),
- CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
- *source_positions));
+ LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(
+ code->raw_instruction_start(),
+ *source_positions, JitCodeEvent::JIT_CODE));
return code;
}
@@ -1055,6 +1059,9 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetTranslationIndex(
i, Smi::FromInt(deoptimization_exit->translation_id()));
data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset()));
+#ifdef DEBUG
+ data->SetNodeId(i, Smi::FromInt(deoptimization_exit->node_id()));
+#endif // DEBUG
}
return data;
@@ -1242,8 +1249,12 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
DeoptimizationExit* const exit = zone()->New<DeoptimizationExit>(
current_source_position_, descriptor->bailout_id(), translation_index,
- pc_offset, entry.kind(), entry.reason());
-
+ pc_offset, entry.kind(), entry.reason(),
+#ifdef DEBUG
+ entry.node_id());
+#else // DEBUG
+ 0);
+#endif // DEBUG
if (!Deoptimizer::kSupportsFixedDeoptExitSizes) {
exit->set_deoptimization_id(next_deoptimization_id_++);
}
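
// A standalone sketch of the debug-only node-id plumbing above: the deopt exit
// always has a node_id slot, but release builds pass a dummy value because the
// originating node is only recorded (and emitted into DeoptimizationData) in
// debug builds. NodeId and DeoptExit are illustrative stand-ins.
#include <cstdint>

using NodeId = uint32_t;

struct DeoptExit {
  int pc_offset;
  NodeId node_id;  // meaningful in debug builds only
};

DeoptExit MakeExit(int pc_offset, NodeId node_id) {
#ifndef DEBUG
  node_id = 0;  // release builds do not track the originating node
#endif
  return DeoptExit{pc_offset, node_id};
}
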
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 5541f64897..5db3f20fa4 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -2083,22 +2083,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I64x2ShrS: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- XMMRegister tmp = i.TempSimd128Register(0);
- XMMRegister tmp2 = i.TempSimd128Register(1);
- Operand shift = i.InputOperand(1);
-
- // Take shift value modulo 64.
- __ and_(shift, Immediate(63));
- __ Movd(tmp, shift);
-
- // Set up a mask [0x80000000,0,0x80000000,0].
- __ Pcmpeqb(tmp2, tmp2);
- __ Psllq(tmp2, tmp2, byte{63});
-
- __ Psrlq(tmp2, tmp2, tmp);
- __ Psrlq(dst, src, tmp);
- __ Pxor(dst, tmp2);
- __ Psubq(dst, tmp2);
+ if (HasImmediateInput(instr, 1)) {
+ __ I64x2ShrS(dst, src, i.InputInt6(1), kScratchDoubleReg);
+ } else {
+ __ I64x2ShrS(dst, src, i.InputRegister(1), kScratchDoubleReg,
+ i.TempSimd128Register(0), i.TempRegister(1));
+ }
break;
}
case kIA32I64x2Add: {
@@ -4537,7 +4527,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4547,7 +4537,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
Register scratch = esi;
__ push(scratch);
__ mov(scratch,
@@ -4562,6 +4552,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ wasm_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
@@ -4652,11 +4644,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_slots).
- int parameter_slots_without_receiver =
- parameter_slots - 1; // Exclude the receiver to simplify the
- // computation. We'll account for it at the end.
+ // We must pop all arguments from the stack (including the receiver).
+ // The number of arguments without the receiver is
+ // max(argc_reg, parameter_slots-1), and the receiver is added in
+ // DropArguments().
+ int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
@@ -4666,11 +4658,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
- __ PopReturnAddressTo(scratch_reg);
- __ lea(esp, Operand(esp, argc_reg, times_system_pointer_size,
- kSystemPointerSize)); // Also pop the receiver.
+ __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
- __ PushReturnAddressFrom(scratch_reg);
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
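
// A standalone sketch of the argument-dropping math described in the comments
// above: the count handled in the return sequence excludes the receiver, and
// DropArguments() adds it back, so the slots popped are
// max(argc, parameter_slots - 1) + 1. Plain ints stand in for the registers.
constexpr int SlotsToDrop(int argc, int parameter_slots) {
  const int without_receiver =
      argc > parameter_slots - 1 ? argc : parameter_slots - 1;
  return without_receiver + 1;  // the receiver is added back when dropping
}

// A function declared with 3 parameter slots (receiver + 2 args) but called with
// 5 arguments pops 6 slots; called with only 1 argument it still pops 3.
static_assert(SlotsToDrop(5, 3) == 6, "over-application pops actual argc + 1");
static_assert(SlotsToDrop(1, 3) == 3, "under-application pops declared slots");
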
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 5d7c8fbec2..f36fdb2935 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -2417,16 +2417,16 @@ void InstructionSelector::VisitI64x2Neg(Node* node) {
void InstructionSelector::VisitI64x2ShrS(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
- if (IsSupported(AVX)) {
- Emit(kIA32I64x2ShrS, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
- arraysize(temps), temps);
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ Emit(kIA32I64x2ShrS, dst, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
} else {
- Emit(kIA32I64x2ShrS, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
- arraysize(temps), temps);
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
+ Emit(kIA32I64x2ShrS, dst, g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
}
}
@@ -2989,9 +2989,6 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
}
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
-#else
-void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
-#endif // V8_ENABLE_WEBASSEMBLY
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
InstructionCode op = kIA32I8x16Swizzle;
@@ -3012,6 +3009,10 @@ void InstructionSelector::VisitI8x16Swizzle(Node* node) {
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
arraysize(temps), temps);
}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitI8x16Swizzle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
namespace {
void VisitPminOrPmax(InstructionSelector* selector, Node* node,
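
// A standalone sketch of the operand-selection split above for I64x2ShrS: a
// constant shift amount is encoded as an immediate (only its low six bits
// matter for a 64-bit lane) and needs no temporaries, while a dynamic shift
// uses a register operand plus one SIMD temp and one GP temp. The structs are
// illustrative, not the real operand classes.
#include <cstdint>
#include <optional>

struct ShiftSelection {
  bool use_immediate;
  uint8_t immediate;  // valid only when use_immediate is true
  int temp_count;
};

ShiftSelection SelectI64x2ShrS(std::optional<int64_t> constant_shift) {
  if (constant_shift.has_value()) {
    return {true, static_cast<uint8_t>(*constant_shift & 0x3F), 0};
  }
  return {false, 0, /*temp_count=*/2};
}
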
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 923562dbd9..f279ea1590 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -873,7 +873,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
opcode |= DeoptImmedArgsCountField::encode(immediate_args_count) |
DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
- cont->reason(), cont->feedback(),
+ cont->reason(), cont->node_id(), cont->feedback(),
FrameState{cont->frame_state()});
} else if (cont->IsSet()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
@@ -906,13 +906,13 @@ Instruction* InstructionSelector::EmitWithContinuation(
void InstructionSelector::AppendDeoptimizeArguments(
InstructionOperandVector* args, DeoptimizeKind kind,
- DeoptimizeReason reason, FeedbackSource const& feedback,
+ DeoptimizeReason reason, NodeId node_id, FeedbackSource const& feedback,
FrameState frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
DCHECK_NE(DeoptimizeKind::kLazy, kind);
- int const state_id =
- sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
+ int const state_id = sequence()->AddDeoptimizationEntry(
+ descriptor, kind, reason, node_id, feedback);
args->push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -1112,7 +1112,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
int const state_id = sequence()->AddDeoptimizationEntry(
buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
- DeoptimizeReason::kUnknown, FeedbackSource());
+ DeoptimizeReason::kUnknown, call->id(), FeedbackSource());
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@@ -1362,7 +1362,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
FrameState value{input->InputAt(0)};
- VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
+ VisitDeoptimize(p.kind(), p.reason(), input->id(), p.feedback(), value);
break;
}
case BasicBlock::kThrow:
@@ -3119,11 +3119,13 @@ void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
if (NeedsPoisoning(p.is_safety_check())) {
FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
} else {
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
}
@@ -3132,11 +3134,13 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
if (NeedsPoisoning(p.is_safety_check())) {
FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
} else {
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
}
@@ -3184,12 +3188,12 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), p.feedback(), n.frame_state(),
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
VisitWordCompareZero(node, n.condition(), &cont);
} else {
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), n.frame_state(),
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
VisitWordCompareZero(node, n.condition(), &cont);
}
@@ -3214,10 +3218,12 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
+ NodeId node_id,
FeedbackSource const& feedback,
FrameState frame_state) {
InstructionOperandVector args(instruction_zone());
- AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
+ AppendDeoptimizeArguments(&args, kind, reason, node_id, feedback,
+ frame_state);
Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
}
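
// A standalone sketch of the deopt-entry registry pattern above: each deopt
// site appends an entry (now carrying the originating node id) and receives
// the entry's index, which is later emitted as an immediate operand. The
// classes are illustrative stand-ins for the V8 ones.
#include <cstdint>
#include <vector>

using NodeId = uint32_t;

struct DeoptEntry {
  int reason;
  NodeId node_id;  // which graph node requested the deoptimization
};

class DeoptRegistry {
 public:
  int AddEntry(int reason, NodeId node_id) {
    const int id = static_cast<int>(entries_.size());
    entries_.push_back(DeoptEntry{reason, node_id});
    return id;  // used as the state_id immediate at the call site
  }
  const DeoptEntry& GetEntry(int id) const { return entries_[id]; }

 private:
  std::vector<DeoptEntry> entries_;
};
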
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 837a22412c..11a329d1d6 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -64,20 +64,20 @@ class FlagsContinuation final {
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* frame_state,
+ NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
- feedback, frame_state, extra_args,
+ node_id, feedback, frame_state, extra_args,
extra_args_count);
}
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimizeAndPoison(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* frame_state,
+ NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
- reason, feedback, frame_state, extra_args,
+ reason, node_id, feedback, frame_state, extra_args,
extra_args_count);
}
@@ -123,6 +123,10 @@ class FlagsContinuation final {
DCHECK(IsDeoptimize());
return reason_;
}
+ NodeId node_id() const {
+ DCHECK(IsDeoptimize());
+ return node_id_;
+ }
FeedbackSource const& feedback() const {
DCHECK(IsDeoptimize());
return feedback_;
@@ -229,12 +233,14 @@ class FlagsContinuation final {
FlagsContinuation(FlagsMode mode, FlagsCondition condition,
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* frame_state,
- InstructionOperand* extra_args, int extra_args_count)
+ NodeId node_id, FeedbackSource const& feedback,
+ Node* frame_state, InstructionOperand* extra_args,
+ int extra_args_count)
: mode_(mode),
condition_(condition),
kind_(kind),
reason_(reason),
+ node_id_(node_id),
feedback_(feedback),
frame_state_or_result_(frame_state),
extra_args_(extra_args),
@@ -274,6 +280,7 @@ class FlagsContinuation final {
FlagsCondition condition_;
DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
+ NodeId node_id_; // Only valid if mode_ == kFlags_deoptimize*
FeedbackSource feedback_; // Only valid if mode_ == kFlags_deoptimize*
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
// or mode_ == kFlags_set.
@@ -524,7 +531,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void AppendDeoptimizeArguments(InstructionOperandVector* args,
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
+ NodeId node_id, FeedbackSource const& feedback,
FrameState frame_state);
void EmitTableSwitch(const SwitchInfo& sw,
@@ -660,7 +667,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, FrameState frame_state);
+ NodeId node_id, FeedbackSource const& feedback,
+ FrameState frame_state);
void VisitSelect(Node* node);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
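
// A standalone sketch of the mode-guarded accessor convention above: node_id_
// (like reason_ and feedback_) is only meaningful for deoptimizing
// continuations, and its accessor asserts that before returning. The enum and
// members are illustrative.
#include <cassert>
#include <cstdint>

using NodeId = uint32_t;
enum class FlagsMode { kNone, kBranch, kDeoptimize };

class Continuation {
 public:
  static Continuation ForDeoptimize(NodeId node_id) {
    return Continuation(FlagsMode::kDeoptimize, node_id);
  }
  bool IsDeoptimize() const { return mode_ == FlagsMode::kDeoptimize; }
  NodeId node_id() const {
    assert(IsDeoptimize());  // only valid for deoptimizing continuations
    return node_id_;
  }

 private:
  Continuation(FlagsMode mode, NodeId node_id)
      : mode_(mode), node_id_(node_id) {}
  FlagsMode mode_;
  NodeId node_id_;  // only valid if mode_ == kDeoptimize
};
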
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 24c8722b62..63ca78e060 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -607,7 +607,8 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
deferred_(deferred),
handler_(handler),
switch_target_(false),
- alignment_(false),
+ code_target_alignment_(false),
+ loop_header_alignment_(false),
needs_frame_(false),
must_construct_frame_(false),
must_deconstruct_frame_(false) {}
@@ -802,14 +803,14 @@ void InstructionSequence::ComputeAssemblyOrder() {
ao_blocks_->push_back(loop_end);
// This block will be the new machine-level loop header, so align
// this block instead of the loop header block.
- loop_end->set_alignment(true);
+ loop_end->set_loop_header_alignment(true);
header_align = false;
}
}
- block->set_alignment(header_align);
+ block->set_loop_header_alignment(header_align);
}
if (block->loop_header().IsValid() && block->IsSwitchTarget()) {
- block->set_alignment(true);
+ block->set_code_target_alignment(true);
}
block->set_ao_number(RpoNumber::FromInt(ao++));
ao_blocks_->push_back(block);
@@ -952,10 +953,10 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int InstructionSequence::AddDeoptimizationEntry(
FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason, FeedbackSource const& feedback) {
+ DeoptimizeReason reason, NodeId node_id, FeedbackSource const& feedback) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
deoptimization_entries_.push_back(
- DeoptimizationEntry(descriptor, kind, reason, feedback));
+ DeoptimizationEntry(descriptor, kind, reason, node_id, feedback));
return deoptimization_id;
}
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index f20955727a..204683c973 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1449,24 +1449,35 @@ class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
// frame state descriptor that we have to go back to.
class DeoptimizationEntry final {
public:
- DeoptimizationEntry() = default;
DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason, FeedbackSource const& feedback)
+ DeoptimizeReason reason, NodeId node_id,
+ FeedbackSource const& feedback)
: descriptor_(descriptor),
kind_(kind),
reason_(reason),
- feedback_(feedback) {}
+#ifdef DEBUG
+ node_id_(node_id),
+#endif // DEBUG
+ feedback_(feedback) {
+ USE(node_id);
+ }
FrameStateDescriptor* descriptor() const { return descriptor_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+#ifdef DEBUG
+ NodeId node_id() const { return node_id_; }
+#endif // DEBUG
FeedbackSource const& feedback() const { return feedback_; }
private:
- FrameStateDescriptor* descriptor_ = nullptr;
- DeoptimizeKind kind_ = DeoptimizeKind::kEager;
- DeoptimizeReason reason_ = DeoptimizeReason::kUnknown;
- FeedbackSource feedback_ = FeedbackSource();
+ FrameStateDescriptor* const descriptor_;
+ const DeoptimizeKind kind_;
+ const DeoptimizeReason reason_;
+#ifdef DEBUG
+ const NodeId node_id_;
+#endif // DEBUG
+ const FeedbackSource feedback_;
};
using DeoptimizationVector = ZoneVector<DeoptimizationEntry>;
@@ -1537,7 +1548,8 @@ class V8_EXPORT_PRIVATE InstructionBlock final
}
inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
inline bool IsSwitchTarget() const { return switch_target_; }
- inline bool ShouldAlign() const { return alignment_; }
+ inline bool ShouldAlignCodeTarget() const { return code_target_alignment_; }
+ inline bool ShouldAlignLoopHeader() const { return loop_header_alignment_; }
using Predecessors = ZoneVector<RpoNumber>;
Predecessors& predecessors() { return predecessors_; }
@@ -1560,7 +1572,8 @@ class V8_EXPORT_PRIVATE InstructionBlock final
void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
- void set_alignment(bool val) { alignment_ = val; }
+ void set_code_target_alignment(bool val) { code_target_alignment_ = val; }
+ void set_loop_header_alignment(bool val) { loop_header_alignment_ = val; }
void set_switch_target(bool val) { switch_target_ = val; }
@@ -1588,7 +1601,10 @@ class V8_EXPORT_PRIVATE InstructionBlock final
const bool deferred_ : 1; // Block contains deferred code.
bool handler_ : 1; // Block is a handler entry point.
bool switch_target_ : 1;
- bool alignment_ : 1; // insert alignment before this block
+ bool code_target_alignment_ : 1; // insert code target alignment before this
+ // block
+ bool loop_header_alignment_ : 1; // insert loop header alignment before this
+ // block
bool needs_frame_ : 1;
bool must_construct_frame_ : 1;
bool must_deconstruct_frame_ : 1;
@@ -1770,7 +1786,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback);
+ NodeId node_id, FeedbackSource const& feedback);
DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
int GetDeoptimizationEntryCount() const {
return static_cast<int>(deoptimization_entries_.size());
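
// A standalone sketch of the alignment split above: a block can now request
// either loop-header alignment or code-target alignment, and the emitter
// checks the loop-header flag first. The Assembler hooks stand in for the
// TurboAssembler calls.
struct BlockFlags {
  bool code_target_alignment : 1;  // align as a branch/switch target
  bool loop_header_alignment : 1;  // align as a machine-level loop header
};

struct Assembler {
  void LoopHeaderAlign() {}
  void CodeTargetAlign() {}
};

void AlignBlock(const BlockFlags& block, Assembler* tasm) {
  if (block.loop_header_alignment) {
    tasm->LoopHeaderAlign();
  } else if (block.code_target_alignment) {
    tasm->CodeTargetAlign();
  }
}
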
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index 96a3b144a0..e91b7e17d2 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -206,7 +206,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
// Skip empty blocks when the previous block doesn't fall through.
bool prev_fallthru = true;
- for (auto const block : code->instruction_blocks()) {
+ for (auto const block : code->ao_blocks()) {
RpoNumber block_rpo = block->rpo_number();
int block_num = block_rpo.ToInt();
RpoNumber result_rpo = result[block_num];
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index f921813766..2b8197e7e6 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -4139,7 +4139,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4149,7 +4149,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
__ Lw(
kScratchReg,
FieldMemOperand(kWasmInstanceRegister,
@@ -4161,12 +4161,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
- // We come from WebAssembly, there are no references for the GC.
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
- if (FLAG_debug_code) {
- __ stop();
- }
+ if (FLAG_debug_code) __ stop();
__ bind(&done);
}
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index d05df5ceec..6fce103d24 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -4349,7 +4349,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4359,7 +4359,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
__ Ld(
kScratchReg,
FieldMemOperand(kWasmInstanceRegister,
@@ -4371,12 +4371,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
- // We come from WebAssembly, there are no references for the GC.
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
- if (FLAG_debug_code) {
- __ stop();
- }
+ if (FLAG_debug_code) __ stop();
__ bind(&done);
}
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 24232aa7fb..cf324353f2 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -347,7 +347,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
i.InputRegister(1), i.OutputRCBit()); \
} else { \
__ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
- i.InputInt32(1), i.OutputRCBit()); \
+ i.InputImmediate(1), i.OutputRCBit()); \
} \
} while (0)
@@ -1227,29 +1227,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kPPC_ShiftLeft32:
- ASSEMBLE_BINOP_RC(slw, slwi);
+ ASSEMBLE_BINOP_RC(ShiftLeftU32, ShiftLeftU32);
break;
-#if V8_TARGET_ARCH_PPC64
case kPPC_ShiftLeft64:
- ASSEMBLE_BINOP_RC(sld, sldi);
+ ASSEMBLE_BINOP_RC(ShiftLeftU64, ShiftLeftU64);
break;
-#endif
case kPPC_ShiftRight32:
- ASSEMBLE_BINOP_RC(srw, srwi);
+ ASSEMBLE_BINOP_RC(ShiftRightU32, ShiftRightU32);
break;
-#if V8_TARGET_ARCH_PPC64
case kPPC_ShiftRight64:
- ASSEMBLE_BINOP_RC(srd, srdi);
+ ASSEMBLE_BINOP_RC(ShiftRightU64, ShiftRightU64);
break;
-#endif
case kPPC_ShiftRightAlg32:
- ASSEMBLE_BINOP_INT_RC(sraw, srawi);
+ ASSEMBLE_BINOP_INT_RC(ShiftRightS32, ShiftRightS32);
break;
-#if V8_TARGET_ARCH_PPC64
case kPPC_ShiftRightAlg64:
- ASSEMBLE_BINOP_INT_RC(srad, sradi);
+ ASSEMBLE_BINOP_INT_RC(ShiftRightS64, ShiftRightS64);
break;
-#endif
#if !V8_TARGET_ARCH_PPC64
case kPPC_AddPair:
// i.InputRegister(0) ... left low word.
@@ -1493,7 +1487,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
break;
case kPPC_Mod32:
- if (CpuFeatures::IsSupported(MODULO)) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
__ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
ASSEMBLE_MODULO(divw, mullw);
@@ -1501,7 +1495,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_Mod64:
- if (CpuFeatures::IsSupported(MODULO)) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
__ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
ASSEMBLE_MODULO(divd, mulld);
@@ -1509,7 +1503,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kPPC_ModU32:
- if (CpuFeatures::IsSupported(MODULO)) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
__ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
ASSEMBLE_MODULO(divwu, mullw);
@@ -1517,7 +1511,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_ModU64:
- if (CpuFeatures::IsSupported(MODULO)) {
+ if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
__ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
ASSEMBLE_MODULO(divdu, mulld);
@@ -1830,7 +1824,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
cr, static_cast<CRBit>(VXCVI % CRWIDTH));
__ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
__ li(kScratchReg, Operand(1));
- __ sldi(kScratchReg, kScratchReg, Operand(31)); // generate INT32_MIN.
+ __ ShiftLeftU64(kScratchReg, kScratchReg,
+ Operand(31)); // generate INT32_MIN.
__ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit);
}
break;
@@ -1873,7 +1868,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
cr, static_cast<CRBit>(VXCVI % CRWIDTH));
__ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
// Handle conversion failures (such as overflow).
- if (CpuFeatures::IsSupported(ISELECT)) {
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
if (check_conversion) {
__ li(i.OutputRegister(1), Operand(1));
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
@@ -1910,7 +1905,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int crbit = v8::internal::Assembler::encode_crbit(
cr, static_cast<CRBit>(VXCVI % CRWIDTH));
__ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
- if (CpuFeatures::IsSupported(ISELECT)) {
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
__ li(i.OutputRegister(1), Operand(1));
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
@@ -2284,9 +2279,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 8;
Simd128Register dst = i.OutputSimd128Register();
__ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
- __ mtvsrd(kScratchSimd128Reg, r0);
- __ vinsertd(dst, kScratchSimd128Reg,
- Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vinsd(dst, r0, Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ } else {
+ __ mtvsrd(kScratchSimd128Reg, r0);
+ __ vinsertd(dst, kScratchSimd128Reg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ }
break;
}
case kPPC_F32x4ReplaceLane: {
@@ -2294,27 +2293,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
__ MovFloatToInt(r0, i.InputDoubleRegister(2));
- __ mtvsrd(kScratchSimd128Reg, r0);
- __ vinsertw(dst, kScratchSimd128Reg,
- Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vinsw(dst, r0, Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ } else {
+ __ mtvsrd(kScratchSimd128Reg, r0);
+ __ vinsertw(dst, kScratchSimd128Reg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ }
break;
}
case kPPC_I64x2ReplaceLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
Simd128Register dst = i.OutputSimd128Register();
- __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2));
- __ vinsertd(dst, kScratchSimd128Reg,
- Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vinsd(dst, i.InputRegister(2),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ } else {
+ __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2));
+ __ vinsertd(dst, kScratchSimd128Reg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ }
break;
}
case kPPC_I32x4ReplaceLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
- __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2));
- __ vinsertw(dst, kScratchSimd128Reg,
- Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vinsw(dst, i.InputRegister(2),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ } else {
+ __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2));
+ __ vinsertw(dst, kScratchSimd128Reg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ }
break;
}
case kPPC_I16x8ReplaceLane: {
@@ -2377,26 +2390,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
constexpr int lane_width_in_bytes = 8;
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register tempFPReg0 = i.ToSimd128Register(instr->TempAt(0));
+ Register tempReg1 = i.ToRegister(instr->TempAt(2));
+ Register scratch_0 = ip;
+ Register scratch_1 = r0;
Simd128Register dst = i.OutputSimd128Register();
- for (int i = 0; i < 2; i++) {
- if (i > 0) {
- __ vextractd(kScratchSimd128Reg, src0,
- Operand(1 * lane_width_in_bytes));
- __ vextractd(tempFPReg1, src1, Operand(1 * lane_width_in_bytes));
- src0 = kScratchSimd128Reg;
- src1 = tempFPReg1;
- }
- __ mfvsrd(r0, src0);
- __ mfvsrd(ip, src1);
- __ mulld(r0, r0, ip);
- if (i <= 0) {
- __ mtvsrd(dst, r0);
- } else {
- __ mtvsrd(kScratchSimd128Reg, r0);
- __ vinsertd(dst, kScratchSimd128Reg,
- Operand(1 * lane_width_in_bytes));
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vmulld(dst, src0, src1);
+ } else {
+ for (int i = 0; i < 2; i++) {
+ if (i > 0) {
+ __ vextractd(kScratchSimd128Reg, src0,
+ Operand(1 * lane_width_in_bytes));
+ __ vextractd(tempFPReg0, src1, Operand(1 * lane_width_in_bytes));
+ src0 = kScratchSimd128Reg;
+ src1 = tempFPReg0;
+ }
+ __ mfvsrd(scratch_0, src0);
+ __ mfvsrd(scratch_1, src1);
+ __ mulld(scratch_0, scratch_0, scratch_1);
+ scratch_0 = r0;
+ scratch_1 = tempReg1;
}
+ __ mtvsrdd(dst, ip, r0);
}
break;
}
@@ -3256,43 +3272,59 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I64x2BitMask: {
- __ mov(kScratchReg,
- Operand(0x8080808080800040)); // Select 0 for the high bits.
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vextractdm(i.OutputRegister(), i.InputSimd128Register(0));
+ } else {
+ __ mov(kScratchReg,
+ Operand(0x8080808080800040)); // Select 0 for the high bits.
+ __ mtvsrd(kScratchSimd128Reg, kScratchReg);
+ __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
+ kScratchSimd128Reg);
+ __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ }
break;
}
case kPPC_I32x4BitMask: {
- __ mov(kScratchReg,
- Operand(0x8080808000204060)); // Select 0 for the high bits.
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vextractwm(i.OutputRegister(), i.InputSimd128Register(0));
+ } else {
+ __ mov(kScratchReg,
+ Operand(0x8080808000204060)); // Select 0 for the high bits.
+ __ mtvsrd(kScratchSimd128Reg, kScratchReg);
+ __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
+ kScratchSimd128Reg);
+ __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ }
break;
}
case kPPC_I16x8BitMask: {
- __ mov(kScratchReg, Operand(0x10203040506070));
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vextracthm(i.OutputRegister(), i.InputSimd128Register(0));
+ } else {
+ __ mov(kScratchReg, Operand(0x10203040506070));
+ __ mtvsrd(kScratchSimd128Reg, kScratchReg);
+ __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
+ kScratchSimd128Reg);
+ __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ }
break;
}
case kPPC_I8x16BitMask: {
- Register temp = i.ToRegister(instr->TempAt(0));
- __ mov(temp, Operand(0x8101820283038));
- __ mov(ip, Operand(0x4048505860687078));
- __ mtvsrdd(kScratchSimd128Reg, temp, ip);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractuh(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ vextractbm(i.OutputRegister(), i.InputSimd128Register(0));
+ } else {
+ Register temp = i.ToRegister(instr->TempAt(0));
+ __ mov(temp, Operand(0x8101820283038));
+ __ mov(ip, Operand(0x4048505860687078));
+ __ mtvsrdd(kScratchSimd128Reg, temp, ip);
+ __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
+ kScratchSimd128Reg);
+ __ vextractuh(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
+ }
break;
}
case kPPC_I32x4DotI16x8S: {
@@ -3890,7 +3922,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Unnecessary for eq/lt & ne/ge since only FU bit will be set.
}
- if (CpuFeatures::IsSupported(ISELECT)) {
+ if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
switch (cond) {
case eq:
case lt:
@@ -3941,7 +3973,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ CmpU64(input, Operand(case_count), r0);
__ bge(GetLabel(i.InputRpo(1)));
__ mov_label_addr(kScratchReg, table);
- __ ShiftLeftImm(r0, input, Operand(kSystemPointerSizeLog2));
+ __ ShiftLeftU64(r0, input, Operand(kSystemPointerSizeLog2));
__ LoadU64(kScratchReg, MemOperand(kScratchReg, r0));
__ Jump(kScratchReg);
}
@@ -4058,7 +4090,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4068,7 +4100,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
Register scratch = ip;
__ LoadU64(
scratch,
@@ -4083,12 +4115,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
- // We come from WebAssembly, there are no references for the GC.
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
- if (FLAG_debug_code) {
- __ stop();
- }
+ if (FLAG_debug_code) __ stop();
__ bind(&done);
}
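The reworked Wasm prologue check above boils down to two thresholds: frames of at most 4 KB skip the explicit overflow check entirely, and frames larger than the whole configured stack throw unconditionally. A minimal sketch of that decision, assuming the constants named in the diff (kSystemPointerSize, KB, FLAG_stack_size) rather than the real V8 declarations:

    #include <cstddef>

    constexpr size_t kSystemPointerSize = 8;   // 64-bit target assumed
    constexpr size_t KB = 1024;
    size_t FLAG_stack_size = 984;              // --stack-size, in KB; default varies

    // Emit the explicit limit check only for frames above 4 KB.
    bool NeedsEagerStackCheck(size_t required_slots) {
      return required_slots * kSystemPointerSize > 4 * KB;
    }

    // Frames that cannot fit in the configured stack throw unconditionally,
    // which also avoids integer overflow in the limit comparison.
    bool FrameCouldFit(size_t required_slots) {
      return required_slots * kSystemPointerSize < FLAG_stack_size * KB;
    }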
@@ -4195,17 +4226,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// Constant pool is unavailable since the frame has been destructed
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
if (drop_jsargs) {
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_slots).
- __ addi(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
+ // We must pop all arguments from the stack (including the receiver).
+ // The number of arguments without the receiver is
+ // max(argc_reg, parameter_slots-1), and the receiver is added in
+ // DropArguments().
if (parameter_slots > 1) {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots), r0);
+ __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver), r0);
__ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver));
__ bind(&skip);
}
- __ Drop(argc_reg);
+ __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
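For the drop_jsargs epilogue above (the same change lands in the s390 and x64 epilogues below), the count handed to DropArguments() now excludes the receiver; the helper adds it back. A small, hedged model of the slot arithmetic, with assumed names:

    #include <algorithm>

    // Slots popped on return when JS arguments are dropped dynamically.
    int SlotsToDrop(int argc_without_receiver, int parameter_slots) {
      const int parameter_slots_without_receiver = parameter_slots - 1;
      const int count =
          std::max(argc_without_receiver, parameter_slots_without_receiver);
      return count + 1;  // receiver slot, re-added inside DropArguments()
    }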
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 3cbfc35588..c74211aa38 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2395,14 +2395,14 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, S)
SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
-#define SIMD_VISIT_BINOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- PPCOperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempSimd128Register(), \
- g.TempSimd128Register()}; \
- Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+#define SIMD_VISIT_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempSimd128Register(), g.TempRegister()}; \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
}
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 060eb31da2..685293169d 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -2481,184 +2481,138 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
- // vector replicate element
- case kS390_F64x2Splat: {
- __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
- Condition(3));
- break;
- }
- case kS390_F32x4Splat: {
- __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
- Condition(2));
- break;
- }
- case kS390_I64x2Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(3));
- __ vrep(dst, dst, Operand(0), Condition(3));
- break;
- }
- case kS390_I32x4Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(2));
- __ vrep(dst, dst, Operand(0), Condition(2));
- break;
- }
- case kS390_I16x8Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(1));
- __ vrep(dst, dst, Operand(0), Condition(1));
- break;
- }
- case kS390_I8x16Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(0));
- __ vrep(dst, dst, Operand(0), Condition(0));
- break;
- }
- // vector extract element
- case kS390_F64x2ExtractLane: {
- __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
- Operand(1 - i.InputInt8(1)), Condition(3));
- break;
- }
- case kS390_F32x4ExtractLane: {
- __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
- Operand(3 - i.InputInt8(1)), Condition(2));
- break;
- }
- case kS390_I64x2ExtractLane: {
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
- break;
- }
- case kS390_I32x4ExtractLane: {
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
- break;
- }
- case kS390_I16x8ExtractLaneU: {
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
- break;
- }
- case kS390_I16x8ExtractLaneS: {
- __ vlgv(kScratchReg, i.InputSimd128Register(0),
- MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
- __ lghr(i.OutputRegister(), kScratchReg);
- break;
- }
- case kS390_I8x16ExtractLaneU: {
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
- break;
- }
- case kS390_I8x16ExtractLaneS: {
- __ vlgv(kScratchReg, i.InputSimd128Register(0),
- MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
- __ lgbr(i.OutputRegister(), kScratchReg);
- break;
- }
- // vector replace element
- case kS390_F64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
- __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
- Condition(3));
- __ vlvg(kScratchDoubleReg, kScratchReg,
- MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
- __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
- break;
- }
- case kS390_F32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
- __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
- Condition(2));
- __ vlvg(kScratchDoubleReg, kScratchReg,
- MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
- __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
- break;
- }
- case kS390_I64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- if (src != dst) {
- __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
- }
- __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 1 - i.InputInt8(1)),
- Condition(3));
- break;
- }
- case kS390_I32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- if (src != dst) {
- __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
- }
- __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 3 - i.InputInt8(1)),
- Condition(2));
- break;
- }
- case kS390_I16x8ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- if (src != dst) {
- __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
- }
- __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 7 - i.InputInt8(1)),
- Condition(1));
- break;
- }
- case kS390_I8x16ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- if (src != dst) {
- __ vlr(dst, src, Condition(0), Condition(0), Condition(0));
- }
- __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 15 - i.InputInt8(1)),
- Condition(0));
- break;
- }
- // vector binops
- case kS390_F64x2Add: {
- __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Sub: {
- __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Mul: {
- __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Div: {
- __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Min: {
- __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(1), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Max: {
- __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(1), Condition(0),
- Condition(3));
- break;
- }
+ // Simd Support.
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I16x8Add) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU)
+
+#define EMIT_SIMD_BINOP(name) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ break; \
+ }
+ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
+#undef EMIT_SIMD_BINOP
+#undef SIMD_BINOP_LIST
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Splat, F64x2Splat, Simd128Register, DoubleRegister) \
+ V(F32x4Splat, F32x4Splat, Simd128Register, DoubleRegister) \
+ V(I64x2Splat, I64x2Splat, Simd128Register, Register) \
+ V(I32x4Splat, I32x4Splat, Simd128Register, Register) \
+ V(I16x8Splat, I16x8Splat, Simd128Register, Register) \
+ V(I8x16Splat, I8x16Splat, Simd128Register, Register)
+
+#define EMIT_SIMD_UNOP(name, op, dtype, stype) \
+ case kS390_##name: { \
+ __ op(i.Output##dtype(), i.Input##stype(0)); \
+ break; \
+ }
+ SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
+#undef EMIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
+
+#define SIMD_EXTRACT_LANE_LIST(V) \
+ V(F64x2ExtractLane, DoubleRegister) \
+ V(F32x4ExtractLane, DoubleRegister) \
+ V(I64x2ExtractLane, Register) \
+ V(I32x4ExtractLane, Register) \
+ V(I16x8ExtractLaneU, Register) \
+ V(I16x8ExtractLaneS, Register) \
+ V(I8x16ExtractLaneU, Register) \
+ V(I8x16ExtractLaneS, Register)
+
+#define EMIT_SIMD_EXTRACT_LANE(name, dtype) \
+ case kS390_##name: { \
+ __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1)); \
+ break; \
+ }
+ SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
+#undef EMIT_SIMD_EXTRACT_LANE
+#undef SIMD_EXTRACT_LANE_LIST
+
+#define SIMD_REPLACE_LANE_LIST(V) \
+ V(F64x2ReplaceLane, DoubleRegister) \
+ V(F32x4ReplaceLane, DoubleRegister) \
+ V(I64x2ReplaceLane, Register) \
+ V(I32x4ReplaceLane, Register) \
+ V(I16x8ReplaceLane, Register) \
+ V(I8x16ReplaceLane, Register)
+
+#define EMIT_SIMD_REPLACE_LANE(name, stype) \
+ case kS390_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.Input##stype(2), i.InputInt8(1)); \
+ break; \
+ }
+ SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
+#undef EMIT_SIMD_REPLACE_LANE
+#undef SIMD_REPLACE_LANE_LIST
+ // vector binops
case kS390_F64x2Qfma: {
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
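The s390 hunk above replaces a long run of hand-written cases with X-macro lists: SIMD_BINOP_LIST enumerates the opcodes once, and EMIT_SIMD_BINOP expands into a case per entry that forwards to the like-named TurboAssembler helper. A toy, self-contained illustration of the pattern (made-up opcodes, not the real instruction set):

    #include <cstdio>

    #define TOY_BINOP_LIST(V) \
      V(Add)                  \
      V(Sub)                  \
      V(Mul)

    enum ToyOpcode {
    #define DECLARE(name) kToy_##name,
      TOY_BINOP_LIST(DECLARE)
    #undef DECLARE
    };

    void Dispatch(ToyOpcode op) {
      switch (op) {
    #define EMIT(name)                    \
      case kToy_##name:                   \
        std::printf("emit %s\n", #name);  \
        break;
        TOY_BINOP_LIST(EMIT)
    #undef EMIT
      }
    }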
@@ -2675,42 +2629,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfnms(dst, src1, src2, src0, Condition(3), Condition(0));
break;
}
- case kS390_F32x4Add: {
- __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Sub: {
- __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Mul: {
- __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Div: {
- __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Min: {
- __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(1), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Max: {
- __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(1), Condition(0),
- Condition(2));
- break;
- }
case kS390_F32x4Qfma: {
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2727,81 +2645,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfnms(dst, src1, src2, src0, Condition(2), Condition(0));
break;
}
- case kS390_I64x2Add: {
- __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_I64x2Sub: {
- __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_I64x2Mul: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Register scratch_0 = r0;
- Register scratch_1 = r1;
- for (int i = 0; i < 2; i++) {
- __ vlgv(scratch_0, src0, MemOperand(r0, i), Condition(3));
- __ vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
- __ MulS64(scratch_0, scratch_1);
- scratch_0 = r1;
- scratch_1 = ip;
- }
- __ vlvgp(i.OutputSimd128Register(), r0, r1);
- break;
- }
- case kS390_I32x4Add: {
- __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I32x4Sub: {
- __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I32x4Mul: {
- __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I16x8Add: {
- __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I16x8Sub: {
- __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I16x8Mul: {
- __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I8x16Add: {
- __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_I8x16Sub: {
- __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
case kS390_I16x8RoundingAverageU: {
__ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -2814,274 +2657,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector comparisons
- case kS390_F64x2Eq: {
- __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Ne: {
- __ vfce(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(3));
- break;
- }
- case kS390_F64x2Le: {
- __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_F64x2Lt: {
- __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_I32x4MinS: {
- __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I32x4MinU: {
- __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I16x8MinS: {
- __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I16x8MinU: {
- __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I8x16MinS: {
- __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_I8x16MinU: {
- __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_I32x4MaxS: {
- __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I32x4MaxU: {
- __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I16x8MaxS: {
- __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I16x8MaxU: {
- __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I8x16MaxS: {
- __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_I8x16MaxU: {
- __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_F32x4Eq: {
- __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I64x2Eq: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- break;
- }
- case kS390_I32x4Eq: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- break;
- }
- case kS390_I16x8Eq: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- break;
- }
- case kS390_I8x16Eq: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- break;
- }
- case kS390_F32x4Ne: {
- __ vfce(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(2));
- __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(2));
- break;
- }
- case kS390_I64x2Ne: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
- i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(3));
- break;
- }
- case kS390_I32x4Ne: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
- i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I16x8Ne: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
- i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(1));
- break;
- }
- case kS390_I8x16Ne: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
- i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(0));
- break;
- }
- case kS390_F32x4Lt: {
- __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_F32x4Le: {
- __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(0),
- Condition(2));
- break;
- }
- case kS390_I64x2GtS: {
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- break;
- }
- case kS390_I64x2GeS: {
- // Compute !(B > A) which is equal to A >= B.
- __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(3));
- __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(3));
- break;
- }
- case kS390_I32x4GtS: {
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- break;
- }
- case kS390_I32x4GeS: {
- __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(2));
- __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(2));
- break;
- }
- case kS390_I32x4GtU: {
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- break;
- }
- case kS390_I32x4GeU: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
- break;
- }
- case kS390_I16x8GtS: {
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- break;
- }
- case kS390_I16x8GeS: {
- __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(1));
- __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(1));
- break;
- }
- case kS390_I16x8GtU: {
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- break;
- }
- case kS390_I16x8GeU: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
- break;
- }
- case kS390_I8x16GtS: {
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- break;
- }
- case kS390_I8x16GeS: {
- __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
- i.InputSimd128Register(0), Condition(0), Condition(0));
- __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- break;
- }
- case kS390_I8x16GtU: {
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- break;
- }
- case kS390_I8x16GeU: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
- break;
- }
// vector shifts
#define VECTOR_SHIFT(op, mode) \
{ \
@@ -3851,14 +3426,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_F64x2ConvertLowI32x4S: {
__ vupl(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
Condition(0), Condition(2));
- __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(4),
Condition(0), Condition(3));
break;
}
case kS390_F64x2ConvertLowI32x4U: {
__ vupll(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
Condition(0), Condition(2));
- __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(4),
Condition(0), Condition(3));
break;
}
@@ -4214,7 +3789,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4224,7 +3799,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
Register scratch = r1;
__ LoadU64(
scratch,
@@ -4238,12 +3813,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
- // We come from WebAssembly, there are no references for the GC.
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
- if (FLAG_debug_code) {
- __ stop();
- }
+ if (FLAG_debug_code) __ stop();
__ bind(&done);
}
@@ -4346,17 +3920,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_slots).
- __ AddS64(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
+ // We must pop all arguments from the stack (including the receiver).
+ // The number of arguments without the receiver is
+ // max(argc_reg, parameter_slots-1), and the receiver is added in
+ // DropArguments().
if (parameter_slots > 1) {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots));
+ __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver));
__ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver));
__ bind(&skip);
}
- __ Drop(argc_reg);
+ __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index a4a116a3f5..bcf5a8dfff 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -2790,7 +2790,7 @@ void InstructionSelector::EmitPrepareResults(
void InstructionSelector::VisitLoadLane(Node* node) {
// We should never reach here, see http://crrev.com/c/2577820
- UNIMPLEMENTED();
+ UNREACHABLE();
}
void InstructionSelector::VisitLoadTransform(Node* node) {
@@ -2800,7 +2800,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
void InstructionSelector::VisitStoreLane(Node* node) {
// We should never reach here, see http://crrev.com/c/2577820
- UNIMPLEMENTED();
+ UNREACHABLE();
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index f16c7a6c89..60a40fb489 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -2954,21 +2954,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2ShrS: {
// TODO(zhin): there is vpsraq but requires AVX512
- // ShrS on each quadword one at a time
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
- Register tmp = i.ToRegister(instr->TempAt(0));
- // Modulo 64 not required as sarq_cl will mask cl to 6 bits.
-
- // lower quadword
- __ Pextrq(tmp, src, int8_t{0x0});
- __ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, uint8_t{0x0});
-
- // upper quadword
- __ Pextrq(tmp, src, int8_t{0x1});
- __ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, uint8_t{0x1});
+ if (HasImmediateInput(instr, 1)) {
+ __ I64x2ShrS(dst, src, i.InputInt6(1), kScratchDoubleReg);
+ } else {
+ __ I64x2ShrS(dst, src, i.InputRegister(1), kScratchDoubleReg,
+ i.TempSimd128Register(0), kScratchRegister);
+ }
break;
}
case kX64I64x2Add: {
@@ -4025,8 +4018,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S16x8HalfShuffle1: {
XMMRegister dst = i.OutputSimd128Register();
- ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, i.InputUint8(1));
- __ Pshufhw(dst, dst, i.InputUint8(2));
+ uint8_t mask_lo = i.InputUint8(1);
+ uint8_t mask_hi = i.InputUint8(2);
+ if (mask_lo != 0xe4) {
+ ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, mask_lo);
+ if (mask_hi != 0xe4) __ Pshufhw(dst, dst, mask_hi);
+ } else {
+ DCHECK_NE(mask_hi, 0xe4);
+ ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, mask_hi);
+ }
break;
}
case kX64S16x8HalfShuffle2: {
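The kX64S16x8HalfShuffle1 change above elides a Pshuflw or Pshufhw whose immediate is 0xe4, since that immediate is the identity permutation. A tiny, hedged check of why (two bits per lane is the documented pshuflw/pshufhw immediate encoding):

    #include <cassert>
    #include <cstdint>

    // 0xe4 == 0b11'10'01'00: lane i selects source lane i, i.e. a no-op shuffle.
    int SelectedLane(uint8_t imm, int i) { return (imm >> (2 * i)) & 3; }

    void CheckIdentityMask() {
      for (int i = 0; i < 4; ++i) assert(SelectedLane(0xe4, i) == i);
    }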
@@ -4725,7 +4725,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
#if V8_ENABLE_WEBASSEMBLY
- if (info()->IsWasm() && required_slots > 128) {
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4735,7 +4735,7 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
__ movq(kScratchRegister,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
@@ -4748,6 +4748,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ near_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
@@ -4870,11 +4872,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_slots).
- int parameter_slots_without_receiver =
- parameter_slots - 1; // Exclude the receiver to simplify the
- // computation. We'll account for it at the end.
+ // We must pop all arguments from the stack (including the receiver).
+ // The number of arguments without the receiver is
+ // max(argc_reg, parameter_slots-1), and the receiver is added in
+ // DropArguments().
+ int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
@@ -4884,11 +4886,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
- __ PopReturnAddressTo(scratch_reg);
- __ leaq(rsp, Operand(rsp, argc_reg, times_system_pointer_size,
- kSystemPointerSize)); // Also pop the receiver.
+ __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
- __ PushReturnAddressFrom(scratch_reg);
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
Register scratch_reg = r10;
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index f54b3d5792..53ee75064b 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -3298,11 +3298,17 @@ void InstructionSelector::VisitI64x2Neg(Node* node) {
void InstructionSelector::VisitI64x2ShrS(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempRegister()};
- // Use fixed to rcx, to use sarq_cl in codegen.
- Emit(kX64I64x2ShrS, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), rcx),
- arraysize(temps), temps);
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ Emit(kX64I64x2ShrS, dst, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64I64x2ShrS, dst, g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ }
}
void InstructionSelector::VisitI64x2Mul(Node* node) {
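For the reworked VisitI64x2ShrS above (immediate shifts need no temps; variable shifts get a temp SIMD register), here is a hedged scalar reference of the lane semantics the selected code must match — plain C++, not the SSE/AVX sequence:

    #include <cstdint>

    // Arithmetic shift right of each 64-bit lane; shift counts wrap modulo 64.
    void I64x2ShrSRef(int64_t lanes[2], uint32_t shift) {
      shift &= 63;
      for (int i = 0; i < 2; ++i) {
        lanes[i] >>= shift;  // sign-propagating shift on signed operands
      }
    }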
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 8059faa176..a864012a7a 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -101,8 +101,9 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
Node* input = inputs[i];
ControlPathConditions from_input = node_conditions_.Get(input);
if (!from_input.LookupCondition(branch_condition, &previous_branch,
- &condition_value))
+ &condition_value)) {
return;
+ }
if (phase_ == kEARLY) {
phi_inputs.emplace_back(condition_value ? jsgraph()->TrueConstant()
@@ -128,6 +129,7 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
Reduction BranchElimination::ReduceBranch(Node* node) {
Node* condition = node->InputAt(0);
Node* control_input = NodeProperties::GetControlInput(node, 0);
+ if (!reduced_.Get(control_input)) return NoChange();
ControlPathConditions from_input = node_conditions_.Get(control_input);
Node* branch;
bool condition_value;
@@ -283,7 +285,7 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
}
Reduction BranchElimination::ReduceStart(Node* node) {
- return UpdateConditions(node, {});
+ return UpdateConditions(node, ControlPathConditions(zone_));
}
Reduction BranchElimination::ReduceOtherControl(Node* node) {
@@ -315,7 +317,7 @@ Reduction BranchElimination::UpdateConditions(
// The control path for the node is the path obtained by appending the
// current_condition to the prev_conditions. Use the original control path as
// a hint to avoid allocations.
- if (in_new_block || prev_conditions.Size() == 0) {
+ if (in_new_block || prev_conditions.blocks_.Size() == 0) {
prev_conditions.AddConditionInNewBlock(zone_, current_condition,
current_branch, is_true_branch);
} else {
@@ -330,14 +332,17 @@ void BranchElimination::ControlPathConditions::AddCondition(
Zone* zone, Node* condition, Node* branch, bool is_true,
ControlPathConditions hint) {
if (!LookupCondition(condition)) {
- FunctionalList<BranchCondition> prev_front = Front();
- if (hint.Size() > 0) {
- prev_front.PushFront({condition, branch, is_true}, zone, hint.Front());
+ BranchCondition branch_condition(condition, branch, is_true);
+ FunctionalList<BranchCondition> prev_front = blocks_.Front();
+ if (hint.blocks_.Size() > 0) {
+ prev_front.PushFront(branch_condition, zone, hint.blocks_.Front());
} else {
- prev_front.PushFront({condition, branch, is_true}, zone);
+ prev_front.PushFront(branch_condition, zone);
}
- DropFront();
- PushFront(prev_front, zone);
+ blocks_.DropFront();
+ blocks_.PushFront(prev_front, zone);
+ conditions_.Set(condition, branch_condition);
+ SLOW_DCHECK(BlocksAndConditionsInvariant());
}
}
@@ -345,35 +350,66 @@ void BranchElimination::ControlPathConditions::AddConditionInNewBlock(
Zone* zone, Node* condition, Node* branch, bool is_true) {
FunctionalList<BranchCondition> new_block;
if (!LookupCondition(condition)) {
- new_block.PushFront({condition, branch, is_true}, zone);
+ BranchCondition branch_condition(condition, branch, is_true);
+ new_block.PushFront(branch_condition, zone);
+ conditions_.Set(condition, branch_condition);
}
- PushFront(new_block, zone);
+ blocks_.PushFront(new_block, zone);
+ SLOW_DCHECK(BlocksAndConditionsInvariant());
}
bool BranchElimination::ControlPathConditions::LookupCondition(
Node* condition) const {
- for (auto block : *this) {
- for (BranchCondition element : block) {
- if (element.condition == condition) return true;
- }
- }
- return false;
+ return conditions_.Get(condition).IsSet();
}
bool BranchElimination::ControlPathConditions::LookupCondition(
Node* condition, Node** branch, bool* is_true) const {
- for (auto block : *this) {
- for (BranchCondition element : block) {
- if (element.condition == condition) {
- *is_true = element.is_true;
- *branch = element.branch;
- return true;
- }
- }
+ const BranchCondition& element = conditions_.Get(condition);
+ if (element.IsSet()) {
+ *is_true = element.is_true;
+ *branch = element.branch;
+ return true;
}
return false;
}
+void BranchElimination::ControlPathConditions::ResetToCommonAncestor(
+ ControlPathConditions other) {
+ while (other.blocks_.Size() > blocks_.Size()) other.blocks_.DropFront();
+ while (blocks_.Size() > other.blocks_.Size()) {
+ for (BranchCondition branch_condition : blocks_.Front()) {
+ conditions_.Set(branch_condition.condition, {});
+ }
+ blocks_.DropFront();
+ }
+ while (blocks_ != other.blocks_) {
+ for (BranchCondition branch_condition : blocks_.Front()) {
+ conditions_.Set(branch_condition.condition, {});
+ }
+ blocks_.DropFront();
+ other.blocks_.DropFront();
+ }
+ SLOW_DCHECK(BlocksAndConditionsInvariant());
+}
+
+#if DEBUG
+bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() {
+ PersistentMap<Node*, BranchCondition> conditions_copy(conditions_);
+ for (auto block : blocks_) {
+ for (BranchCondition condition : block) {
+ // Every element of blocks_ has to be in conditions_.
+ if (conditions_copy.Get(condition.condition) != condition) return false;
+ conditions_copy.Set(condition.condition, {});
+ }
+ }
+ // Every element of {conditions_} has to be in {blocks_}. We removed all
+  // elements of blocks_ from conditions_copy, so if it is not empty, the
+ // invariant fails.
+ return conditions_copy.begin() == conditions_copy.end();
+}
+#endif
+
void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
// Check if {branch} is dead because we might have a stale side-table entry.
if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 6bc45a020d..9078c39038 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -10,6 +10,7 @@
#include "src/compiler/functional-list.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/node-aux-data.h"
+#include "src/compiler/persistent-map.h"
namespace v8 {
namespace internal {
@@ -38,6 +39,9 @@ class V8_EXPORT_PRIVATE BranchElimination final
// Represents a condition along with its value in the current control path.
// Also stores the node that branched on this condition.
struct BranchCondition {
+ BranchCondition() : condition(nullptr), branch(nullptr), is_true(false) {}
+ BranchCondition(Node* condition, Node* branch, bool is_true)
+ : condition(condition), branch(branch), is_true(is_true) {}
Node* condition;
Node* branch;
bool is_true;
@@ -47,15 +51,17 @@ class V8_EXPORT_PRIVATE BranchElimination final
is_true == other.is_true;
}
bool operator!=(BranchCondition other) const { return !(*this == other); }
+
+ bool IsSet() const { return branch != nullptr; }
};
// Class for tracking information about branch conditions. It is represented
// as a linked list of condition blocks, each of which corresponds to a block
  // of code between an IfTrue/IfFalse and a Merge. Each block is in turn
// represented as a linked list of {BranchCondition}s.
- class ControlPathConditions
- : public FunctionalList<FunctionalList<BranchCondition>> {
+ class ControlPathConditions {
public:
+ explicit ControlPathConditions(Zone* zone) : conditions_(zone) {}
// Checks if {condition} is present in this {ControlPathConditions}.
bool LookupCondition(Node* condition) const;
// Checks if {condition} is present in this {ControlPathConditions} and
@@ -68,9 +74,29 @@ class V8_EXPORT_PRIVATE BranchElimination final
// Adds a condition in a new block.
void AddConditionInNewBlock(Zone* zone, Node* condition, Node* branch,
bool is_true);
+ // Reset this {ControlPathConditions} to the longest prefix that is common
+ // with {other}.
+ void ResetToCommonAncestor(ControlPathConditions other);
+
+ bool operator==(const ControlPathConditions& other) const {
+ return blocks_ == other.blocks_;
+ }
+ bool operator!=(const ControlPathConditions& other) const {
+ return blocks_ != other.blocks_;
+ }
+
+ friend class BranchElimination;
private:
- using FunctionalList<FunctionalList<BranchCondition>>::PushFront;
+ FunctionalList<FunctionalList<BranchCondition>> blocks_;
+    // This is an auxiliary data structure that provides fast lookups in the
+    // set of conditions. It should hold at any point that the contents of
+    // {blocks_} and {conditions_} are the same, which is checked in
+ // {BlocksAndConditionsInvariant}.
+ PersistentMap<Node*, BranchCondition> conditions_;
+#if DEBUG
+ bool BlocksAndConditionsInvariant();
+#endif
};
Reduction ReduceBranch(Node* node);
@@ -101,7 +127,9 @@ class V8_EXPORT_PRIVATE BranchElimination final
// Maps each control node to the condition information known about the node.
// If the information is nullptr, then we have not calculated the information
// yet.
- NodeAuxData<ControlPathConditions> node_conditions_;
+
+ NodeAuxData<ControlPathConditions, ZoneConstruct<ControlPathConditions>>
+ node_conditions_;
NodeAuxData<bool> reduced_;
Zone* zone_;
Node* dead_;
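Taken together, the branch-elimination changes above keep the ordered list of condition blocks for merging and for ResetToCommonAncestor(), and shadow it with a PersistentMap so LookupCondition() becomes a single map probe instead of a walk over the whole control path. A simplified, hedged sketch of the same shape, with std::vector and std::unordered_map standing in for V8's FunctionalList and PersistentMap:

    #include <optional>
    #include <unordered_map>
    #include <vector>

    struct Cond { int branch_id; bool is_true; };

    class PathConditions {
     public:
      void StartBlock() { blocks_.emplace_back(); }
      void Add(int condition, int branch_id, bool is_true) {
        if (by_condition_.count(condition)) return;  // keep the first occurrence
        blocks_.back().push_back(condition);
        by_condition_[condition] = {branch_id, is_true};
      }
      // O(1) probe; the old code walked every block on the control path.
      std::optional<Cond> Lookup(int condition) const {
        auto it = by_condition_.find(condition);
        if (it == by_condition_.end()) return std::nullopt;
        return it->second;
      }
      // Both structures must stay in sync, as ResetToCommonAncestor() does when
      // it drops blocks: every condition of a dropped block leaves the map too.
      void DropLastBlock() {
        for (int c : blocks_.back()) by_condition_.erase(c);
        blocks_.pop_back();
      }
     private:
      std::vector<std::vector<int>> blocks_{std::vector<int>{}};
      std::unordered_map<int, Cond> by_condition_;
    };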
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index c46ec6944b..985a256c57 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -77,28 +77,6 @@ class BytecodeGraphBuilder {
Node* GetParameter(int index, const char* debug_name_hint = nullptr);
CodeKind code_kind() const { return code_kind_; }
- bool native_context_independent() const {
- // TODO(jgruber,v8:8888): Remove dependent code.
- return false;
- }
- bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
- bool generate_full_feedback_collection() const {
- // NCI code currently collects full feedback.
- DCHECK_IMPLIES(native_context_independent(),
- CollectFeedbackInGenericLowering());
- return native_context_independent();
- }
-
- static JSTypeHintLowering::LoweringResult NoChange() {
- return JSTypeHintLowering::LoweringResult::NoChange();
- }
- bool CanApplyTypeHintLowering(IrOpcode::Value opcode) const {
- return !generate_full_feedback_collection() ||
- !IrOpcode::IsFeedbackCollectingOpcode(opcode);
- }
- bool CanApplyTypeHintLowering(const Operator* op) const {
- return CanApplyTypeHintLowering(static_cast<IrOpcode::Value>(op->opcode()));
- }
// The node representing the current feedback vector is generated once prior
// to visiting bytecodes, and is later passed as input to other nodes that
@@ -107,22 +85,20 @@ class BytecodeGraphBuilder {
// to feedback_vector() once all uses of the direct heap object reference
// have been replaced with a Node* reference.
void CreateFeedbackVectorNode();
- Node* BuildLoadFeedbackVector();
Node* feedback_vector_node() const {
DCHECK_NOT_NULL(feedback_vector_node_);
return feedback_vector_node_;
}
void CreateFeedbackCellNode();
- Node* BuildLoadFeedbackCell();
Node* feedback_cell_node() const {
+ DCHECK(CodeKindCanTierUp(code_kind()));
DCHECK_NOT_NULL(feedback_cell_node_);
return feedback_cell_node_;
}
// Same as above for the feedback vector node.
void CreateNativeContextNode();
- Node* BuildLoadNativeContext();
Node* native_context_node() const {
DCHECK_NOT_NULL(native_context_node_);
return native_context_node_;
@@ -135,13 +111,6 @@ class BytecodeGraphBuilder {
// Only relevant for specific code kinds (see CodeKindCanTierUp).
void MaybeBuildTierUpCheck();
- // Like bytecode, NCI code must collect call feedback to preserve proper
- // behavior of inlining heuristics when tiering up to Turbofan in the future.
- // The invocation count (how often a particular JSFunction has been called)
- // is tracked by the callee. For bytecode, this happens in the
- // InterpreterEntryTrampoline, for NCI code it happens here in the prologue.
- void MaybeBuildIncrementInvocationCount();
-
  // Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
@@ -255,8 +224,6 @@ class BytecodeGraphBuilder {
// former points at JumpLoop, the latter at the loop header, i.e. the target
// of JumpLoop).
void PrepareFrameStateForOSREntryStackCheck(Node* node) {
- DCHECK_EQ(bytecode_iterator().current_offset(),
- bytecode_analysis().osr_entry_point());
DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
DCHECK(node->opcode() == IrOpcode::kJSStackCheck);
const int offset = bytecode_analysis().osr_bailout_id().ToInt();
@@ -361,7 +328,7 @@ class BytecodeGraphBuilder {
// StackChecks.
void BuildFunctionEntryStackCheck();
void BuildIterationBodyStackCheck();
- void MaybeBuildOSREntryStackCheck();
+ void BuildOSREntryStackCheck();
// Control flow plumbing.
void BuildJump();
@@ -511,7 +478,6 @@ class BytecodeGraphBuilder {
Environment* environment_;
bool const osr_;
int currently_peeled_loop_offset_;
- bool is_osr_entry_stack_check_pending_;
const bool skip_first_stack_and_tierup_check_;
@@ -1087,7 +1053,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
shared_info_(shared_info),
bytecode_array_(shared_info.GetBytecodeArray()),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value()->AsFeedbackVector()),
+ feedback_vector_(feedback_cell.value().value()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -1107,7 +1073,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
environment_(nullptr),
osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
- is_osr_entry_stack_check_pending_(osr_),
skip_first_stack_and_tierup_check_(
flags & BytecodeGraphBuilderFlag::kSkipFirstStackAndTierupCheck),
merge_environments_(local_zone),
@@ -1160,70 +1125,24 @@ Node* BytecodeGraphBuilder::GetParameter(int parameter_index,
void BytecodeGraphBuilder::CreateFeedbackCellNode() {
DCHECK_NULL(feedback_cell_node_);
- if (native_context_independent()) {
- feedback_cell_node_ = BuildLoadFeedbackCell();
- } else if (is_turboprop()) {
- feedback_cell_node_ = jsgraph()->Constant(feedback_cell_);
- }
-}
-
-Node* BytecodeGraphBuilder::BuildLoadFeedbackCell() {
- DCHECK(native_context_independent());
- DCHECK_NULL(feedback_cell_node_);
- return NewNode(
- simplified()->LoadField(AccessBuilder::ForJSFunctionFeedbackCell()),
- GetFunctionClosure());
+  // Only used by tier-up logic; for code that doesn't tier up, we can skip
+ // this.
+ if (!CodeKindCanTierUp(code_kind())) return;
+ feedback_cell_node_ = jsgraph()->Constant(feedback_cell_);
}
void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
DCHECK_NULL(feedback_vector_node_);
- feedback_vector_node_ = native_context_independent()
- ? BuildLoadFeedbackVector()
- : jsgraph()->Constant(feedback_vector());
-}
-
-Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
- DCHECK(native_context_independent());
- DCHECK_NULL(feedback_vector_node_);
-
- // The feedback vector must exist and remain live while the generated code
- // lives. Specifically that means it must be created when NCI code is
- // installed, and must not be flushed.
- return NewNode(simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
- feedback_cell_node());
+ feedback_vector_node_ = jsgraph()->Constant(feedback_vector());
}
Node* BytecodeGraphBuilder::BuildLoadFeedbackCell(int index) {
- if (native_context_independent()) {
- // TODO(jgruber,v8:8888): Assumes that the feedback vector has been
- // allocated.
- Node* closure_feedback_cell_array =
- NewNode(simplified()->LoadField(
- AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray()),
- feedback_vector_node());
-
- return NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)),
- closure_feedback_cell_array);
- } else {
- return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index));
- }
+ return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index));
}
void BytecodeGraphBuilder::CreateNativeContextNode() {
DCHECK_NULL(native_context_node_);
- native_context_node_ = native_context_independent()
- ? BuildLoadNativeContext()
- : jsgraph()->Constant(native_context());
-}
-
-Node* BytecodeGraphBuilder::BuildLoadNativeContext() {
- DCHECK(native_context_independent());
- DCHECK_NULL(native_context_node_);
- Node* context_map = NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- environment()->Context());
- return NewNode(simplified()->LoadField(AccessBuilder::ForMapNativeContext()),
- context_map);
+ native_context_node_ = jsgraph()->Constant(native_context());
}
void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
@@ -1245,21 +1164,6 @@ void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
new_target, argc, context);
}
-void BytecodeGraphBuilder::MaybeBuildIncrementInvocationCount() {
- if (!generate_full_feedback_collection()) return;
-
- Node* current_invocation_count =
- NewNode(simplified()->LoadField(
- AccessBuilder::ForFeedbackVectorInvocationCount()),
- feedback_vector_node());
- Node* next_invocation_count =
- NewNode(simplified()->NumberAdd(), current_invocation_count,
- jsgraph()->SmiConstant(1));
- NewNode(simplified()->StoreField(
- AccessBuilder::ForFeedbackVectorInvocationCount()),
- feedback_vector_node(), next_invocation_count);
-}
-
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
NodeProperties::ReplaceContextInput(result, native_context_node());
@@ -1293,7 +1197,6 @@ void BytecodeGraphBuilder::CreateGraph() {
CreateFeedbackCellNode();
CreateFeedbackVectorNode();
MaybeBuildTierUpCheck();
- MaybeBuildIncrementInvocationCount();
CreateNativeContextNode();
VisitBytecodes();
@@ -1460,6 +1363,7 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset(
void BytecodeGraphBuilder::BuildFunctionEntryStackCheck() {
if (!skip_first_stack_check()) {
+ DCHECK(exception_handlers_.empty());
Node* node =
NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
PrepareFrameStateForFunctionEntryStackCheck(node);
@@ -1472,26 +1376,36 @@ void BytecodeGraphBuilder::BuildIterationBodyStackCheck() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::MaybeBuildOSREntryStackCheck() {
- if (V8_UNLIKELY(is_osr_entry_stack_check_pending_)) {
- is_osr_entry_stack_check_pending_ = false;
- Node* node =
- NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
- PrepareFrameStateForOSREntryStackCheck(node);
- }
+void BytecodeGraphBuilder::BuildOSREntryStackCheck() {
+ DCHECK(exception_handlers_.empty());
+ Node* node =
+ NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
+ PrepareFrameStateForOSREntryStackCheck(node);
}
// We will iterate through the OSR loop, then its parent, and so on
// until we have reached the outermost loop containing the OSR loop. We do
// not generate nodes for anything before the outermost loop.
void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
+ environment()->FillWithOsrValues();
+
+ // The entry stack check has to happen *before* initialising the OSR prelude;
+ // it has to happen before setting up exception handlers, so that the
+  // optimized code can't accidentally catch a failing stack check with an
+  // OSR-ed loop inside a try-catch, e.g.
+ //
+ // try {
+ // loop { OSR(); }
+ // } catch {
+ // // Ignore failed stack check.
+ // }
+ BuildOSREntryStackCheck();
+
OsrIteratorState iterator_states(this);
iterator_states.ProcessOsrPrelude();
int osr_entry = bytecode_analysis().osr_entry_point();
DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry);
- environment()->FillWithOsrValues();
-
// Suppose we have n nested loops, loop_0 being the outermost one, and
// loop_n being the OSR loop. We start iterating the bytecode at the header
// of loop_n (the OSR loop), and then we peel the part of the the body of
@@ -1562,12 +1476,6 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
- // The OSR-entry stack check must be emitted during the first call to
- // VisitSingleBytecode in an OSR'd function. We don't know if that call
- // will be made from AdvanceToOsrEntryAndPeelLoops or from VisitBytecodes,
- // therefore we insert the logic here inside VisitSingleBytecode itself.
- MaybeBuildOSREntryStackCheck();
-
switch (bytecode_iterator().current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
@@ -1675,8 +1583,7 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name,
TypeofMode typeof_mode) {
FeedbackSource feedback = CreateFeedbackSource(feedback_slot_index);
DCHECK(IsLoadGlobalICKind(broker()->GetFeedbackSlotKind(feedback)));
- const Operator* op =
- javascript()->LoadGlobal(name.object(), feedback, typeof_mode);
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
return NewNode(op, feedback_vector_node());
}
@@ -1707,8 +1614,7 @@ void BytecodeGraphBuilder::VisitStaGlobal() {
LanguageMode language_mode =
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback));
- const Operator* op =
- javascript()->StoreGlobal(language_mode, name.object(), feedback);
+ const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, value, feedback_vector_node());
environment()->RecordAfterState(node, Environment::kAttachFrameState);
@@ -1891,13 +1797,12 @@ base::Optional<ScopeInfoRef> BytecodeGraphBuilder::TryGetScopeInfo() {
Node* context = environment()->Context();
switch (context->opcode()) {
case IrOpcode::kJSCreateFunctionContext:
- return MakeRef(
- broker(),
- CreateFunctionContextParametersOf(context->op()).scope_info());
+ return CreateFunctionContextParametersOf(context->op())
+ .scope_info(broker());
case IrOpcode::kJSCreateBlockContext:
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateWithContext:
- return MakeRef(broker(), ScopeInfoOf(context->op()));
+ return ScopeInfoOf(broker(), context->op());
case IrOpcode::kParameter: {
ScopeInfoRef scope_info = shared_info_.scope_info();
if (scope_info.HasOuterScopeInfo()) {
@@ -2100,7 +2005,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
NameRef name = MakeRefForConstantForIndexOperand<Name>(1);
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
- const Operator* op = javascript()->LoadNamed(name.object(), feedback);
+ const Operator* op = javascript()->LoadNamed(name, feedback);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedLoadNamed(op, feedback.slot);
@@ -2126,8 +2031,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
- const Operator* op =
- javascript()->LoadNamedFromSuper(name.object(), feedback);
+ const Operator* op = javascript()->LoadNamedFromSuper(name, feedback);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedLoadNamed(op, feedback.slot);
@@ -2185,12 +2089,12 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
broker()->GetFeedbackSlotKind(feedback));
- op = javascript()->StoreNamedOwn(name.object(), feedback);
+ op = javascript()->StoreNamedOwn(name, feedback);
} else {
DCHECK_EQ(StoreMode::kNormal, store_mode);
LanguageMode language_mode =
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback));
- op = javascript()->StoreNamed(language_mode, name.object(), feedback);
+ op = javascript()->StoreNamed(language_mode, name, feedback);
}
JSTypeHintLowering::LoweringResult lowering =
@@ -2288,10 +2192,10 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
bytecode_iterator().GetFlagOperand(2))
? AllocationType::kOld
: AllocationType::kYoung;
- Handle<CodeT> compile_lazy = broker()->CanonicalPersistentHandle(
- ToCodeT(*BUILTIN_CODE(jsgraph()->isolate(), CompileLazy)));
- const Operator* op = javascript()->CreateClosure(shared_info.object(),
- compile_lazy, allocation);
+ CodeTRef compile_lazy = MakeRef(
+ broker(), ToCodeT(*BUILTIN_CODE(jsgraph()->isolate(), CompileLazy)));
+ const Operator* op =
+ javascript()->CreateClosure(shared_info, compile_lazy, allocation);
Node* closure = NewNode(
op, BuildLoadFeedbackCell(bytecode_iterator().GetIndexOperand(1)));
environment()->BindAccumulator(closure);
@@ -2299,7 +2203,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
void BytecodeGraphBuilder::VisitCreateBlockContext() {
ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0);
- const Operator* op = javascript()->CreateBlockContext(scope_info.object());
+ const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* context = NewNode(op);
environment()->BindAccumulator(context);
}
@@ -2307,8 +2211,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0);
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
- const Operator* op = javascript()->CreateFunctionContext(
- scope_info.object(), slots, FUNCTION_SCOPE);
+ const Operator* op =
+ javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE);
Node* context = NewNode(op);
environment()->BindAccumulator(context);
}
@@ -2316,8 +2220,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
void BytecodeGraphBuilder::VisitCreateEvalContext() {
ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0);
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
- const Operator* op = javascript()->CreateFunctionContext(scope_info.object(),
- slots, EVAL_SCOPE);
+ const Operator* op =
+ javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE);
Node* context = NewNode(op);
environment()->BindAccumulator(context);
}
@@ -2327,7 +2231,7 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
Node* exception = environment()->LookupRegister(reg);
ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(1);
- const Operator* op = javascript()->CreateCatchContext(scope_info.object());
+ const Operator* op = javascript()->CreateCatchContext(scope_info);
Node* context = NewNode(op, exception);
environment()->BindAccumulator(context);
}
@@ -2337,7 +2241,7 @@ void BytecodeGraphBuilder::VisitCreateWithContext() {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(1);
- const Operator* op = javascript()->CreateWithContext(scope_info.object());
+ const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object);
environment()->BindAccumulator(context);
}
@@ -2366,8 +2270,8 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
FeedbackSource pair = CreateFeedbackSource(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
STATIC_ASSERT(JSCreateLiteralRegExpNode::FeedbackVectorIndex() == 0);
- const Operator* op = javascript()->CreateLiteralRegExp(
- constant_pattern.object(), pair, literal_flags);
+ const Operator* op =
+ javascript()->CreateLiteralRegExp(constant_pattern, pair, literal_flags);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
@@ -2389,9 +2293,8 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
int number_of_elements =
array_boilerplate_description.constants_elements_length();
STATIC_ASSERT(JSCreateLiteralArrayNode::FeedbackVectorIndex() == 0);
- const Operator* op =
- javascript()->CreateLiteralArray(array_boilerplate_description.object(),
- pair, literal_flags, number_of_elements);
+ const Operator* op = javascript()->CreateLiteralArray(
+ array_boilerplate_description, pair, literal_flags, number_of_elements);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
@@ -2423,7 +2326,7 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
int number_of_properties = constant_properties.size();
STATIC_ASSERT(JSCreateLiteralObjectNode::FeedbackVectorIndex() == 0);
const Operator* op = javascript()->CreateLiteralObject(
- constant_properties.object(), pair, literal_flags, number_of_properties);
+ constant_properties, pair, literal_flags, number_of_properties);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
@@ -2455,8 +2358,8 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
TemplateObjectDescriptionRef description =
MakeRefForConstantForIndexOperand<TemplateObjectDescription>(0);
STATIC_ASSERT(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0);
- const Operator* op = javascript()->GetTemplateObject(
- description.object(), shared_info().object(), source);
+ const Operator* op =
+ javascript()->GetTemplateObject(description, shared_info(), source);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* template_object = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(template_object);
@@ -4162,7 +4065,6 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedUnaryOp(const Operator* op,
Node* operand,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4176,7 +4078,6 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op, Node* left,
Node* right,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4191,7 +4092,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver,
Node* cache_array,
Node* cache_type, Node* index,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(IrOpcode::kJSForInNext)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4204,7 +4104,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(IrOpcode::kJSForInPrepare)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4217,7 +4116,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(IrOpcode::kJSToNumber)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4229,7 +4127,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedCall(
const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4244,7 +4141,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
Node* const* args,
int arg_count,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4259,7 +4155,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
Node* receiver,
FeedbackSlot load_slot,
FeedbackSlot call_slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult early_reduction =
@@ -4272,7 +4167,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult early_reduction =
@@ -4285,7 +4179,6 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
Node* receiver, Node* key,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4299,7 +4192,6 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
Node* receiver, Node* value,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4314,7 +4206,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
Node* receiver, Node* key,
Node* value,
FeedbackSlot slot) {
- if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4561,9 +4452,6 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter,
ObserveNodeInfo const& observe_node_info) {
- DCHECK(broker->IsSerializedForCompilation(
- shared_info, feedback_cell.value()->AsFeedbackVector()));
- DCHECK(feedback_cell.value()->AsFeedbackVector().serialized());
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
feedback_cell, osr_offset, jsgraph, invocation_frequency,
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 4e0afd8ea5..0e6872aa66 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -125,7 +125,7 @@ class SymbolWrapper;
class Undetectable;
class UniqueName;
class WasmCapiFunctionData;
-class WasmExceptionObject;
+class WasmTagObject;
class WasmExceptionPackage;
class WasmExceptionTag;
class WasmExportedFunctionData;
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 85cd1bf303..dc2db32753 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -21,14 +21,15 @@ namespace compiler {
CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
- : zone_(zone), broker_(broker), dependencies_(zone) {}
+ : zone_(zone), broker_(broker), dependencies_(zone) {
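+ // Register this dependency set with the broker so that code holding only a
+ // broker (e.g. the broker->dependencies() calls in the DCHECKs below) can
+ // reach the active dependencies.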
+ broker->set_dependencies(this);
+}
class InitialMapDependency final : public CompilationDependency {
public:
- InitialMapDependency(const JSFunctionRef& function, const MapRef& initial_map)
+ InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function,
+ const MapRef& initial_map)
: function_(function), initial_map_(initial_map) {
- DCHECK(function_.has_initial_map());
- DCHECK(function_.initial_map().equals(initial_map_));
}
bool IsValid() const override {
@@ -51,19 +52,22 @@ class InitialMapDependency final : public CompilationDependency {
class PrototypePropertyDependency final : public CompilationDependency {
public:
- PrototypePropertyDependency(const JSFunctionRef& function,
+ PrototypePropertyDependency(JSHeapBroker* broker,
+ const JSFunctionRef& function,
const ObjectRef& prototype)
: function_(function), prototype_(prototype) {
- DCHECK(function_.has_prototype());
- DCHECK(!function_.PrototypeRequiresRuntimeLookup());
- DCHECK(function_.prototype().equals(prototype_));
+ DCHECK(function_.has_instance_prototype(broker->dependencies()));
+ DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies()));
+ DCHECK(function_.instance_prototype(broker->dependencies())
+ .equals(prototype_));
}
bool IsValid() const override {
Handle<JSFunction> function = function_.object();
- return function->has_prototype_slot() && function->has_prototype() &&
+ return function->has_prototype_slot() &&
+ function->has_instance_prototype() &&
!function->PrototypeRequiresRuntimeLookup() &&
- function->prototype() == *prototype_.object();
+ function->instance_prototype() == *prototype_.object();
}
void PrepareInstall() const override {
@@ -75,7 +79,7 @@ class PrototypePropertyDependency final : public CompilationDependency {
void Install(Handle<Code> code) const override {
SLOW_DCHECK(IsValid());
Handle<JSFunction> function = function_.object();
- DCHECK(function->has_initial_map());
+ CHECK(function->has_initial_map());
Handle<Map> initial_map(function->initial_map(), function_.isolate());
DependentCode::InstallDependency(function_.isolate(), code, initial_map,
DependentCode::kInitialMapChangedGroup);
@@ -338,10 +342,29 @@ class OwnConstantDictionaryPropertyDependency final
ObjectRef const value_;
};
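+// Records that the compiler's cached view of a JSFunction still matches the
+// heap state when the dependencies are committed; used with concurrent
+// inlining and re-validated via JSFunctionRef::IsConsistentWithHeapState().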
+class ConsistentJSFunctionViewDependency final : public CompilationDependency {
+ public:
+ explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function)
+ : function_(function) {}
+
+ bool IsValid() const override {
+ return function_.IsConsistentWithHeapState();
+ }
+
+ void Install(Handle<Code> code) const override {}
+
+#ifdef DEBUG
+ bool IsConsistentJSFunctionViewDependency() const override { return true; }
+#endif
+
+ private:
+ const JSFunctionRef function_;
+};
+
class TransitionDependency final : public CompilationDependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
- DCHECK(!map_.is_deprecated());
+ DCHECK(map_.CanBeDeprecated());
}
bool IsValid() const override { return !map_.object()->is_deprecated(); }
@@ -384,108 +407,107 @@ class PretenureModeDependency final : public CompilationDependency {
class FieldRepresentationDependency final : public CompilationDependency {
public:
- FieldRepresentationDependency(const MapRef& owner, InternalIndex descriptor,
+ FieldRepresentationDependency(const MapRef& map, InternalIndex descriptor,
Representation representation)
- : owner_(owner),
- descriptor_(descriptor),
- representation_(representation) {
- }
+ : map_(map), descriptor_(descriptor), representation_(representation) {}
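+ // The dependency is now recorded against {map_} itself; the field owner is
+ // looked up (and re-validated) only at install time.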
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
- Handle<Map> owner = owner_.object();
- Isolate* isolate = owner_.isolate();
-
- // TODO(v8:11670): Consider turn this back into a CHECK inside the
- // constructor, if possible in light of concurrent heap state
- // modifications.
- if (owner->FindFieldOwner(isolate, descriptor_) != *owner) return false;
-
- return representation_.Equals(owner->instance_descriptors(isolate)
+ if (map_.object()->is_deprecated()) return false;
+ return representation_.Equals(map_.object()
+ ->instance_descriptors(map_.isolate())
.GetDetails(descriptor_)
.representation());
}
void Install(Handle<Code> code) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
+ Isolate* isolate = map_.isolate();
+ Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
+ isolate);
+ CHECK(!owner->is_deprecated());
+ CHECK(representation_.Equals(owner->instance_descriptors(isolate)
+ .GetDetails(descriptor_)
+ .representation()));
+ DependentCode::InstallDependency(isolate, code, owner,
DependentCode::kFieldRepresentationGroup);
}
#ifdef DEBUG
bool IsFieldRepresentationDependencyOnMap(
Handle<Map> const& receiver_map) const override {
- return owner_.object().equals(receiver_map);
+ return map_.object().equals(receiver_map);
}
#endif
private:
- MapRef owner_;
+ MapRef map_;
InternalIndex descriptor_;
Representation representation_;
};
class FieldTypeDependency final : public CompilationDependency {
public:
- FieldTypeDependency(const MapRef& owner, InternalIndex descriptor,
+ FieldTypeDependency(const MapRef& map, InternalIndex descriptor,
const ObjectRef& type)
- : owner_(owner), descriptor_(descriptor), type_(type) {}
+ : map_(map), descriptor_(descriptor), type_(type) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
- Handle<Map> owner = owner_.object();
- Isolate* isolate = owner_.isolate();
-
- // TODO(v8:11670): Consider turn this back into a CHECK inside the
- // constructor, if possible in light of concurrent heap state
- // modifications.
- if (owner->FindFieldOwner(isolate, descriptor_) != *owner) return false;
-
- Handle<Object> type = type_.object();
- return *type ==
- owner->instance_descriptors(isolate).GetFieldType(descriptor_);
+ if (map_.object()->is_deprecated()) return false;
+ return *type_.object() == map_.object()
+ ->instance_descriptors(map_.isolate())
+ .GetFieldType(descriptor_);
}
void Install(Handle<Code> code) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
+ Isolate* isolate = map_.isolate();
+ Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
+ isolate);
+ CHECK(!owner->is_deprecated());
+ CHECK_EQ(*type_.object(),
+ owner->instance_descriptors(isolate).GetFieldType(descriptor_));
+ DependentCode::InstallDependency(isolate, code, owner,
DependentCode::kFieldTypeGroup);
}
private:
- MapRef owner_;
+ MapRef map_;
InternalIndex descriptor_;
ObjectRef type_;
};
class FieldConstnessDependency final : public CompilationDependency {
public:
- FieldConstnessDependency(const MapRef& owner, InternalIndex descriptor)
- : owner_(owner), descriptor_(descriptor) {}
+ FieldConstnessDependency(const MapRef& map, InternalIndex descriptor)
+ : map_(map), descriptor_(descriptor) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
- Handle<Map> owner = owner_.object();
- Isolate* isolate = owner_.isolate();
-
- // TODO(v8:11670): Consider turn this back into a CHECK inside the
- // constructor, if possible in light of concurrent heap state
- // modifications.
- if (owner->FindFieldOwner(isolate, descriptor_) != *owner) return false;
-
- return PropertyConstness::kConst == owner->instance_descriptors(isolate)
- .GetDetails(descriptor_)
- .constness();
+ if (map_.object()->is_deprecated()) return false;
+ return PropertyConstness::kConst ==
+ map_.object()
+ ->instance_descriptors(map_.isolate())
+ .GetDetails(descriptor_)
+ .constness();
}
void Install(Handle<Code> code) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
+ Isolate* isolate = map_.isolate();
+ Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
+ isolate);
+ CHECK(!owner->is_deprecated());
+ CHECK_EQ(PropertyConstness::kConst, owner->instance_descriptors(isolate)
+ .GetDetails(descriptor_)
+ .constness());
+ DependentCode::InstallDependency(isolate, code, owner,
DependentCode::kFieldConstGroup);
}
private:
- MapRef owner_;
+ MapRef map_;
InternalIndex descriptor_;
};
@@ -523,9 +545,7 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
- explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
- DCHECK_EQ(cell_.value().AsSmi(), Protectors::kProtectorValid);
- }
+ explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
@@ -638,23 +658,20 @@ void CompilationDependencies::RecordDependency(
MapRef CompilationDependencies::DependOnInitialMap(
const JSFunctionRef& function) {
- DCHECK(!function.IsNeverSerializedHeapObject());
- MapRef map = function.initial_map();
- RecordDependency(zone_->New<InitialMapDependency>(function, map));
+ MapRef map = function.initial_map(this);
+ RecordDependency(zone_->New<InitialMapDependency>(broker_, function, map));
return map;
}
ObjectRef CompilationDependencies::DependOnPrototypeProperty(
const JSFunctionRef& function) {
- DCHECK(!function.IsNeverSerializedHeapObject());
- ObjectRef prototype = function.prototype();
+ ObjectRef prototype = function.instance_prototype(this);
RecordDependency(
- zone_->New<PrototypePropertyDependency>(function, prototype));
+ zone_->New<PrototypePropertyDependency>(broker_, function, prototype));
return prototype;
}
void CompilationDependencies::DependOnStableMap(const MapRef& map) {
- DCHECK(!map.IsNeverSerializedHeapObject());
if (map.CanTransition()) {
RecordDependency(zone_->New<StableMapDependency>(map));
}
@@ -677,11 +694,7 @@ AllocationType CompilationDependencies::DependOnPretenureMode(
PropertyConstness CompilationDependencies::DependOnFieldConstness(
const MapRef& map, InternalIndex descriptor) {
- DCHECK(!map.IsNeverSerializedHeapObject());
- MapRef owner = map.FindFieldOwner(descriptor);
- DCHECK(!owner.IsNeverSerializedHeapObject());
- PropertyConstness constness =
- owner.GetPropertyDetails(descriptor).constness();
+ PropertyConstness constness = map.GetPropertyDetails(descriptor).constness();
if (constness == PropertyConstness::kMutable) return constness;
// If the map can have fast elements transitions, then the field can be only
@@ -696,7 +709,7 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness(
}
DCHECK_EQ(constness, PropertyConstness::kConst);
- RecordDependency(zone_->New<FieldConstnessDependency>(owner, descriptor));
+ RecordDependency(zone_->New<FieldConstnessDependency>(map, descriptor));
return PropertyConstness::kConst;
}
@@ -708,7 +721,7 @@ void CompilationDependencies::DependOnGlobalProperty(
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
- cell.SerializeAsProtector();
+ cell.CacheAsProtector();
if (cell.value().AsSmi() != Protectors::kProtectorValid) return false;
RecordDependency(zone_->New<ProtectorDependency>(cell));
return true;
@@ -783,12 +796,6 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty(
}
bool CompilationDependencies::Commit(Handle<Code> code) {
- // Dependencies are context-dependent. In the future it may be possible to
- // restore them in the consumer native context, but for now they are
- // disabled.
- CHECK_IMPLIES(broker_->is_native_context_independent(),
- dependencies_.empty());
-
for (auto dep : dependencies_) {
if (!dep->IsValid()) {
dependencies_.clear();
@@ -812,17 +819,27 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
}
// It is even possible that a GC during the above installations invalidated
- // one of the dependencies. However, this should only affect pretenure mode
- // dependencies, which we assert below. It is safe to return successfully in
- // these cases, because once the code gets executed it will do a stack check
- // that triggers its deoptimization.
+ // one of the dependencies. However, this should only affect
+ //
+ // 1. pretenure mode dependencies, or
+ // 2. function consistency dependencies,
+ //
+ // which we assert below. It is safe to return successfully in these cases,
+ // because
+ //
+ // 1. once the code gets executed it will do a stack check that triggers its
+ // deoptimization.
+ // 2. since the function state was deemed consistent above, that means the
+ // compilation saw a self-consistent state of the jsfunction.
if (FLAG_stress_gc_during_compilation) {
broker_->isolate()->heap()->PreciseCollectAllGarbage(
Heap::kForcedGC, GarbageCollectionReason::kTesting, kNoGCCallbackFlags);
}
#ifdef DEBUG
for (auto dep : dependencies_) {
- CHECK_IMPLIES(!dep->IsValid(), dep->IsPretenureModeDependency());
+ CHECK_IMPLIES(!dep->IsValid(),
+ dep->IsPretenureModeDependency() ||
+ dep->IsConsistentJSFunctionViewDependency());
}
#endif
@@ -847,29 +864,22 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
}
} // namespace
-template <class MapContainer>
void CompilationDependencies::DependOnStablePrototypeChains(
- MapContainer const& receiver_maps, WhereToStart start,
+ ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype) {
- for (auto map : receiver_maps) {
- MapRef receiver_map = MakeRef(broker_, map);
- if (start == kStartAtReceiver) DependOnStableMap(receiver_map);
+ for (MapRef receiver_map : receiver_maps) {
if (receiver_map.IsPrimitiveMap()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
+ // Note: Keep sync'd with AccessInfoFactory::ComputePropertyAccessInfo.
base::Optional<JSFunctionRef> constructor =
broker_->target_native_context().GetConstructorFunction(receiver_map);
- if (constructor.has_value()) receiver_map = constructor->initial_map();
+ receiver_map = constructor.value().initial_map(this);
}
+ if (start == kStartAtReceiver) DependOnStableMap(receiver_map);
DependOnStablePrototypeChain(this, receiver_map, last_prototype);
}
}
-template void CompilationDependencies::DependOnStablePrototypeChains(
- ZoneVector<Handle<Map>> const& receiver_maps, WhereToStart start,
- base::Optional<JSObjectRef> last_prototype);
-template void CompilationDependencies::DependOnStablePrototypeChains(
- ZoneHandleSet<Map> const& receiver_maps, WhereToStart start,
- base::Optional<JSObjectRef> last_prototype);
void CompilationDependencies::DependOnElementsKinds(
const AllocationSiteRef& site) {
@@ -882,6 +892,12 @@ void CompilationDependencies::DependOnElementsKinds(
CHECK_EQ(current.nested_site().AsSmi(), 0);
}
+void CompilationDependencies::DependOnConsistentJSFunctionView(
+ const JSFunctionRef& function) {
+ DCHECK(broker_->is_concurrent_inlining());
+ RecordDependency(zone_->New<ConsistentJSFunctionViewDependency>(function));
+}
+
SlackTrackingPrediction::SlackTrackingPrediction(MapRef initial_map,
int instance_size)
: instance_size_(instance_size),
@@ -893,20 +909,19 @@ SlackTrackingPrediction
CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
const JSFunctionRef& function) {
MapRef initial_map = DependOnInitialMap(function);
- int instance_size = function.InitialMapInstanceSizeWithMinSlack();
+ int instance_size = function.InitialMapInstanceSizeWithMinSlack(this);
// Currently, we always install the prediction dependency. If this turns out
// to be too expensive, we can only install the dependency if slack
// tracking is active.
RecordDependency(zone_->New<InitialMapInstanceSizePredictionDependency>(
function, instance_size));
- DCHECK_LE(instance_size, function.initial_map().instance_size());
+ CHECK_LE(instance_size, function.initial_map(this).instance_size());
return SlackTrackingPrediction(initial_map, instance_size);
}
CompilationDependency const*
CompilationDependencies::TransitionDependencyOffTheRecord(
const MapRef& target_map) const {
- DCHECK(!target_map.IsNeverSerializedHeapObject());
if (target_map.CanBeDeprecated()) {
return zone_->New<TransitionDependency>(target_map);
} else {
@@ -917,26 +932,16 @@ CompilationDependencies::TransitionDependencyOffTheRecord(
CompilationDependency const*
CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
- const MapRef& map, InternalIndex descriptor) const {
- DCHECK(!map.IsNeverSerializedHeapObject());
- MapRef owner = map.FindFieldOwner(descriptor);
- DCHECK(!owner.IsNeverSerializedHeapObject());
- PropertyDetails details = owner.GetPropertyDetails(descriptor);
- CHECK(details.representation().Equals(
- map.GetPropertyDetails(descriptor).representation()));
- return zone_->New<FieldRepresentationDependency>(owner, descriptor,
- details.representation());
+ const MapRef& map, InternalIndex descriptor,
+ Representation representation) const {
+ return zone_->New<FieldRepresentationDependency>(map, descriptor,
+ representation);
}
CompilationDependency const*
CompilationDependencies::FieldTypeDependencyOffTheRecord(
- const MapRef& map, InternalIndex descriptor) const {
- DCHECK(!map.IsNeverSerializedHeapObject());
- MapRef owner = map.FindFieldOwner(descriptor);
- DCHECK(!owner.IsNeverSerializedHeapObject());
- ObjectRef type = owner.GetFieldType(descriptor);
- CHECK(type.equals(map.GetFieldType(descriptor)));
- return zone_->New<FieldTypeDependency>(owner, descriptor, type);
+ const MapRef& map, InternalIndex descriptor, const ObjectRef& type) const {
+ return zone_->New<FieldTypeDependency>(map, descriptor, type);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 0e7b02cbfb..be507c6843 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -34,10 +34,6 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
- // TODO(jgruber): Remove this method once GetPropertyAccessInfo no longer
- // uses the two-phase approach between serialization and compilation.
- void ClearForConcurrentGetPropertyAccessInfo() { dependencies_.clear(); }
-
// Return the initial map of {function} and record the assumption that it
// stays the initial map.
MapRef DependOnInitialMap(const JSFunctionRef& function);
@@ -116,15 +112,16 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// For each given map, depend on the stability of (the maps of) all prototypes
// up to (and including) the {last_prototype}.
- template <class MapContainer>
void DependOnStablePrototypeChains(
- MapContainer const& receiver_maps, WhereToStart start,
+ ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype =
base::Optional<JSObjectRef>());
// Like DependOnElementsKind but also applies to all nested allocation sites.
void DependOnElementsKinds(const AllocationSiteRef& site);
+ void DependOnConsistentJSFunctionView(const JSFunctionRef& function);
+
// Predict the final instance size for {function}'s initial map and record
// the assumption that this prediction is correct. In addition, register
// the initial map dependency. This method returns the {function}'s the
@@ -148,12 +145,14 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Gather the assumption that the field representation of a field does not
// change. The field is identified by the arguments.
CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
- const MapRef& map, InternalIndex descriptor) const;
+ const MapRef& map, InternalIndex descriptor,
+ Representation representation) const;
// Gather the assumption that the field type of a field does not change. The
// field is identified by the arguments.
CompilationDependency const* FieldTypeDependencyOffTheRecord(
- const MapRef& map, InternalIndex descriptor) const;
+ const MapRef& map, InternalIndex descriptor,
+ const ObjectRef& /* Contains a FieldType underneath. */ type) const;
private:
Zone* const zone_;
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
index 1cacb4d6df..852c7b7640 100644
--- a/deps/v8/src/compiler/compilation-dependency.h
+++ b/deps/v8/src/compiler/compilation-dependency.h
@@ -26,6 +26,7 @@ class CompilationDependency : public ZoneObject {
Handle<Map> const& receiver_map) const {
return false;
}
+ virtual bool IsConsistentJSFunctionViewDependency() const { return false; }
#endif
};
diff --git a/deps/v8/src/compiler/compiler-source-position-table.h b/deps/v8/src/compiler/compiler-source-position-table.h
index 699402c8ef..f66d132df1 100644
--- a/deps/v8/src/compiler/compiler-source-position-table.h
+++ b/deps/v8/src/compiler/compiler-source-position-table.h
@@ -62,10 +62,14 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
private:
class Decorator;
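+ // Default-value callback for {table_}: nodes without an explicitly recorded
+ // position report SourcePosition::Unknown().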
+ static SourcePosition UnknownSourcePosition(Zone* zone) {
+ return SourcePosition::Unknown();
+ }
+
Graph* const graph_;
Decorator* decorator_;
SourcePosition current_position_;
- NodeAuxData<SourcePosition, SourcePosition::Unknown> table_;
+ NodeAuxData<SourcePosition, UnknownSourcePosition> table_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index dadbeb0f7b..b5df8b542b 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -74,99 +74,261 @@ bool Subsumes(MachineRepresentation from, MachineRepresentation to) {
return false;
}
-bool ObjectMayAlias(Node* a, Node* b) {
- if (a != b) {
- if (NodeProperties::IsFreshObject(b)) std::swap(a, b);
- if (NodeProperties::IsFreshObject(a) &&
- (NodeProperties::IsFreshObject(b) ||
- b->opcode() == IrOpcode::kParameter ||
- b->opcode() == IrOpcode::kLoadImmutable ||
- IrOpcode::IsConstantOpcode(b->opcode()))) {
- return false;
- }
- }
- return true;
+bool IsConstantObject(Node* object) {
+ return object->opcode() == IrOpcode::kParameter ||
+ object->opcode() == IrOpcode::kLoadImmutable ||
+ NodeProperties::IsConstant(object);
}
-bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
- MachineRepresentation repr2) {
- IntPtrMatcher matcher1(offset1);
- IntPtrMatcher matcher2(offset2);
- // If either of the offsets is variable, accesses may alias
- if (!matcher1.HasResolvedValue() || !matcher2.HasResolvedValue()) {
- return true;
- }
- // Otherwise, we return whether accesses overlap
- intptr_t start1 = matcher1.ResolvedValue();
- intptr_t end1 = start1 + ElementSizeInBytes(repr1);
- intptr_t start2 = matcher2.ResolvedValue();
- intptr_t end2 = start2 + ElementSizeInBytes(repr2);
- return !(end1 <= start2 || end2 <= start1);
+bool IsFreshObject(Node* object) {
+ DCHECK_IMPLIES(NodeProperties::IsFreshObject(object),
+ !IsConstantObject(object));
+ return NodeProperties::IsFreshObject(object);
}
} // namespace CsaLoadEliminationHelpers
namespace Helpers = CsaLoadEliminationHelpers;
-void CsaLoadElimination::AbstractState::Merge(AbstractState const* that,
- Zone* zone) {
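+// Intersects two abstract states: an entry survives only if both states agree
+// on it; entries that differ are reset to the empty FieldInfo (i.e. unknown).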
+// static
+template <typename OuterKey>
+void CsaLoadElimination::AbstractState::IntersectWith(
+ OuterMap<OuterKey>& to, const OuterMap<OuterKey>& from) {
FieldInfo empty_info;
- for (std::pair<Field, FieldInfo> entry : field_infos_) {
- if (that->field_infos_.Get(entry.first) != entry.second) {
- field_infos_.Set(entry.first, empty_info);
+ for (const std::pair<OuterKey, InnerMap>& to_map : to) {
+ InnerMap to_map_copy(to_map.second);
+ OuterKey key = to_map.first;
+ InnerMap current_map = from.Get(key);
+ for (std::pair<Node*, FieldInfo> info : to_map.second) {
+ if (current_map.Get(info.first) != info.second) {
+ to_map_copy.Set(info.first, empty_info);
+ }
}
+ to.Set(key, to_map_copy);
}
}
+void CsaLoadElimination::AbstractState::IntersectWith(
+ AbstractState const* that) {
+ IntersectWith(fresh_entries_, that->fresh_entries_);
+ IntersectWith(constant_entries_, that->constant_entries_);
+ IntersectWith(arbitrary_entries_, that->arbitrary_entries_);
+ IntersectWith(fresh_unknown_entries_, that->fresh_unknown_entries_);
+ IntersectWith(constant_unknown_entries_, that->constant_unknown_entries_);
+ IntersectWith(arbitrary_unknown_entries_, that->arbitrary_unknown_entries_);
+}
+
CsaLoadElimination::AbstractState const*
-CsaLoadElimination::AbstractState::KillField(Node* kill_object,
- Node* kill_offset,
- MachineRepresentation kill_repr,
- Zone* zone) const {
- FieldInfo empty_info;
- AbstractState* that = zone->New<AbstractState>(*this);
- for (std::pair<Field, FieldInfo> entry : that->field_infos_) {
- Field field = entry.first;
- MachineRepresentation field_repr = entry.second.representation;
- if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second,
- field_repr) &&
- Helpers::ObjectMayAlias(kill_object, field.first)) {
- that->field_infos_.Set(field, empty_info);
+CsaLoadElimination::AbstractState::KillField(Node* object, Node* offset,
+ MachineRepresentation repr) const {
+ AbstractState* result = zone_->New<AbstractState>(*this);
+ UnknownOffsetInfos empty_unknown(zone_, InnerMap(zone_));
+ IntPtrMatcher m(offset);
+ if (m.HasResolvedValue()) {
+ uint32_t num_offset = static_cast<uint32_t>(m.ResolvedValue());
+ if (Helpers::IsFreshObject(object)) {
+ // May alias with:
+ // - The same object/offset
+ // - Arbitrary objects with the same offset
+ // - The same object, unknown offset
+ // - Arbitrary objects with unknown offset
+ result->KillOffsetInFresh(object, num_offset, repr);
+ KillOffset(result->arbitrary_entries_, num_offset, repr, zone_);
+ result->fresh_unknown_entries_.Set(object, InnerMap(zone_));
+ result->arbitrary_unknown_entries_ = empty_unknown;
+ } else if (Helpers::IsConstantObject(object)) {
+ // May alias with:
+ // - Constant/arbitrary objects with the same offset
+ // - Constant/arbitrary objects with unknown offset
+ KillOffset(result->constant_entries_, num_offset, repr, zone_);
+ KillOffset(result->arbitrary_entries_, num_offset, repr, zone_);
+ result->constant_unknown_entries_ = empty_unknown;
+ result->arbitrary_unknown_entries_ = empty_unknown;
+ } else {
+ // May alias with:
+ // - Any object with the same or unknown offset
+ KillOffset(result->fresh_entries_, num_offset, repr, zone_);
+ KillOffset(result->constant_entries_, num_offset, repr, zone_);
+ KillOffset(result->arbitrary_entries_, num_offset, repr, zone_);
+ result->fresh_unknown_entries_ = empty_unknown;
+ result->constant_unknown_entries_ = empty_unknown;
+ result->arbitrary_unknown_entries_ = empty_unknown;
+ }
+ } else {
+ ConstantOffsetInfos empty_constant(zone_, InnerMap(zone_));
+ if (Helpers::IsFreshObject(object)) {
+ // May alias with:
+ // - The same object with any known/unknown offset
+ // - Arbitrary objects with any known/unknown offset
+ for (auto map : result->fresh_entries_) {
+ // TODO(manoskouk): Consider adding a map from fresh objects to offsets
+ // to implement this efficiently.
+ InnerMap map_copy(map.second);
+ map_copy.Set(object, FieldInfo());
+ result->fresh_entries_.Set(map.first, map_copy);
+ }
+ result->fresh_unknown_entries_.Set(object, InnerMap(zone_));
+ result->arbitrary_entries_ = empty_constant;
+ result->arbitrary_unknown_entries_ = empty_unknown;
+ } else if (Helpers::IsConstantObject(object)) {
+ // May alias with:
+ // - Constant/arbitrary objects with any known/unknown offset
+ result->constant_entries_ = empty_constant;
+ result->constant_unknown_entries_ = empty_unknown;
+ result->arbitrary_entries_ = empty_constant;
+ result->arbitrary_unknown_entries_ = empty_unknown;
+ } else {
+ // May alias with anything. Clear the state.
+ return zone_->New<AbstractState>(zone_);
}
}
- return that;
+
+ return result;
}
CsaLoadElimination::AbstractState const*
CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
- CsaLoadElimination::FieldInfo info,
- Zone* zone) const {
- AbstractState* that = zone->New<AbstractState>(*this);
- that->field_infos_.Set({object, offset}, info);
- return that;
+ Node* value,
+ MachineRepresentation repr) const {
+ AbstractState* new_state = zone_->New<AbstractState>(*this);
+ IntPtrMatcher m(offset);
+ if (m.HasResolvedValue()) {
+ uint32_t offset_num = static_cast<uint32_t>(m.ResolvedValue());
+ ConstantOffsetInfos& infos = Helpers::IsFreshObject(object)
+ ? new_state->fresh_entries_
+ : Helpers::IsConstantObject(object)
+ ? new_state->constant_entries_
+ : new_state->arbitrary_entries_;
+ Update(infos, offset_num, object, FieldInfo(value, repr));
+ } else {
+ UnknownOffsetInfos& infos =
+ Helpers::IsFreshObject(object)
+ ? new_state->fresh_unknown_entries_
+ : Helpers::IsConstantObject(object)
+ ? new_state->constant_unknown_entries_
+ : new_state->arbitrary_unknown_entries_;
+ Update(infos, object, offset, FieldInfo(value, repr));
+ }
+ return new_state;
}
CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
Node* object, Node* offset) const {
- if (object->IsDead()) {
- return {};
+ IntPtrMatcher m(offset);
+ if (m.HasResolvedValue()) {
+ uint32_t num_offset = static_cast<uint32_t>(m.ResolvedValue());
+ const ConstantOffsetInfos& infos = Helpers::IsFreshObject(object)
+ ? fresh_entries_
+ : Helpers::IsConstantObject(object)
+ ? constant_entries_
+ : arbitrary_entries_;
+ return infos.Get(num_offset).Get(object);
+ } else {
+ const UnknownOffsetInfos& infos = Helpers::IsFreshObject(object)
+ ? fresh_unknown_entries_
+ : Helpers::IsConstantObject(object)
+ ? constant_unknown_entries_
+ : arbitrary_unknown_entries_;
+ return infos.Get(object).Get(offset);
}
- return field_infos_.Get({object, offset});
}
-void CsaLoadElimination::AbstractState::Print() const {
- for (std::pair<Field, FieldInfo> entry : field_infos_) {
- Field field = entry.first;
- Node* object = field.first;
- Node* offset = field.second;
- FieldInfo info = entry.second;
- PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
- object->op()->mnemonic(), info.value->id(),
- info.value->op()->mnemonic(),
- MachineReprToString(info.representation));
+// static
+// Kill all elements in {infos} that overlap with an element with {offset} and
+// size {ElementSizeInBytes(repr)}.
+void CsaLoadElimination::AbstractState::KillOffset(ConstantOffsetInfos& infos,
+ uint32_t offset,
+ MachineRepresentation repr,
+ Zone* zone) {
+ // All elements in the range [{offset}, {offset + ElementSizeInBytes(repr)})
+ // are in the killed range. We do not need to traverse the inner maps, we can
+ // just clear them.
+ for (int i = 0; i < ElementSizeInBytes(repr); i++) {
+ infos.Set(offset + i, InnerMap(zone));
+ }
+
+ // Now we have to remove all elements in earlier offsets that overlap with an
+ // element in {offset}.
+ // The earliest offset that may overlap with {offset} is
+ // {kMaximumReprSizeInBytes - 1} before.
+ uint32_t initial_offset = offset >= kMaximumReprSizeInBytes - 1
+ ? offset - (kMaximumReprSizeInBytes - 1)
+ : 0;
+ // For all offsets from {initial_offset} to {offset}, we traverse the
+ // respective inner map, and reset all elements that are large enough to
+ // overlap with {offset}.
+ for (uint32_t i = initial_offset; i < offset; i++) {
+ InnerMap map_copy(infos.Get(i));
+ for (const std::pair<Node*, FieldInfo> info : infos.Get(i)) {
+ if (info.second.representation != MachineRepresentation::kNone &&
+ ElementSizeInBytes(info.second.representation) >
+ static_cast<int>(offset - i)) {
+ map_copy.Set(info.first, {});
+ }
+ }
+ infos.Set(i, map_copy);
+ }
+}
+
+void CsaLoadElimination::AbstractState::KillOffsetInFresh(
+ Node* const object, uint32_t offset, MachineRepresentation repr) {
+ for (int i = 0; i < ElementSizeInBytes(repr); i++) {
+ Update(fresh_entries_, offset + i, object, {});
+ }
+ uint32_t initial_offset = offset >= kMaximumReprSizeInBytes - 1
+ ? offset - (kMaximumReprSizeInBytes - 1)
+ : 0;
+ for (uint32_t i = initial_offset; i < offset; i++) {
+ const FieldInfo& info = fresh_entries_.Get(i).Get(object);
+ if (info.representation != MachineRepresentation::kNone &&
+ ElementSizeInBytes(info.representation) >
+ static_cast<int>(offset - i)) {
+ Update(fresh_entries_, i, object, {});
+ }
+ }
+}
+
+// static
+void CsaLoadElimination::AbstractState::Print(
+ const CsaLoadElimination::AbstractState::ConstantOffsetInfos& infos) {
+ for (const auto outer_entry : infos) {
+ for (const auto inner_entry : outer_entry.second) {
+ Node* object = inner_entry.first;
+ uint32_t offset = outer_entry.first;
+ FieldInfo info = inner_entry.second;
+ PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset,
+ object->op()->mnemonic(), info.value->id(),
+ info.value->op()->mnemonic(),
+ MachineReprToString(info.representation));
+ }
+ }
+}
+
+// static
+void CsaLoadElimination::AbstractState::Print(
+ const CsaLoadElimination::AbstractState::UnknownOffsetInfos& infos) {
+ for (const auto outer_entry : infos) {
+ for (const auto inner_entry : outer_entry.second) {
+ Node* object = outer_entry.first;
+ Node* offset = inner_entry.first;
+ FieldInfo info = inner_entry.second;
+ PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
+ object->op()->mnemonic(), info.value->id(),
+ info.value->op()->mnemonic(),
+ MachineReprToString(info.representation));
+ }
}
}
+void CsaLoadElimination::AbstractState::Print() const {
+ Print(fresh_entries_);
+ Print(constant_entries_);
+ Print(arbitrary_entries_);
+ Print(fresh_unknown_entries_);
+ Print(constant_unknown_entries_);
+ Print(arbitrary_unknown_entries_);
+}
+
Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
ObjectAccess const& access) {
Node* object = NodeProperties::GetValueInput(node, 0);
@@ -189,8 +351,7 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
return Replace(replacement);
}
}
- FieldInfo info(node, representation);
- state = state->AddField(object, offset, info, zone());
+ state = state->AddField(object, offset, node, representation);
return UpdateState(node, state);
}
@@ -204,9 +365,9 @@ Reduction CsaLoadElimination::ReduceStoreToObject(Node* node,
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- FieldInfo info(value, access.machine_type.representation());
- state = state->KillField(object, offset, info.representation, zone());
- state = state->AddField(object, offset, info, zone());
+ MachineRepresentation repr = access.machine_type.representation();
+ state = state->KillField(object, offset, repr);
+ state = state->AddField(object, offset, value, repr);
return UpdateState(node, state);
}
@@ -232,12 +393,14 @@ Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) {
if (node_states_.Get(effect) == nullptr) return NoChange();
}
- // Make a copy of the first input's state and merge with the state
+ // Make a copy of the first input's state and intersect it with the state
// from other inputs.
+ // TODO(manoskouk): Consider computing phis for at least a subset of the
+ // state.
AbstractState* state = zone()->New<AbstractState>(*state0);
for (int i = 1; i < input_count; ++i) {
Node* const input = NodeProperties::GetEffectInput(node, i);
- state->Merge(node_states_.Get(input), zone());
+ state->IntersectWith(node_states_.Get(input));
}
return UpdateState(node, state);
}
@@ -298,11 +461,10 @@ Reduction CsaLoadElimination::PropagateInputState(Node* node) {
CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
Node* node, AbstractState const* state) const {
DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
- Node* const control = NodeProperties::GetControlInput(node);
ZoneQueue<Node*> queue(zone());
ZoneSet<Node*> visited(zone());
visited.insert(node);
- for (int i = 1; i < control->InputCount(); ++i) {
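+ // {node} is an effect phi: its inputs are the effect inputs followed by one
+ // control input, so iterate them directly (input 0 is the forward edge).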
+ for (int i = 1; i < node->InputCount() - 1; ++i) {
queue.push(node->InputAt(i));
}
while (!queue.empty()) {
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
index bd314cad8e..82ca580329 100644
--- a/deps/v8/src/compiler/csa-load-elimination.h
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -61,28 +61,74 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
MachineRepresentation representation = MachineRepresentation::kNone;
};
+ // Design doc: https://bit.ly/36MfD6Y
class AbstractState final : public ZoneObject {
public:
- explicit AbstractState(Zone* zone) : field_infos_(zone) {}
+ explicit AbstractState(Zone* zone)
+ : zone_(zone),
+ fresh_entries_(zone, InnerMap(zone)),
+ constant_entries_(zone, InnerMap(zone)),
+ arbitrary_entries_(zone, InnerMap(zone)),
+ fresh_unknown_entries_(zone, InnerMap(zone)),
+ constant_unknown_entries_(zone, InnerMap(zone)),
+ arbitrary_unknown_entries_(zone, InnerMap(zone)) {}
bool Equals(AbstractState const* that) const {
- return field_infos_ == that->field_infos_;
+ return fresh_entries_ == that->fresh_entries_ &&
+ constant_entries_ == that->constant_entries_ &&
+ arbitrary_entries_ == that->arbitrary_entries_ &&
+ fresh_unknown_entries_ == that->fresh_unknown_entries_ &&
+ constant_unknown_entries_ == that->constant_unknown_entries_ &&
+ arbitrary_unknown_entries_ == that->arbitrary_unknown_entries_;
}
- void Merge(AbstractState const* that, Zone* zone);
+ void IntersectWith(AbstractState const* that);
AbstractState const* KillField(Node* object, Node* offset,
- MachineRepresentation repr,
- Zone* zone) const;
- AbstractState const* AddField(Node* object, Node* offset, FieldInfo info,
- Zone* zone) const;
+ MachineRepresentation repr) const;
+ AbstractState const* AddField(Node* object, Node* offset, Node* value,
+ MachineRepresentation repr) const;
FieldInfo Lookup(Node* object, Node* offset) const;
void Print() const;
private:
- using Field = std::pair<Node*, Node*>;
- using FieldInfos = PersistentMap<Field, FieldInfo>;
- FieldInfos field_infos_;
+ Zone* zone_;
+ using InnerMap = PersistentMap<Node*, FieldInfo>;
+ template <typename OuterKey>
+ using OuterMap = PersistentMap<OuterKey, InnerMap>;
+
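+ // Entries are partitioned by what is known about the object: fresh
+ // allocations, constant objects (parameters, immutable loads, constants) and
+ // arbitrary objects. Each partition keeps separate maps for accesses with a
+ // compile-time-constant offset and for accesses with an unknown offset.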
+ // offset -> object -> info
+ using ConstantOffsetInfos = OuterMap<uint32_t>;
+ ConstantOffsetInfos fresh_entries_;
+ ConstantOffsetInfos constant_entries_;
+ ConstantOffsetInfos arbitrary_entries_;
+
+ // object -> offset -> info
+ using UnknownOffsetInfos = OuterMap<Node*>;
+ UnknownOffsetInfos fresh_unknown_entries_;
+ UnknownOffsetInfos constant_unknown_entries_;
+ UnknownOffsetInfos arbitrary_unknown_entries_;
+
+ // Update {map} so that {map.Get(outer_key).Get(inner_key)} returns {info}.
+ template <typename OuterKey>
+ static void Update(OuterMap<OuterKey>& map, OuterKey outer_key,
+ Node* inner_key, FieldInfo info) {
+ InnerMap map_copy(map.Get(outer_key));
+ map_copy.Set(inner_key, info);
+ map.Set(outer_key, map_copy);
+ }
+
+ // Kill all elements in {infos} which may alias with offset.
+ static void KillOffset(ConstantOffsetInfos& infos, uint32_t offset,
+ MachineRepresentation repr, Zone* zone);
+ void KillOffsetInFresh(Node* object, uint32_t offset,
+ MachineRepresentation repr);
+
+ template <typename OuterKey>
+ static void IntersectWith(OuterMap<OuterKey>& to,
+ const OuterMap<OuterKey>& from);
+ static void Print(const ConstantOffsetInfos& infos);
+ static void Print(const UnknownOffsetInfos& infos);
};
Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index fe2774f55e..d7a0ca62dd 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -197,6 +197,9 @@ class EffectControlLinearizer {
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
Node* LowerLoadMessage(Node* node);
+ Node* AdaptFastCallTypedArrayArgument(Node* node,
+ ElementsKind expected_elements_kind,
+ GraphAssemblerLabel<0>* bailout);
Node* AdaptFastCallArgument(Node* node, CTypeInfo arg_type,
GraphAssemblerLabel<0>* if_error);
@@ -5004,16 +5007,102 @@ MachineType MachineTypeFor(CTypeInfo::Type type) {
}
} // namespace
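+// Checks that {node} is a JSTypedArray with {expected_elements_kind} whose
+// backing buffer is neither detached nor shared, then copies its length and
+// data pointer into an on-stack FastApiTypedArray-shaped slot; jumps to
+// {bailout} otherwise.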
+Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
+ Node* node, ElementsKind expected_elements_kind,
+ GraphAssemblerLabel<0>* bailout) {
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* value_is_typed_array = __ Word32Equal(
+ value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE));
+ __ GotoIfNot(value_is_typed_array, bailout);
+
+ Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
+ Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
+ Node* andit = __ Word32And(bit_field2, mask);
+ Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
+ Node* kind = __ Word32Shr(andit, shift);
+
+ Node* value_is_expected_elements_kind =
+ __ Word32Equal(kind, __ Int32Constant(expected_elements_kind));
+ __ GotoIfNot(value_is_expected_elements_kind, bailout);
+
+ Node* buffer =
+ __ LoadField(AccessBuilder::ForJSArrayBufferViewBuffer(), node);
+ Node* buffer_bit_field =
+ __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), buffer);
+
+ // Go to the slow path if the {buffer} was detached.
+ Node* buffer_is_not_detached = __ Word32Equal(
+ __ Word32And(buffer_bit_field,
+ __ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)),
+ __ ZeroConstant());
+ __ GotoIfNot(buffer_is_not_detached, bailout);
+
+ // Go to the slow path if the {buffer} is shared.
+ Node* buffer_is_not_shared = __ Word32Equal(
+ __ Word32And(buffer_bit_field,
+ __ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)),
+ __ ZeroConstant());
+ __ GotoIfNot(buffer_is_not_shared, bailout);
+
+ // Unpack the store and length, and store them to a struct
+ // FastApiTypedArray.
+ Node* external_pointer =
+ __ LoadField(AccessBuilder::ForJSTypedArrayExternalPointer(), node);
+
+ // Load the base pointer for the buffer. This will always be Smi
+ // zero unless we allow on-heap TypedArrays, which is only the case
+ // for Chrome. Node and Electron both set this limit to 0. Setting
+ // the base to Smi zero here allows the BuildTypedArrayDataPointer
+ // to optimize away the tricky part of the access later.
+ Node* base_pointer =
+ __ LoadField(AccessBuilder::ForJSTypedArrayBasePointer(), node);
+ if (JSTypedArray::kMaxSizeInHeap == 0) {
+ base_pointer = jsgraph()->ZeroConstant();
+ }
+ Node* data_ptr = BuildTypedArrayDataPointer(base_pointer, external_pointer);
+ Node* length_in_bytes =
+ __ LoadField(AccessBuilder::ForJSTypedArrayLength(), node);
+
+ // We hard-code int32_t here, because all specializations of
+ // FastApiTypedArray have the same size.
+ constexpr int kAlign = alignof(FastApiTypedArray<int32_t>);
+ constexpr int kSize = sizeof(FastApiTypedArray<int32_t>);
+ static_assert(kAlign == alignof(FastApiTypedArray<double>),
+ "Alignment mismatch between different specializations of "
+ "FastApiTypedArray");
+ static_assert(kSize == sizeof(FastApiTypedArray<double>),
+ "Size mismatch between different specializations of "
+ "FastApiTypedArray");
+ static_assert(
+ kSize == sizeof(uintptr_t) + sizeof(size_t),
+ "The size of "
+ "FastApiTypedArray isn't equal to the sum of its expected members.");
+ Node* stack_slot = __ StackSlot(kSize, kAlign);
+
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ stack_slot, 0, length_in_bytes);
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ stack_slot, sizeof(size_t), data_ptr);
+ static_assert(sizeof(uintptr_t) == sizeof(size_t),
+ "The buffer length can't "
+ "fit the PointerRepresentation used to store it.");
+
+ return stack_slot;
+}
+
Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) {
+ int kAlign = alignof(uintptr_t);
+ int kSize = sizeof(uintptr_t);
switch (arg_type.GetSequenceType()) {
case CTypeInfo::SequenceType::kScalar: {
switch (arg_type.GetType()) {
case CTypeInfo::Type::kV8Value: {
- int kAlign = alignof(uintptr_t);
- int kSize = sizeof(uintptr_t);
Node* stack_slot = __ StackSlot(kSize, kAlign);
-
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, node);
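For orientation, the slot built by AdaptFastCallTypedArrayArgument has the two-field layout the static_asserts above pin down: the length loaded from the JSTypedArray at offset 0 and the data pointer at offset sizeof(size_t). A minimal, self-contained sketch of that layout (member names are illustrative; the real type is FastApiTypedArray<T> from v8-fast-api-calls.h):

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-in for the argument slot written by the lowering:
    // the length goes to offset 0, the data pointer to offset sizeof(size_t).
    struct TypedArrayArgSlot {
      size_t length;  // length loaded from the JSTypedArray
      void* data;     // data pointer computed by BuildTypedArrayDataPointer
    };

    static_assert(sizeof(TypedArrayArgSlot) == sizeof(size_t) + sizeof(uintptr_t),
                  "mirrors the size the lowering asserts for FastApiTypedArray");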
@@ -5035,10 +5124,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* value_is_smi = ObjectIsSmi(node);
__ GotoIf(value_is_smi, if_error);
- int kAlign = alignof(uintptr_t);
- int kSize = sizeof(uintptr_t);
Node* stack_slot = __ StackSlot(kSize, kAlign);
-
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, node);
@@ -5053,9 +5139,15 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
return stack_slot;
}
- case CTypeInfo::SequenceType::kIsTypedArray:
- // TODO(mslekova): Implement typed arrays.
- return node;
+ case CTypeInfo::SequenceType::kIsTypedArray: {
+ // Check that the value is a HeapObject.
+ Node* value_is_smi = ObjectIsSmi(node);
+ __ GotoIf(value_is_smi, if_error);
+
+ return AdaptFastCallTypedArrayArgument(
+ node, fast_api_call::GetTypedArrayElementsKind(arg_type.GetType()),
+ if_error);
+ }
default: {
UNREACHABLE();
}
@@ -5069,14 +5161,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
GraphAssemblerLabel<0>* if_error) {
static constexpr int kReceiver = 1;
- auto merge = __ MakeLabel(MachineRepresentation::kTagged);
-
- int kAlign = alignof(uintptr_t);
- int kSize = sizeof(uintptr_t);
- Node* stack_slot = __ StackSlot(kSize, kAlign);
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- stack_slot, 0, node);
+ auto merge = __ MakeLabel(MachineRepresentation::kTagged,
+ MachineRepresentation::kTagged);
for (size_t func_index = 0; func_index < c_functions.size(); func_index++) {
const CFunctionInfo* c_signature = c_functions[func_index].signature;
@@ -5101,34 +5187,31 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
__ GotoIfNot(value_is_js_array, &next);
+ int kAlign = alignof(uintptr_t);
+ int kSize = sizeof(uintptr_t);
+ Node* stack_slot = __ StackSlot(kSize, kAlign);
+
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ stack_slot, 0, node);
+
Node* target_address = __ ExternalConstant(
ExternalReference::Create(c_functions[func_index].address));
- __ Goto(&merge, target_address);
+ __ Goto(&merge, target_address, stack_slot);
break;
}
case CTypeInfo::SequenceType::kIsTypedArray: {
// Check that the value is a TypedArray with a type that matches the
// type declared in the c-function.
- ElementsKind typed_array_elements_kind =
+ Node* stack_slot = AdaptFastCallTypedArrayArgument(
+ node,
fast_api_call::GetTypedArrayElementsKind(
- overloads_resolution_result.element_type);
-
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
- Node* value_bit_field2 =
- __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
- Node* value_elements_kind = __ WordShr(
- __ WordAnd(value_bit_field2,
- __ Int32Constant(Map::Bits2::ElementsKindBits::kMask)),
- __ Int32Constant(Map::Bits2::ElementsKindBits::kShift));
- Node* is_same_kind = __ Word32Equal(
- value_elements_kind,
- __ Int32Constant(GetPackedElementsKind(typed_array_elements_kind)));
- __ GotoIfNot(is_same_kind, &next);
-
+ overloads_resolution_result.element_type),
+ &next);
Node* target_address = __ ExternalConstant(
ExternalReference::Create(c_functions[func_index].address));
- __ Goto(&merge, target_address);
+ __ Goto(&merge, target_address, stack_slot);
break;
}
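The overload-resolution change above replaces the single pre-built stack slot with per-branch slots, so the merge label now carries two tagged values instead of one. A hedged sketch of what each branch hands to the merge (illustrative types, not the actual Node-based code):

    #include <cstddef>

    // Stand-in for the pair of values the two-representation merge label now
    // carries out of each overload branch.
    struct ResolvedOverload {
      const void* target_address;  // ExternalConstant of the selected C function
      void* stack_slot;            // argument slot built by that same branch
    };

Each branch fills its own slot: the JSArray case stores the tagged object pointer, while the TypedArray case returns the slot produced by AdaptFastCallTypedArrayArgument, and the Phis at the merge select the matching (address, slot) pair.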
@@ -5142,7 +5225,7 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
__ Goto(if_error);
__ Bind(&merge);
- return {merge.PhiAt(0), stack_slot};
+ return {merge.PhiAt(0), merge.PhiAt(1)};
}
Node* EffectControlLinearizer::WrapFastCall(
diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc
index 608fce8606..564da611d5 100644
--- a/deps/v8/src/compiler/fast-api-calls.cc
+++ b/deps/v8/src/compiler/fast-api-calls.cc
@@ -28,7 +28,6 @@ ElementsKind GetTypedArrayElementsKind(CTypeInfo::Type type) {
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kApiObject:
UNREACHABLE();
- break;
}
}
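The dropped break is dead code because UNREACHABLE() expands to a call that never returns. A small self-contained analogue (Unreachable() is a stand-in, not the V8 macro):

    #include <cstdlib>

    // A [[noreturn]] call ends the case; a trailing break after it is unreachable.
    [[noreturn]] inline void Unreachable() { std::abort(); }

    int Classify(int kind) {
      switch (kind) {
        case 0:
          return 42;
        default:
          Unreachable();  // no break needed: control never falls out of this case
      }
    }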
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index e5b4fb35b7..26ae88362d 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -574,6 +574,15 @@ TNode<Map> GraphAssembler::LoadMap(Node* object) {
#endif
}
+void GraphAssembler::StoreMap(Node* object, TNode<Map> map) {
+#ifdef V8_MAP_PACKING
+ map = PackMapWord(map);
+#endif
+ StoreRepresentation rep(MachineType::TaggedRepresentation(),
+ kMapWriteBarrier);
+ Store(rep, object, HeapObject::kMapOffset - kHeapObjectTag, map);
+}
+
Node* JSGraphAssembler::StoreElement(ElementAccess const& access, Node* object,
Node* index, Node* value) {
return AddNode(graph()->NewNode(simplified()->StoreElement(access), object,
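StoreMap is the store-side counterpart of LoadMap: under V8_MAP_PACKING it must apply the exact inverse transformation that UnpackMapWord applies on loads, and it uses the map-specific write barrier. A toy sketch of that symmetry (the XOR mask below is purely illustrative, not V8's actual packing scheme):

    #include <cstdint>

    constexpr uintptr_t kIllustrativeXorMask = 0xAAAA;  // assumption for illustration

    uintptr_t PackMapWordSketch(uintptr_t map) { return map ^ kIllustrativeXorMask; }
    uintptr_t UnpackMapWordSketch(uintptr_t word) { return word ^ kIllustrativeXorMask; }

    // Whatever the load side undoes, the store side must redo.
    bool RoundTrips(uintptr_t map) {
      return UnpackMapWordSketch(PackMapWordSketch(map)) == map;
    }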
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 335ec0a314..5efe6dd9c3 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -276,6 +276,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
TNode<Map> UnpackMapWord(Node* map_word);
#endif
TNode<Map> LoadMap(Node* object);
+ void StoreMap(Node* object, TNode<Map> map);
Node* DebugBreak();
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index a518d87ba8..1688a14a04 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -32,10 +32,6 @@ namespace compiler {
#define TRACE(broker, x) TRACE_BROKER(broker, x)
#define TRACE_MISSING(broker, x) TRACE_BROKER_MISSING(broker, x)
-#define FORWARD_DECL(Name, ...) class Name##Data;
-HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
-#undef FORWARD_DECL
-
// There are several kinds of ObjectData values.
//
// kSmi: The underlying V8 object is a Smi and the data is an instance of the
@@ -43,12 +39,10 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
// object is a Smi, it's safe to access the handle in order to extract the
// number value, and AsSmi() does exactly that.
//
-// kSerializedHeapObject: The underlying V8 object is a HeapObject and the
-// data is an instance of the corresponding (most-specific) subclass, e.g.
-// JSFunctionData, which provides serialized information about the object.
-//
-// kBackgroundSerializedHeapObject: Like kSerializedHeapObject, but
-// allows serialization from the background thread.
+// kBackgroundSerializedHeapObject: The underlying V8 object is a HeapObject
+// and the data is an instance of the corresponding (most-specific) subclass,
+// e.g. JSFunctionData, which provides serialized information about the
+// object. Allows serialization from the background thread.
//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
// data is an instance of the base class (ObjectData), i.e. it basically
@@ -66,7 +60,6 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
// these objects need not be serialized.
enum ObjectDataKind {
kSmi,
- kSerializedHeapObject,
kBackgroundSerializedHeapObject,
kUnserializedHeapObject,
kNeverSerializedHeapObject,
@@ -87,6 +80,10 @@ bool IsReadOnlyHeapObjectForCompiler(HeapObject object) {
} // namespace
+NotConcurrentInliningTag::NotConcurrentInliningTag(JSHeapBroker* broker) {
+ CHECK(!broker->is_concurrent_inlining());
+}
+
class ObjectData : public ZoneObject {
public:
ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
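NotConcurrentInliningTag is a tag-argument idiom: constructing the tag CHECKs the precondition once, and every serialization method that takes the tag by value can only be reached from a call site able to construct it. A generic sketch of the idiom with illustrative names (not the V8 types):

    #include <cassert>

    struct NotFeatureEnabledTag {
      explicit NotFeatureEnabledTag(bool feature_enabled) { assert(!feature_enabled); }
    };

    void LegacySerialize(NotFeatureEnabledTag /*tag*/) {
      // Reaching this function implies the feature was off at the call site.
    }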
@@ -125,12 +122,12 @@ class ObjectData : public ZoneObject {
HeapObject::cast(*object)));
}
-#define DECLARE_IS(Name, ...) bool Is##Name() const;
+#define DECLARE_IS(Name) bool Is##Name() const;
HEAP_BROKER_OBJECT_LIST(DECLARE_IS)
#undef DECLARE_IS
-#define DECLARE_AS(Name, ...) Name##Data* As##Name();
- HEAP_BROKER_OBJECT_LIST(DECLARE_AS)
+#define DECLARE_AS(Name) Name##Data* As##Name();
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
#undef DECLARE_AS
Handle<Object> object() const { return object_; }
@@ -144,9 +141,6 @@ class ObjectData : public ZoneObject {
bool IsNull() const { return object_->IsNull(); }
#ifdef DEBUG
- enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
- mutable Usage used_status = Usage::kUnused;
-
JSHeapBroker* broker() const { return broker_; }
#endif // DEBUG
@@ -158,36 +152,10 @@ class ObjectData : public ZoneObject {
#endif // DEBUG
};
-namespace {
-
-template <class T>
-constexpr bool IsSerializedRef() {
- return ref_traits<T>::ref_serialization_kind ==
- RefSerializationKind::kSerialized;
-}
-
-RefSerializationKind RefSerializationKindOf(ObjectData* const data) {
- Object o = *data->object();
- if (o.IsSmi()) {
- return RefSerializationKind::kNeverSerialized;
-#define DEFINE_REF_SERIALIZATION_KIND(Name, Kind) \
- } \
- /* NOLINTNEXTLINE(readability/braces) */ \
- else if (o.Is##Name()) { \
- return ref_traits<Name>::ref_serialization_kind;
- HEAP_BROKER_OBJECT_LIST(DEFINE_REF_SERIALIZATION_KIND)
-#undef DEFINE_REF_SERIALIZATION_KIND
- }
- UNREACHABLE();
-}
-
-} // namespace
-
class HeapObjectData : public ObjectData {
public:
HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
+ Handle<HeapObject> object, ObjectDataKind kind);
base::Optional<bool> TryGetBooleanValue(JSHeapBroker* broker) const;
ObjectData* map() const { return map_; }
@@ -202,10 +170,9 @@ class HeapObjectData : public ObjectData {
class PropertyCellData : public HeapObjectData {
public:
PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
+ Handle<PropertyCell> object, ObjectDataKind kind);
- bool Serialize(JSHeapBroker* broker);
+ bool Cache(JSHeapBroker* broker);
PropertyDetails property_details() const {
CHECK(serialized());
@@ -224,34 +191,6 @@ class PropertyCellData : public HeapObjectData {
bool serialized() const { return value_ != nullptr; }
};
-// TODO(mslekova): Once we have real-world usage data, we might want to
-// reimplement this as sorted vector instead, to reduce the memory overhead.
-typedef ZoneMap<ObjectData*, HolderLookupResult> KnownReceiversMap;
-
-class FunctionTemplateInfoData : public HeapObjectData {
- public:
- FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FunctionTemplateInfo> object)
- : HeapObjectData(broker, storage, object) {
- // FunctionTemplateInfoData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class CallHandlerInfoData : public HeapObjectData {
- public:
- CallHandlerInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<CallHandlerInfo> object)
- : HeapObjectData(broker, storage, object) {
- // CallHandlerInfoData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
namespace {
ZoneVector<Address> GetCFunctions(FixedArray function_overloads, Zone* zone) {
@@ -285,7 +224,7 @@ PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
ObjectDataKind kind)
: HeapObjectData(broker, storage, object, kind) {}
-bool PropertyCellData::Serialize(JSHeapBroker* broker) {
+bool PropertyCellData::Cache(JSHeapBroker* broker) {
if (serialized()) return true;
TraceScope tracer(broker, this, "PropertyCellData::Serialize");
@@ -352,22 +291,22 @@ class JSReceiverData : public HeapObjectData {
class JSObjectData : public JSReceiverData {
public:
JSObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSObject> object,
- ObjectDataKind kind = kSerializedHeapObject);
+ Handle<JSObject> object, ObjectDataKind kind);
// Recursive serialization of all reachable JSObjects.
bool SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
+ NotConcurrentInliningTag,
int max_depth = kMaxFastLiteralDepth);
ObjectData* GetInobjectField(int property_index) const;
// Shallow serialization of {elements}.
- void SerializeElements(JSHeapBroker* broker);
+ void SerializeElements(JSHeapBroker* broker, NotConcurrentInliningTag);
bool serialized_elements() const { return serialized_elements_; }
ObjectData* elements() const;
ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
- void SerializeObjectCreateMap(JSHeapBroker* broker);
+ void SerializeObjectCreateMap(JSHeapBroker* broker, NotConcurrentInliningTag);
// Can be nullptr.
ObjectData* object_create_map(JSHeapBroker* broker) const {
@@ -427,7 +366,8 @@ class JSObjectData : public JSReceiverData {
ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
-void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
+void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
+ NotConcurrentInliningTag) {
if (serialized_object_create_map_) return;
serialized_object_create_map_ = true;
@@ -540,14 +480,18 @@ base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
// object, we are guaranteed to see valid heap words even if the data is wrong.
base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap(
JSHeapBroker* broker, Handle<JSObject> receiver, InternalIndex dict_index) {
- DisallowGarbageCollection no_gc;
- // DictionaryPropertyAt will check that we are within the bounds of the
- // object.
- base::Optional<Object> maybe_constant = JSObject::DictionaryPropertyAt(
- receiver, dict_index, broker->isolate()->heap());
- DCHECK_IMPLIES(broker->IsMainThread(), maybe_constant);
- if (!maybe_constant) return {};
- return TryMakeRef(broker, maybe_constant.value());
+ Handle<Object> constant;
+ {
+ DisallowGarbageCollection no_gc;
+ // DictionaryPropertyAt will check that we are within the bounds of the
+ // object.
+ base::Optional<Object> maybe_constant = JSObject::DictionaryPropertyAt(
+ receiver, dict_index, broker->isolate()->heap());
+ DCHECK_IMPLIES(broker->IsMainThread(), maybe_constant);
+ if (!maybe_constant) return {};
+ constant = broker->CanonicalPersistentHandle(maybe_constant.value());
+ }
+ return TryMakeRef(broker, constant);
}
} // namespace
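The rewritten helper narrows the DisallowGarbageCollection scope: the raw value is read and promoted to a canonical persistent handle inside the scope, and only the handle is used once allocation (in TryMakeRef) becomes possible again. A toy sketch of that pattern with stand-in types (not V8's Handle machinery):

    #include <optional>

    struct RawValue { int payload; };
    struct StableHandle { RawValue value; };  // stand-in for a persistent handle

    StableHandle Canonicalize(const RawValue& v) { return StableHandle{v}; }

    std::optional<StableHandle> ReadThenEscape(const std::optional<RawValue>& raw) {
      std::optional<StableHandle> handle;
      {
        // imagine a DisallowGarbageCollection scope here
        if (!raw) return std::nullopt;
        handle = Canonicalize(*raw);  // promote before leaving the scope
      }
      // Work that may allocate/GC is safe now; `handle` keeps the value stable.
      return handle;
    }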
@@ -622,7 +566,7 @@ class JSTypedArrayData : public JSObjectData {
Handle<JSTypedArray> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
- void Serialize(JSHeapBroker* broker);
+ void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
bool serialized() const { return serialized_; }
bool is_on_heap() const { return is_on_heap_; }
@@ -639,7 +583,8 @@ class JSTypedArrayData : public JSObjectData {
ObjectData* buffer_ = nullptr;
};
-void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
+void JSTypedArrayData::Serialize(JSHeapBroker* broker,
+ NotConcurrentInliningTag) {
if (serialized_) return;
serialized_ = true;
@@ -656,35 +601,18 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
}
}
-class ArrayBoilerplateDescriptionData : public HeapObjectData {
- public:
- ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ArrayBoilerplateDescription> object)
- : HeapObjectData(broker, storage, object) {
- // ArrayBoilerplateDescriptionData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
class JSDataViewData : public JSObjectData {
public:
JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSDataView> object,
- ObjectDataKind kind = kSerializedHeapObject)
+ Handle<JSDataView> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {
- if (kind == kSerializedHeapObject) {
- DCHECK(!broker->is_concurrent_inlining());
+ DCHECK_EQ(kind, kBackgroundSerializedHeapObject);
+ if (!broker->is_concurrent_inlining()) {
byte_length_ = object->byte_length();
- } else {
- DCHECK_EQ(kind, kBackgroundSerializedHeapObject);
- DCHECK(broker->is_concurrent_inlining());
}
}
size_t byte_length() const {
- DCHECK_EQ(kind(), kSerializedHeapObject);
return byte_length_;
}
@@ -695,12 +623,10 @@ class JSDataViewData : public JSObjectData {
class JSBoundFunctionData : public JSObjectData {
public:
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSBoundFunction> object,
- ObjectDataKind kind = kSerializedHeapObject)
+ Handle<JSBoundFunction> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
- // For main-thread serialization only.
- bool Serialize(JSHeapBroker* broker);
+ bool Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* bound_target_function() const {
DCHECK(!broker()->is_concurrent_inlining());
@@ -726,197 +652,110 @@ class JSBoundFunctionData : public JSObjectData {
class JSFunctionData : public JSObjectData {
public:
JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSFunction> object);
+ Handle<JSFunction> object, ObjectDataKind kind)
+ : JSObjectData(broker, storage, object, kind) {
+ Cache(broker);
+ }
- bool has_feedback_vector() const { return has_feedback_vector_; }
- bool has_initial_map() const { return has_initial_map_; }
- bool has_prototype() const { return has_prototype_; }
+ bool IsConsistentWithHeapState(JSHeapBroker* broker) const;
+
+ bool has_feedback_vector() const {
+ DCHECK(serialized_);
+ return has_feedback_vector_;
+ }
+ bool has_initial_map() const {
+ DCHECK(serialized_);
+ return has_initial_map_;
+ }
+ bool has_instance_prototype() const {
+ DCHECK(serialized_);
+ return has_instance_prototype_;
+ }
bool PrototypeRequiresRuntimeLookup() const {
+ DCHECK(serialized_);
return PrototypeRequiresRuntimeLookup_;
}
- void Serialize(JSHeapBroker* broker);
- bool serialized() const { return serialized_; }
-
- void SerializeCodeAndFeedback(JSHeapBroker* broker);
- bool serialized_code_and_feedback() const {
- return serialized_code_and_feedback_;
+ ObjectData* context() const {
+ DCHECK(serialized_);
+ return context_;
+ }
+ ObjectData* native_context() const {
+ DCHECK(serialized_);
+ return native_context_;
+ }
+ MapData* initial_map() const {
+ DCHECK(serialized_);
+ return initial_map_;
+ }
+ ObjectData* instance_prototype() const {
+ DCHECK(serialized_);
+ return instance_prototype_;
+ }
+ ObjectData* shared() const {
+ DCHECK(serialized_);
+ return shared_;
}
-
- ObjectData* context() const { return context_; }
- ObjectData* native_context() const { return native_context_; }
- ObjectData* initial_map() const { return initial_map_; }
- ObjectData* prototype() const { return prototype_; }
- ObjectData* shared() const { return shared_; }
ObjectData* raw_feedback_cell() const {
- DCHECK(serialized_code_and_feedback());
+ DCHECK(serialized_);
return feedback_cell_;
}
ObjectData* feedback_vector() const {
- DCHECK(serialized_code_and_feedback());
+ DCHECK(serialized_);
return feedback_vector_;
}
- ObjectData* code() const {
- DCHECK(serialized_code_and_feedback());
- DCHECK(!broker()->is_concurrent_inlining());
- return code_;
- }
int initial_map_instance_size_with_min_slack() const {
- CHECK(serialized_);
+ DCHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
}
- private:
- bool has_feedback_vector_;
- bool has_initial_map_;
- bool has_prototype_;
- bool PrototypeRequiresRuntimeLookup_;
+ // Track serialized fields that are actually used, in order to relax
+ // ConsistentJSFunctionView dependency validation as much as possible.
+ enum UsedField {
+ kHasFeedbackVector = 1 << 0,
+ kPrototypeOrInitialMap = 1 << 1,
+ kHasInitialMap = 1 << 2,
+ kHasInstancePrototype = 1 << 3,
+ kPrototypeRequiresRuntimeLookup = 1 << 4,
+ kInitialMap = 1 << 5,
+ kInstancePrototype = 1 << 6,
+ kFeedbackVector = 1 << 7,
+ kFeedbackCell = 1 << 8,
+ kInitialMapInstanceSizeWithMinSlack = 1 << 9,
+ };
- bool serialized_ = false;
- bool serialized_code_and_feedback_ = false;
-
- ObjectData* context_ = nullptr;
- ObjectData* native_context_ = nullptr;
- ObjectData* initial_map_ = nullptr;
- ObjectData* prototype_ = nullptr;
- ObjectData* shared_ = nullptr;
- ObjectData* feedback_vector_ = nullptr;
- ObjectData* feedback_cell_ = nullptr;
- ObjectData* code_ = nullptr;
- int initial_map_instance_size_with_min_slack_;
-};
-
-class RegExpBoilerplateDescriptionData : public HeapObjectData {
- public:
- RegExpBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<RegExpBoilerplateDescription> object)
- : HeapObjectData(broker, storage, object) {
- // RegExpBoilerplateDescription is NeverEverSerialize.
- // TODO(jgruber): Remove this class once all kNeverSerialized types are
- // NeverEverSerialize.
- UNREACHABLE();
+ bool has_any_used_field() const { return used_fields_ != 0; }
+ bool has_used_field(UsedField used_field) const {
+ return (used_fields_ & used_field) != 0;
}
-};
-
-class HeapNumberData : public HeapObjectData {
- public:
- HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapNumber> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
- : HeapObjectData(broker, storage, object, kind),
- value_(object->value()),
- value_as_bits_(object->value_as_bits(kRelaxedLoad)) {}
-
- double value() const { return value_; }
- uint64_t value_as_bits() const { return value_as_bits_; }
+ void set_used_field(UsedField used_field) { used_fields_ |= used_field; }
private:
- double const value_;
- uint64_t const value_as_bits_;
-};
-
-class ContextData : public HeapObjectData {
- public:
- ContextData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Context> object)
- : HeapObjectData(broker, storage, object) {
- // TODO(v8:7790): Remove this class once all kNeverSerialized types are
- // NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class NativeContextData : public ContextData {
- public:
- NativeContextData(JSHeapBroker* broker, ObjectData** storage,
- Handle<NativeContext> object)
- : ContextData(broker, storage, object) {
- // TODO(v8:7790): Remove this class once all kNeverSerialized types are
- // NeverEverSerialize.
- UNREACHABLE();
- }
-};
+ void Cache(JSHeapBroker* broker);
-class NameData : public HeapObjectData {
- public:
- NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
- : HeapObjectData(broker, storage, object) {
- // StringData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class StringData : public NameData {
- public:
- StringData(JSHeapBroker* broker, ObjectData** storage, Handle<String> object)
- : NameData(broker, storage, object) {
- // StringData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class SymbolData : public NameData {
- public:
- SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
- : NameData(broker, storage, object) {
- // StringData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class InternalizedStringData : public StringData {
- public:
- InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
- Handle<InternalizedString> object)
- : StringData(broker, storage, object) {
- // InternalizedStringData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class AccessorInfoData : public HeapObjectData {
- public:
- AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<AccessorInfo> object)
- : HeapObjectData(broker, storage, object) {
- // AccessorInfoData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class AllocationSiteData : public HeapObjectData {
- public:
- AllocationSiteData(JSHeapBroker* broker, ObjectData** storage,
- Handle<AllocationSite> object);
- void Serialize(JSHeapBroker* broker);
+#ifdef DEBUG
+ bool serialized_ = false;
+#endif // DEBUG
- bool PointsToLiteral() const { return PointsToLiteral_; }
- AllocationType GetAllocationType() const { return GetAllocationType_; }
- ObjectData* nested_site() const { return nested_site_; }
- ObjectData* boilerplate() const { return boilerplate_; }
+ using UsedFields = base::Flags<UsedField>;
+ UsedFields used_fields_;
- // These are only valid if PointsToLiteral is false.
- ElementsKind GetElementsKind() const { return GetElementsKind_; }
- bool CanInlineCall() const { return CanInlineCall_; }
+ bool has_feedback_vector_ = false;
+ ObjectData* prototype_or_initial_map_ = nullptr;
+ bool has_initial_map_ = false;
+ bool has_instance_prototype_ = false;
+ bool PrototypeRequiresRuntimeLookup_ = false;
- private:
- bool const PointsToLiteral_;
- AllocationType const GetAllocationType_;
- ObjectData* nested_site_ = nullptr;
- ObjectData* boilerplate_ = nullptr;
- ElementsKind GetElementsKind_ = NO_ELEMENTS;
- bool CanInlineCall_ = false;
- bool serialized_ = false;
+ ObjectData* context_ = nullptr;
+ ObjectData* native_context_ = nullptr; // Derives from context_.
+ MapData* initial_map_ = nullptr; // Derives from prototype_or_initial_map_.
+ ObjectData* instance_prototype_ =
+ nullptr; // Derives from prototype_or_initial_map_.
+ ObjectData* shared_ = nullptr;
+ ObjectData* feedback_vector_ = nullptr; // Derives from feedback_cell.
+ ObjectData* feedback_cell_ = nullptr;
+ int initial_map_instance_size_with_min_slack_; // Derives from
+ // prototype_or_initial_map_.
};
class BigIntData : public HeapObjectData {
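A reduced sketch of the used-field bookkeeping added to JSFunctionData above, assuming only that accessors mark the fields they touch and consistency validation later consults those marks (the enumerator list is an illustrative subset, not the full one):

    #include <cstdint>

    enum UsedField : uint32_t {
      kHasFeedbackVectorUsed = 1 << 0,
      kInitialMapUsed = 1 << 1,
    };

    struct UsedFieldTracker {
      uint32_t used_fields = 0;
      void set_used_field(UsedField f) { used_fields |= f; }
      bool has_used_field(UsedField f) const { return (used_fields & f) != 0; }
    };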
@@ -933,18 +772,14 @@ class BigIntData : public HeapObjectData {
};
struct PropertyDescriptor {
- ObjectData* key = nullptr;
- ObjectData* value = nullptr;
- PropertyDetails details = PropertyDetails::Empty();
FieldIndex field_index;
ObjectData* field_owner = nullptr;
- ObjectData* field_type = nullptr;
};
class MapData : public HeapObjectData {
public:
MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
+ ObjectDataKind kind);
InstanceType instance_type() const { return instance_type_; }
int instance_size() const { return instance_size_; }
@@ -975,49 +810,38 @@ class MapData : public HeapObjectData {
}
// Extra information.
-
- // Serialize a single (or all) own slot(s) of the descriptor array and recurse
- // on field owner(s).
- bool TrySerializeOwnDescriptor(JSHeapBroker* broker,
- InternalIndex descriptor_index);
- void SerializeOwnDescriptor(JSHeapBroker* broker,
- InternalIndex descriptor_index) {
- CHECK(TrySerializeOwnDescriptor(broker, descriptor_index));
- }
- void SerializeOwnDescriptors(JSHeapBroker* broker);
- ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
- ObjectData* instance_descriptors() const { return instance_descriptors_; }
-
- void SerializeRootMap(JSHeapBroker* broker);
+ void SerializeRootMap(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* FindRootMap() const;
- void SerializeConstructor(JSHeapBroker* broker);
+ void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetConstructor() const {
CHECK(serialized_constructor_);
return constructor_;
}
- void SerializeBackPointer(JSHeapBroker* broker);
+ void SerializeBackPointer(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetBackPointer() const {
CHECK(serialized_backpointer_);
return backpointer_;
}
- bool TrySerializePrototype(JSHeapBroker* broker);
- void SerializePrototype(JSHeapBroker* broker) {
- CHECK(TrySerializePrototype(broker));
+ bool TrySerializePrototype(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag);
+ void SerializePrototype(JSHeapBroker* broker, NotConcurrentInliningTag tag) {
+ CHECK(TrySerializePrototype(broker, tag));
}
ObjectData* prototype() const {
DCHECK_EQ(serialized_prototype_, prototype_ != nullptr);
return prototype_;
}
- void SerializeForElementStore(JSHeapBroker* broker);
+ void SerializeForElementStore(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag);
bool has_extra_serialized_data() const {
- return serialized_own_descriptors_ || serialized_constructor_ ||
- serialized_backpointer_ || serialized_prototype_ ||
- serialized_root_map_ || serialized_for_element_store_;
+ return serialized_constructor_ || serialized_backpointer_ ||
+ serialized_prototype_ || serialized_root_map_ ||
+ serialized_for_element_store_;
}
private:
@@ -1044,14 +868,10 @@ class MapData : public HeapObjectData {
bool supports_fast_array_iteration_;
bool supports_fast_array_resize_;
- // These extra fields still have to be serialized (e.g prototype_) even with
- // concurrent inling, since those classes have fields themselves which are not
- // being directly read. This means that, for example, even though we can get
- // the prototype itself with direct reads, some of its fields require
- // serialization.
- bool serialized_own_descriptors_ = false;
- ObjectData* instance_descriptors_ = nullptr;
-
+ // These extra fields still have to be serialized (e.g. prototype_), since
+ // those classes have fields themselves which are not being directly read.
+ // This means that, for example, even though we can get the prototype itself
+ // with direct reads, some of its fields require serialization.
bool serialized_constructor_ = false;
ObjectData* constructor_ = nullptr;
@@ -1067,33 +887,222 @@ class MapData : public HeapObjectData {
bool serialized_for_element_store_ = false;
};
-AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<AllocationSite> object)
- : HeapObjectData(broker, storage, object),
- PointsToLiteral_(object->PointsToLiteral()),
- GetAllocationType_(object->GetAllocationType()) {
- DCHECK(!broker->is_concurrent_inlining());
- if (!PointsToLiteral_) {
- GetElementsKind_ = object->GetElementsKind();
- CanInlineCall_ = object->CanInlineCall();
+namespace {
+
+int InstanceSizeWithMinSlack(JSHeapBroker* broker, MapRef map) {
+ // This operation is split into two phases (1. map collection, 2. map
+ // processing). This is to avoid having to take two locks
+ // (full_transition_array_access and map_updater_access) at once and thus
+ // having to deal with related deadlock issues.
+ ZoneVector<Handle<Map>> maps(broker->zone());
+ maps.push_back(map.object());
+
+ {
+ DisallowGarbageCollection no_gc;
+
+ // Has to be an initial map.
+ DCHECK(map.object()->GetBackPointer().IsUndefined(broker->isolate()));
+
+ static constexpr bool kConcurrentAccess = true;
+ TransitionsAccessor(broker->isolate(), *map.object(), &no_gc,
+ kConcurrentAccess)
+ .TraverseTransitionTree([&](Map m) {
+ maps.push_back(broker->CanonicalPersistentHandle(m));
+ });
}
+
+ // The lock is needed for UnusedPropertyFields and InstanceSizeFromSlack.
+ JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker);
+
+ int slack = std::numeric_limits<int>::max();
+ for (Handle<Map> m : maps) {
+ slack = std::min(slack, m->UnusedPropertyFields());
+ }
+
+ return map.object()->InstanceSizeFromSlack(slack);
}
-void AllocationSiteData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
+} // namespace
+
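A self-contained sketch of the two-phase shape described in the comment above, with plain std::mutex stand-ins for the broker guards (not the real V8 synchronization primitives):

    #include <algorithm>
    #include <limits>
    #include <mutex>
    #include <vector>

    std::mutex transition_tree_lock;  // stands in for full_transition_array_access
    std::mutex map_updater_lock;      // stands in for map_updater_access

    struct MapStandIn { int unused_property_fields; };

    int MinSlackOverTransitionTree(const std::vector<MapStandIn>& tree) {
      std::vector<MapStandIn> maps;
      {
        std::lock_guard<std::mutex> phase1(transition_tree_lock);
        maps = tree;  // phase 1: collect the maps
      }
      int slack = std::numeric_limits<int>::max();
      {
        std::lock_guard<std::mutex> phase2(map_updater_lock);
        for (const MapStandIn& m : maps)  // phase 2: process the maps
          slack = std::min(slack, m.unused_property_fields);
      }
      return slack;  // the real code then feeds this into InstanceSizeFromSlack()
    }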
+// IMPORTANT: Keep this sync'd with JSFunctionData::IsConsistentWithHeapState.
+void JSFunctionData::Cache(JSHeapBroker* broker) {
+ DCHECK(!serialized_);
+
+ TraceScope tracer(broker, this, "JSFunctionData::Cache");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ // This function may run on the background thread and thus must read
+ // individual fields in a thread-safe manner. Consistency between fields is *not*
+ // guaranteed here, instead we verify it in `IsConsistentWithHeapState`,
+ // called during job finalization. Relaxed loads are thus okay: we're
+ // guaranteed to see an initialized JSFunction object, and after
+ // initialization fields remain in a valid state.
+
+ Context context = function->context(kRelaxedLoad);
+ context_ = broker->GetOrCreateData(context, kAssumeMemoryFence);
+ CHECK(context_->IsContext());
+
+ native_context_ = broker->GetOrCreateData(context.map().native_context(),
+ kAssumeMemoryFence);
+ CHECK(native_context_->IsNativeContext());
+
+ SharedFunctionInfo shared = function->shared(kRelaxedLoad);
+ shared_ = broker->GetOrCreateData(shared, kAssumeMemoryFence);
+
+ if (function->has_prototype_slot()) {
+ prototype_or_initial_map_ = broker->GetOrCreateData(
+ function->prototype_or_initial_map(kAcquireLoad), kAssumeMemoryFence);
+
+ has_initial_map_ = prototype_or_initial_map_->IsMap();
+ if (has_initial_map_) {
+ initial_map_ = prototype_or_initial_map_->AsMap();
+
+ MapRef initial_map_ref = TryMakeRef<Map>(broker, initial_map_).value();
+ if (initial_map_ref.IsInobjectSlackTrackingInProgress()) {
+ initial_map_instance_size_with_min_slack_ =
+ InstanceSizeWithMinSlack(broker, initial_map_ref);
+ } else {
+ initial_map_instance_size_with_min_slack_ =
+ initial_map_ref.instance_size();
+ }
+ CHECK_GT(initial_map_instance_size_with_min_slack_, 0);
+
+ if (!initial_map_->should_access_heap() &&
+ !broker->is_concurrent_inlining()) {
+ // TODO(neis): This is currently only needed for native_context's
+ // object_function, as used by GetObjectCreateMap. If no further use
+ // sites show up, we should move this into NativeContextData::Serialize.
+ initial_map_->SerializePrototype(broker,
+ NotConcurrentInliningTag{broker});
+ initial_map_->SerializeConstructor(broker,
+ NotConcurrentInliningTag{broker});
+ }
+ }
+
+ if (has_initial_map_) {
+ has_instance_prototype_ = true;
+ instance_prototype_ = broker->GetOrCreateData(
+ Handle<Map>::cast(initial_map_->object())->prototype(),
+ kAssumeMemoryFence);
+ } else if (prototype_or_initial_map_->IsHeapObject() &&
+ !Handle<HeapObject>::cast(prototype_or_initial_map_->object())
+ ->IsTheHole()) {
+ has_instance_prototype_ = true;
+ instance_prototype_ = prototype_or_initial_map_;
+ }
+ }
+
+ PrototypeRequiresRuntimeLookup_ = function->PrototypeRequiresRuntimeLookup();
+
+ FeedbackCell feedback_cell = function->raw_feedback_cell(kAcquireLoad);
+ feedback_cell_ = broker->GetOrCreateData(feedback_cell, kAssumeMemoryFence);
+
+ ObjectData* maybe_feedback_vector = broker->GetOrCreateData(
+ feedback_cell.value(kAcquireLoad), kAssumeMemoryFence);
+ if (shared.is_compiled() && maybe_feedback_vector->IsFeedbackVector()) {
+ has_feedback_vector_ = true;
+ feedback_vector_ = maybe_feedback_vector;
+ }
+
+#ifdef DEBUG
serialized_ = true;
+#endif // DEBUG
+}
+
+// IMPORTANT: Keep this sync'd with JSFunctionData::Cache.
+bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
+ DCHECK(serialized_);
+
+ Handle<JSFunction> f = Handle<JSFunction>::cast(object());
+
+ CHECK_EQ(*context_->object(), f->context());
+ CHECK_EQ(*native_context_->object(), f->native_context());
+ CHECK_EQ(*shared_->object(), f->shared());
+
+ if (f->has_prototype_slot()) {
+ if (has_used_field(kPrototypeOrInitialMap) &&
+ *prototype_or_initial_map_->object() !=
+ f->prototype_or_initial_map(kAcquireLoad)) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::prototype_or_initial_map");
+ return false;
+ }
+ if (has_used_field(kHasInitialMap) &&
+ has_initial_map_ != f->has_initial_map()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::has_initial_map");
+ return false;
+ }
+ if (has_used_field(kHasInstancePrototype) &&
+ has_instance_prototype_ != f->has_instance_prototype()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::has_instance_prototype");
+ return false;
+ }
+ } else {
+ DCHECK(!has_initial_map_);
+ DCHECK(!has_instance_prototype_);
+ }
+
+ if (has_initial_map()) {
+ if (has_used_field(kInitialMap) &&
+ *initial_map_->object() != f->initial_map()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::initial_map");
+ return false;
+ }
+ if (has_used_field(kInitialMapInstanceSizeWithMinSlack) &&
+ initial_map_instance_size_with_min_slack_ !=
+ f->ComputeInstanceSizeWithMinSlack(f->GetIsolate())) {
+ TRACE_BROKER_MISSING(broker,
+ "JSFunction::ComputeInstanceSizeWithMinSlack");
+ return false;
+ }
+ } else {
+ DCHECK_NULL(initial_map_);
+ }
+
+ if (has_instance_prototype_) {
+ if (has_used_field(kInstancePrototype) &&
+ *instance_prototype_->object() != f->instance_prototype()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::instance_prototype");
+ return false;
+ }
+ } else {
+ DCHECK_NULL(instance_prototype_);
+ }
+
+ if (has_used_field(kPrototypeRequiresRuntimeLookup) &&
+ PrototypeRequiresRuntimeLookup_ != f->PrototypeRequiresRuntimeLookup()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::PrototypeRequiresRuntimeLookup");
+ return false;
+ }
+
+ if (has_used_field(kFeedbackCell) &&
+ *feedback_cell_->object() != f->raw_feedback_cell()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::raw_feedback_cell");
+ return false;
+ }
- TraceScope tracer(broker, this, "AllocationSiteData::Serialize");
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(object());
+ if (has_used_field(kHasFeedbackVector) &&
+ has_feedback_vector_ != f->has_feedback_vector()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::has_feedback_vector");
+ return false;
+ }
- if (PointsToLiteral_) {
- DCHECK_NULL(boilerplate_);
- boilerplate_ = broker->GetOrCreateData(site->boilerplate(kAcquireLoad));
+ if (has_feedback_vector_) {
+ if (has_used_field(kFeedbackVector) &&
+ *feedback_vector_->object() != f->feedback_vector()) {
+ TRACE_BROKER_MISSING(broker, "JSFunction::feedback_vector");
+ return false;
+ }
+ } else {
+ DCHECK_NULL(feedback_vector_);
}
- DCHECK_NULL(nested_site_);
- nested_site_ = broker->GetOrCreateData(site->nested_site());
+ return true;
+}
+
+bool JSFunctionRef::IsConsistentWithHeapState() const {
+ DCHECK(broker()->is_concurrent_inlining());
+ DCHECK(broker()->IsMainThread());
+ return data()->AsJSFunction()->IsConsistentWithHeapState(broker());
}
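Cache and IsConsistentWithHeapState form a snapshot-then-validate pair: the background thread records field values with relaxed loads, and the main thread re-checks them against the live JSFunction at job finalization. A minimal sketch of that pairing, using std::atomic<int> as a stand-in for a JSFunction field:

    #include <atomic>

    struct FieldSnapshot { int value; };

    FieldSnapshot CacheField(const std::atomic<int>& live_field) {
      // Relaxed is enough: we need a valid value, not cross-field consistency.
      return FieldSnapshot{live_field.load(std::memory_order_relaxed)};
    }

    bool IsConsistentWithLiveField(const FieldSnapshot& snapshot,
                                   const std::atomic<int>& live_field) {
      // Main-thread check at finalization; a mismatch aborts the optimization.
      return snapshot.value == live_field.load(std::memory_order_relaxed);
    }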
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -1101,8 +1110,6 @@ HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
: ObjectData(broker, storage, object, kind),
map_(broker->GetOrCreateData(object->map(kAcquireLoad),
kAssumeMemoryFence)) {
- CHECK_IMPLIES(kind == kSerializedHeapObject,
- broker->mode() == JSHeapBroker::kSerializing);
CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
kind == kBackgroundSerializedHeapObject);
}
@@ -1234,249 +1241,12 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
}
}
-JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSFunction> object)
- : JSObjectData(broker, storage, object),
- has_feedback_vector_(object->has_feedback_vector()),
- has_initial_map_(object->has_prototype_slot() &&
- object->has_initial_map()),
- has_prototype_(object->has_prototype_slot() && object->has_prototype()),
- PrototypeRequiresRuntimeLookup_(
- object->PrototypeRequiresRuntimeLookup()) {}
-
-void JSFunctionData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "JSFunctionData::Serialize");
- Handle<JSFunction> function = Handle<JSFunction>::cast(object());
-
- DCHECK_NULL(context_);
- DCHECK_NULL(native_context_);
- DCHECK_NULL(initial_map_);
- DCHECK_NULL(prototype_);
- DCHECK_NULL(shared_);
-
- context_ = broker->GetOrCreateData(function->context());
- native_context_ = broker->GetOrCreateData(function->native_context());
- shared_ = broker->GetOrCreateData(function->shared());
-
- initial_map_ = has_initial_map()
- ? broker->GetOrCreateData(function->initial_map())
- : nullptr;
- prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype())
- : nullptr;
-
- if (initial_map_ != nullptr) {
- initial_map_instance_size_with_min_slack_ =
- function->ComputeInstanceSizeWithMinSlack(broker->isolate());
- }
- if (initial_map_ != nullptr && !initial_map_->should_access_heap()) {
- initial_map_->AsMap()->SerializeConstructor(broker);
- // TODO(neis): This is currently only needed for native_context's
- // object_function, as used by GetObjectCreateMap. If no further use sites
- // show up, we should move this into NativeContextData::Serialize.
- initial_map_->AsMap()->SerializePrototype(broker);
- }
-}
-
-void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
- DCHECK(serialized_);
- if (serialized_code_and_feedback_) return;
- serialized_code_and_feedback_ = true;
-
- TraceScope tracer(broker, this, "JSFunctionData::SerializeCodeAndFeedback");
- Handle<JSFunction> function = Handle<JSFunction>::cast(object());
-
- DCHECK_NULL(feedback_cell_);
- DCHECK_NULL(feedback_vector_);
- DCHECK_NULL(code_);
- if (!broker->is_concurrent_inlining()) {
- // This is conditionalized because Code objects are never serialized now.
- // We only need to represent the code object in serialized data when
- // we're unable to perform direct heap accesses.
- code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
- }
- feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
- feedback_vector_ = has_feedback_vector()
- ? broker->GetOrCreateData(function->feedback_vector())
- : nullptr;
-}
-
-class DescriptorArrayData : public HeapObjectData {
- public:
- DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<DescriptorArray> object)
- : HeapObjectData(broker, storage, object), contents_(broker->zone()) {
- DCHECK(!broker->is_concurrent_inlining());
- }
-
- ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).field_owner;
- }
-
- PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).details;
- }
-
- ObjectData* GetPropertyKey(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).key;
- }
-
- FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).field_index;
- }
-
- ObjectData* GetFieldType(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).field_type;
- }
-
- ObjectData* GetStrongValue(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).value;
- }
-
- bool serialized_descriptor(InternalIndex descriptor_index) const {
- return contents_.find(descriptor_index.as_int()) != contents_.end();
- }
-
- void SerializeDescriptor(JSHeapBroker* broker, Handle<Map> map,
- InternalIndex descriptor_index);
-
- private:
- ZoneMap<int, PropertyDescriptor> contents_;
-};
-
-void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker,
- Handle<Map> map,
- InternalIndex descriptor_index) {
- CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
- if (contents_.find(descriptor_index.as_int()) != contents_.end()) return;
-
- Isolate* const isolate = broker->isolate();
- auto descriptors = Handle<DescriptorArray>::cast(object());
- CHECK_EQ(*descriptors, map->instance_descriptors(isolate));
-
- PropertyDescriptor d;
- d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
- MaybeObject value = descriptors->GetValue(descriptor_index);
- HeapObject obj;
- if (value.GetHeapObjectIfStrong(&obj)) {
- d.value = broker->GetOrCreateData(obj);
- }
- d.details = descriptors->GetDetails(descriptor_index);
- if (d.details.location() == kField) {
- d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
- d.field_owner =
- broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
- d.field_type =
- broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
- }
- contents_[descriptor_index.as_int()] = d;
-
- if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
- // Recurse on the owner map.
- d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
- }
-
- TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
- << this << " (" << contents_.size()
- << " total)");
-}
-
-class FeedbackCellData : public HeapObjectData {
- public:
- FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FeedbackCell> object);
-
- ObjectData* value() const { return value_; }
-
- private:
- ObjectData* const value_;
-};
-
-FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FeedbackCell> object)
- : HeapObjectData(broker, storage, object),
- value_(object->value().IsFeedbackVector()
- ? broker->GetOrCreateData(object->value())
- : nullptr) {
- DCHECK(!broker->is_concurrent_inlining());
-}
-
-class FeedbackVectorData : public HeapObjectData {
- public:
- FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FeedbackVector> object);
-
- double invocation_count() const { return invocation_count_; }
-
- ObjectData* shared_function_info() {
- CHECK(serialized_);
- return shared_function_info_;
- }
-
- void Serialize(JSHeapBroker* broker);
- bool serialized() const { return serialized_; }
- ObjectData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const;
-
- private:
- double const invocation_count_;
-
- bool serialized_ = false;
- ObjectData* shared_function_info_;
- ZoneVector<ObjectData*> closure_feedback_cell_array_;
-};
-
-FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<FeedbackVector> object)
- : HeapObjectData(broker, storage, object),
- invocation_count_(object->invocation_count()),
- closure_feedback_cell_array_(broker->zone()) {
- DCHECK(!broker->is_concurrent_inlining());
-}
-
-ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
- int index) const {
- CHECK_GE(index, 0);
-
- size_t cell_array_size = closure_feedback_cell_array_.size();
- if (!serialized_) {
- DCHECK_EQ(cell_array_size, 0);
- TRACE_BROKER_MISSING(broker,
- " closure feedback cell array for vector " << this);
- return nullptr;
- }
- CHECK_LT(index, cell_array_size);
- return closure_feedback_cell_array_[index];
-}
-
-void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
- Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
- Handle<SharedFunctionInfo> sfi(vector->shared_function_info(),
- broker->isolate());
- shared_function_info_ = broker->GetOrCreateData(sfi);
- DCHECK(closure_feedback_cell_array_.empty());
- int length = vector->closure_feedback_cell_array().length();
- closure_feedback_cell_array_.reserve(length);
- for (int i = 0; i < length; ++i) {
- Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i);
- ObjectData* cell_data = broker->GetOrCreateData(cell);
- closure_feedback_cell_array_.push_back(cell_data);
- }
- TRACE(broker, "Copied " << length << " feedback cells");
-}
-
class FixedArrayBaseData : public HeapObjectData {
public:
FixedArrayBaseData(JSHeapBroker* broker, ObjectData** storage,
Handle<FixedArrayBase> object, ObjectDataKind kind)
: HeapObjectData(broker, storage, object, kind),
- length_(object->length()) {}
+ length_(object->length(kAcquireLoad)) {}
int length() const { return length_; }
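A plausible reading of the switch to an acquire load for the cached length: a background thread that reads the length must also observe the writes that published the array's contents, which acquire/release pairing provides. A stand-in sketch (not V8's object layout):

    #include <atomic>
    #include <cstddef>

    struct PublishedArray {
      int elements[16] = {};
      std::atomic<size_t> length{0};
    };

    size_t ReadLengthConcurrently(const PublishedArray& a) {
      return a.length.load(std::memory_order_acquire);  // pairs with a release store
    }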
@@ -1491,20 +1261,6 @@ class FixedArrayData : public FixedArrayBaseData {
: FixedArrayBaseData(broker, storage, object, kind) {}
};
-class ObjectBoilerplateDescriptionData : public FixedArrayData {
- public:
- ObjectBoilerplateDescriptionData(
- JSHeapBroker* broker, ObjectData** storage,
- Handle<ObjectBoilerplateDescription> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
- : FixedArrayData(broker, storage, object, kind) {
- // ObjectBoilerplateDescriptionData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
// Only used in JSNativeContextSpecialization.
class ScriptContextTableData : public FixedArrayData {
public:
@@ -1513,7 +1269,8 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
-bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
+bool JSBoundFunctionData::Serialize(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
DCHECK(!broker->is_concurrent_inlining());
if (serialized_) return true;
@@ -1532,9 +1289,7 @@ bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
if (!bound_target_function_->should_access_heap()) {
if (bound_target_function_->IsJSBoundFunction()) {
serialized_nested =
- bound_target_function_->AsJSBoundFunction()->Serialize(broker);
- } else if (bound_target_function_->IsJSFunction()) {
- bound_target_function_->AsJSFunction()->Serialize(broker);
+ bound_target_function_->AsJSBoundFunction()->Serialize(broker, tag);
}
}
if (!serialized_nested) {
@@ -1563,39 +1318,14 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
own_constant_elements_(broker->zone()),
own_properties_(broker->zone()) {}
-class FixedDoubleArrayData : public FixedArrayBaseData {
- public:
- FixedDoubleArrayData(
- JSHeapBroker* broker, ObjectData** storage,
- Handle<FixedDoubleArray> object,
- ObjectDataKind kind = ObjectDataKind::kNeverSerializedHeapObject)
- : FixedArrayBaseData(broker, storage, object, kind) {
- DCHECK(!broker->is_concurrent_inlining());
- }
-};
-
-class BytecodeArrayData : public FixedArrayBaseData {
- public:
- BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<BytecodeArray> object)
- : FixedArrayBaseData(broker, storage, object,
- ObjectDataKind::kNeverSerializedHeapObject) {
- // BytecodeArrayData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSArray> object,
- ObjectDataKind kind = kSerializedHeapObject)
+ Handle<JSArray> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind),
own_elements_(broker->zone()) {}
- void Serialize(JSHeapBroker* broker);
+ void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* length() const {
CHECK(serialized_);
return length_;
@@ -1616,9 +1346,8 @@ class JSArrayData : public JSObjectData {
ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
};
-void JSArrayData::Serialize(JSHeapBroker* broker) {
- CHECK(!broker->is_concurrent_inlining());
-
+void JSArrayData::Serialize(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
if (serialized_) return;
serialized_ = true;
@@ -1647,56 +1376,10 @@ ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
return result;
}
-class ScopeInfoData : public HeapObjectData {
- public:
- ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ScopeInfo> object)
- : HeapObjectData(broker, storage, object) {
- // TODO(v8:7790): Remove this class once all kNeverSerialized types are
- // NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class SharedFunctionInfoData : public HeapObjectData {
- public:
- SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<SharedFunctionInfo> object)
- : HeapObjectData(broker, storage, object) {
- // TODO(v8:7790): Remove this class once all kNeverSerialized types are
- // NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class SourceTextModuleData : public HeapObjectData {
- public:
- SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
- Handle<SourceTextModule> object)
- : HeapObjectData(broker, storage, object) {
- // SourceTextModuleData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class CellData : public HeapObjectData {
- public:
- CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
- : HeapObjectData(broker, storage, object) {
- // CellData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalObject> object,
- ObjectDataKind kind = kSerializedHeapObject)
+ Handle<JSGlobalObject> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind),
properties_(broker->zone()) {
if (!broker->is_concurrent_inlining()) {
@@ -1705,7 +1388,6 @@ class JSGlobalObjectData : public JSObjectData {
}
bool IsDetached() const {
- DCHECK_EQ(kind(), kSerializedHeapObject);
return is_detached_;
}
@@ -1715,7 +1397,6 @@ class JSGlobalObjectData : public JSObjectData {
private:
// Only valid if not concurrent inlining.
-
bool is_detached_ = false;
// Properties that either
@@ -1728,8 +1409,7 @@ class JSGlobalObjectData : public JSObjectData {
class JSGlobalProxyData : public JSObjectData {
public:
JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalProxy> object,
- ObjectDataKind kind = kSerializedHeapObject)
+ Handle<JSGlobalProxy> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
};
@@ -1750,8 +1430,6 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
ObjectData* name,
SerializationPolicy policy) {
- DCHECK_EQ(kind(), kSerializedHeapObject);
-
CHECK_NOT_NULL(name);
for (auto const& p : properties_) {
if (p.first == name) return p.second;
@@ -1768,52 +1446,14 @@ ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
if (cell.has_value()) {
result = cell->data();
if (!result->should_access_heap()) {
- result->AsPropertyCell()->Serialize(broker);
+ result->AsPropertyCell()->Cache(broker);
}
}
properties_.push_back({name, result});
return result;
}
-class TemplateObjectDescriptionData : public HeapObjectData {
- public:
- TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<TemplateObjectDescription> object)
- : HeapObjectData(broker, storage, object) {
- // TemplateObjectDescriptionData is NeverEverSerialize.
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types
- // are NeverEverSerialize.
- UNREACHABLE();
- }
-};
-
-class CodeData : public HeapObjectData {
- public:
- CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
- : HeapObjectData(broker, storage, object),
- inlined_bytecode_size_(object->inlined_bytecode_size() > 0 &&
- !object->marked_for_deoptimization()
- ? object->inlined_bytecode_size()
- : 0) {
- DCHECK(!broker->is_concurrent_inlining());
- }
-
- unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
-
- private:
- unsigned const inlined_bytecode_size_;
-};
-
-class CodeDataContainerData : public HeapObjectData {
- public:
- CodeDataContainerData(JSHeapBroker* broker, ObjectData** storage,
- Handle<CodeDataContainer> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!broker->is_concurrent_inlining());
- }
-};
-
-#define DEFINE_IS(Name, ...) \
+#define DEFINE_IS(Name) \
bool ObjectData::Is##Name() const { \
if (should_access_heap()) { \
return object()->Is##Name(); \
@@ -1826,14 +1466,13 @@ class CodeDataContainerData : public HeapObjectData {
HEAP_BROKER_OBJECT_LIST(DEFINE_IS)
#undef DEFINE_IS
-#define DEFINE_AS(Name, Kind) \
+#define DEFINE_AS(Name) \
Name##Data* ObjectData::As##Name() { \
CHECK(Is##Name()); \
- CHECK(kind_ == kSerializedHeapObject || \
- kind_ == kBackgroundSerializedHeapObject); \
+ CHECK(kind_ == kBackgroundSerializedHeapObject); \
return static_cast<Name##Data*>(this); \
}
-HEAP_BROKER_OBJECT_LIST(DEFINE_AS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
ObjectData* JSObjectData::GetInobjectField(int property_index) const {
@@ -1850,7 +1489,8 @@ ObjectData* JSObjectData::elements() const {
return elements_;
}
-void JSObjectData::SerializeElements(JSHeapBroker* broker) {
+void JSObjectData::SerializeElements(JSHeapBroker* broker,
+ NotConcurrentInliningTag) {
if (serialized_elements_) return;
serialized_elements_ = true;
@@ -1863,7 +1503,8 @@ void JSObjectData::SerializeElements(JSHeapBroker* broker) {
DCHECK(elements_->IsFixedArrayBase());
}
-void MapData::SerializeConstructor(JSHeapBroker* broker) {
+void MapData::SerializeConstructor(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
if (serialized_constructor_) return;
serialized_constructor_ = true;
@@ -1874,7 +1515,8 @@ void MapData::SerializeConstructor(JSHeapBroker* broker) {
constructor_ = broker->GetOrCreateData(map->GetConstructor());
}
-void MapData::SerializeBackPointer(JSHeapBroker* broker) {
+void MapData::SerializeBackPointer(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
if (serialized_backpointer_) return;
serialized_backpointer_ = true;
@@ -1885,7 +1527,8 @@ void MapData::SerializeBackPointer(JSHeapBroker* broker) {
backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
}
-bool MapData::TrySerializePrototype(JSHeapBroker* broker) {
+bool MapData::TrySerializePrototype(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
if (serialized_prototype_) return true;
TraceScope tracer(broker, this, "MapData::SerializePrototype");
@@ -1897,56 +1540,8 @@ bool MapData::TrySerializePrototype(JSHeapBroker* broker) {
return true;
}
-void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
- if (serialized_own_descriptors_) return;
- serialized_own_descriptors_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors");
- Handle<Map> map = Handle<Map>::cast(object());
-
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- SerializeOwnDescriptor(broker, i);
- }
-}
-
-bool MapData::TrySerializeOwnDescriptor(JSHeapBroker* broker,
- InternalIndex descriptor_index) {
- TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
- Handle<Map> map = Handle<Map>::cast(object());
- Isolate* isolate = broker->isolate();
-
- if (instance_descriptors_ == nullptr) {
- instance_descriptors_ =
- broker->TryGetOrCreateData(map->instance_descriptors(kAcquireLoad));
- if (instance_descriptors_ == nullptr) return false;
- }
-
- if (instance_descriptors()->should_access_heap()) {
- // When accessing the fields concurrently, we still have to recurse on the
- // owner map if it is different than the current map. This is because
- // {instance_descriptors_} gets set on SerializeOwnDescriptor and otherwise
- // we risk the field owner having a null {instance_descriptors_}.
- Handle<DescriptorArray> descriptors = broker->CanonicalPersistentHandle(
- map->instance_descriptors(kAcquireLoad));
- if (descriptors->GetDetails(descriptor_index).location() == kField) {
- Handle<Map> owner = broker->CanonicalPersistentHandle(
- map->FindFieldOwner(isolate, descriptor_index));
- if (!owner.equals(map)) {
- ObjectData* data = broker->TryGetOrCreateData(owner);
- if (data == nullptr) return false;
- data->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
- }
- }
- } else {
- DescriptorArrayData* descriptors =
- instance_descriptors()->AsDescriptorArray();
- descriptors->SerializeDescriptor(broker, map, descriptor_index);
- }
-
- return true;
-}
-
-void MapData::SerializeRootMap(JSHeapBroker* broker) {
+void MapData::SerializeRootMap(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
if (serialized_root_map_) return;
serialized_root_map_ = true;
@@ -1959,6 +1554,7 @@ void MapData::SerializeRootMap(JSHeapBroker* broker) {
ObjectData* MapData::FindRootMap() const { return root_map_; }
bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag,
int max_depth) {
if (serialized_as_boilerplate_) return true;
// If serialization succeeds, we set this to true at the end.
@@ -1996,10 +1592,6 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
return false;
}
- if (!map()->should_access_heap()) {
- map()->AsMap()->SerializeOwnDescriptors(broker);
- }
-
// Check the in-object properties.
inobject_fields_.clear();
Handle<DescriptorArray> descriptors(
@@ -2019,7 +1611,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
inobject_fields_.push_back(value_data);
if (value_data->IsJSObject() && !value_data->should_access_heap()) {
if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive(
- broker, max_depth - 1))
+ broker, tag, max_depth - 1))
return false;
}
}
@@ -2039,7 +1631,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
ObjectData* value_data = broker->GetOrCreateData(value);
if (!value_data->should_access_heap()) {
if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive(
- broker, max_depth - 1)) {
+ broker, tag, max_depth - 1)) {
return false;
}
}
@@ -2052,30 +1644,15 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
}
if (IsJSArray() && !broker->is_concurrent_inlining()) {
- AsJSArray()->Serialize(broker);
+ AsJSArray()->Serialize(broker, NotConcurrentInliningTag{broker});
}
serialized_as_boilerplate_ = true;
return true;
}
-#ifdef DEBUG
-bool ObjectRef::IsNeverSerializedHeapObject() const {
- return data_->kind() == ObjectDataKind::kNeverSerializedHeapObject;
-}
-#endif // DEBUG
-
bool ObjectRef::equals(const ObjectRef& other) const {
-#ifdef DEBUG
- if (broker()->mode() == JSHeapBroker::kSerialized &&
- data_->used_status == ObjectData::Usage::kUnused) {
- data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
- }
-#endif // DEBUG
- // TODO(jgruber): Consider going back to reference-equality on data_ once
- // ObjectData objects are guaranteed to be canonicalized (see also:
- // ClearReconstructibleData).
- return data_->object().is_identical_to(other.data_->object());
+ return data_ == other.data_;
}
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
@@ -2088,97 +1665,18 @@ ContextRef ContextRef::previous(size_t* depth) const {
current = Context::cast(current.unchecked_previous());
(*depth)--;
}
- return MakeRef(broker(), current);
+ // The `previous` field is immutable after initialization and the
+ // context itself is read through an atomic load.
+ return MakeRefAssumeMemoryFence(broker(), current);
}
base::Optional<ObjectRef> ContextRef::get(int index) const {
CHECK_LE(0, index);
- if (index >= object()->length()) return {};
+ // Length is immutable after initialization.
+ if (index >= object()->length(kRelaxedLoad)) return {};
return TryMakeRef(broker(), object()->get(index));
}
-#ifdef DEBUG
-void JSHeapBroker::PrintRefsAnalysis() const {
- // Usage counts
- size_t used_total = 0, unused_total = 0, identity_used_total = 0;
- for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
- ref = refs_->Next(ref)) {
- switch (ref->value->used_status) {
- case ObjectData::Usage::kUnused:
- ++unused_total;
- break;
- case ObjectData::Usage::kOnlyIdentityUsed:
- ++identity_used_total;
- break;
- case ObjectData::Usage::kDataUsed:
- ++used_total;
- break;
- }
- }
-
- // Ref types analysis
- TRACE_BROKER_MEMORY(
- this, "Refs: " << refs_->occupancy() << "; data used: " << used_total
- << "; only identity used: " << identity_used_total
- << "; unused: " << unused_total);
- size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0;
- size_t used[LAST_TYPE + 1] = {0};
- size_t unused[LAST_TYPE + 1] = {0};
- size_t identity_used[LAST_TYPE + 1] = {0};
- for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
- ref = refs_->Next(ref)) {
- if (ref->value->is_smi()) {
- switch (ref->value->used_status) {
- case ObjectData::Usage::kUnused:
- ++unused_smis;
- break;
- case ObjectData::Usage::kOnlyIdentityUsed:
- ++identity_used_smis;
- break;
- case ObjectData::Usage::kDataUsed:
- ++used_smis;
- break;
- }
- } else {
- InstanceType instance_type;
- if (ref->value->should_access_heap()) {
- instance_type = Handle<HeapObject>::cast(ref->value->object())
- ->map()
- .instance_type();
- } else {
- instance_type = ref->value->AsHeapObject()->GetMapInstanceType();
- }
- CHECK_LE(FIRST_TYPE, instance_type);
- CHECK_LE(instance_type, LAST_TYPE);
- switch (ref->value->used_status) {
- case ObjectData::Usage::kUnused:
- ++unused[instance_type];
- break;
- case ObjectData::Usage::kOnlyIdentityUsed:
- ++identity_used[instance_type];
- break;
- case ObjectData::Usage::kDataUsed:
- ++used[instance_type];
- break;
- }
- }
- }
-
- TRACE_BROKER_MEMORY(
- this, "Smis: " << used_smis + identity_used_smis + unused_smis
- << "; data used: " << used_smis << "; only identity used: "
- << identity_used_smis << "; unused: " << unused_smis);
- for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) {
- size_t total = used[i] + identity_used[i] + unused[i];
- if (total == 0) continue;
- TRACE_BROKER_MEMORY(
- this, InstanceType(i) << ": " << total << "; data used: " << used[i]
- << "; only identity used: " << identity_used[i]
- << "; unused: " << unused[i]);
- }
-}
-#endif // DEBUG
-
void JSHeapBroker::InitializeAndStartSerializing() {
TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing");
@@ -2194,175 +1692,67 @@ void JSHeapBroker::InitializeAndStartSerializing() {
CollectArrayAndObjectPrototypes();
SetTargetNativeContextRef(target_native_context().object());
- target_native_context().Serialize();
-
- Factory* const f = isolate()->factory();
if (!is_concurrent_inlining()) {
+ target_native_context().Serialize(NotConcurrentInliningTag{this});
+
+ Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->array_constructor_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->array_iterator_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->array_species_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->no_elements_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->promise_hook_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->promise_species_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->promise_then_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
data = GetOrCreateData(f->string_length_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ if (!data->should_access_heap()) {
+ data->AsPropertyCell()->Cache(this);
+ }
+ GetOrCreateData(f->many_closures_cell());
+ GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, true));
+ TRACE(this, "Finished serializing standard objects");
}
- GetOrCreateData(f->many_closures_cell());
- GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, true));
-
- TRACE(this, "Finished serializing standard objects");
}
namespace {
-template <RefSerializationKind Kind, class DataT, class ObjectT>
-struct CreateDataFunctor {
- bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object,
- RefsMap::Entry** entry_out, ObjectData** object_data_out) {
- USE(broker, refs, object, entry_out, object_data_out);
- UNREACHABLE();
- }
-};
-
-template <class DataT, class ObjectT>
-struct CreateDataFunctor<RefSerializationKind::kSerialized, DataT, ObjectT> {
- bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object,
- RefsMap::Entry** entry_out, ObjectData** object_data_out) {
- if (broker->mode() == JSHeapBroker::kSerializing) {
- RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
- *object_data_out = broker->zone()->New<DataT>(
- broker, &entry->value, Handle<ObjectT>::cast(object));
- *entry_out = entry;
- return true;
- }
- return false;
- }
-};
-
-template <class DataT, class ObjectT>
-struct CreateDataFunctor<RefSerializationKind::kBackgroundSerialized, DataT,
- ObjectT> {
- bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object,
- RefsMap::Entry** entry_out, ObjectData** object_data_out) {
- if (broker->is_concurrent_inlining()) {
- RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
- *object_data_out = broker->zone()->New<DataT>(
- broker, &entry->value, Handle<ObjectT>::cast(object),
- kBackgroundSerializedHeapObject);
- *entry_out = entry;
- return true;
- } else if (broker->mode() == JSHeapBroker::kSerializing) {
- RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
- *object_data_out = broker->zone()->New<DataT>(
- broker, &entry->value, Handle<ObjectT>::cast(object),
- ObjectDataKind::kSerializedHeapObject);
- *entry_out = entry;
- return true;
- }
- return false;
+constexpr ObjectDataKind ObjectDataKindFor(RefSerializationKind kind) {
+ switch (kind) {
+ case RefSerializationKind::kBackgroundSerialized:
+ return kBackgroundSerializedHeapObject;
+ case RefSerializationKind::kNeverSerialized:
+ return kNeverSerializedHeapObject;
}
-};
-
-template <class T>
-bool NeverEverSerialize() {
- return false;
}
-// This list is to help with the transition of kNeverSerialize types (which are
-// currently still serialized if concurrent inlining is disabled) to actually
-// be never serialized. It should be removed once all types have been migrated
-// here.
-#define NEVER_EVER_SERIALIZE(Type) \
- template <> \
- bool NeverEverSerialize<Type>() { \
- return true; \
- }
-
-NEVER_EVER_SERIALIZE(AccessorInfo)
-NEVER_EVER_SERIALIZE(ArrayBoilerplateDescription)
-NEVER_EVER_SERIALIZE(BytecodeArray)
-NEVER_EVER_SERIALIZE(Cell)
-NEVER_EVER_SERIALIZE(CallHandlerInfo)
-NEVER_EVER_SERIALIZE(Context)
-NEVER_EVER_SERIALIZE(FunctionTemplateInfo)
-NEVER_EVER_SERIALIZE(InternalizedString)
-NEVER_EVER_SERIALIZE(Name)
-NEVER_EVER_SERIALIZE(NativeContext)
-NEVER_EVER_SERIALIZE(ObjectBoilerplateDescription)
-NEVER_EVER_SERIALIZE(RegExpBoilerplateDescription)
-NEVER_EVER_SERIALIZE(SharedFunctionInfo)
-NEVER_EVER_SERIALIZE(ScopeInfo)
-NEVER_EVER_SERIALIZE(SourceTextModule)
-NEVER_EVER_SERIALIZE(String)
-NEVER_EVER_SERIALIZE(Symbol)
-NEVER_EVER_SERIALIZE(TemplateObjectDescription)
-
-#undef NEVER_EVER_SERIALIZE
-
-template <class DataT, class ObjectT>
-struct CreateDataFunctor<RefSerializationKind::kNeverSerialized, DataT,
- ObjectT> {
- bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object,
- RefsMap::Entry** entry_out, ObjectData** object_data_out) {
- // TODO(solanes, v8:10866): Remove the `(mode() == kSerializing)` case
- // below when all classes skip serialization. Same for similar spots if we
- // end up keeping them.
- if (broker->is_concurrent_inlining() || NeverEverSerialize<ObjectT>()) {
- RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
- *object_data_out = broker->zone()->New<ObjectData>(
- broker, &entry->value, object, kNeverSerializedHeapObject);
- *entry_out = entry;
- return true;
- } else if (broker->mode() == JSHeapBroker::kSerializing) {
- RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
- *object_data_out = broker->zone()->New<DataT>(
- broker, &entry->value, Handle<ObjectT>::cast(object));
- *entry_out = entry;
- return true;
- }
- return false;
- }
-};
-
} // namespace
-void JSHeapBroker::ClearReconstructibleData() {
- RefsMap::Entry* p = refs_->Start();
- while (p != nullptr) {
- Address key = p->key;
- ObjectData* value = p->value;
- p = refs_->Next(p);
- const auto kind = RefSerializationKindOf(value);
- if (kind == RefSerializationKind::kNeverSerialized ||
- kind == RefSerializationKind::kBackgroundSerialized) {
- if (value->IsMap() &&
- value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject &&
- value->AsMap()->has_extra_serialized_data()) {
- continue;
- }
- if (value->IsJSObject() &&
- value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject &&
- value->AsJSObject()->has_extra_serialized_data()) {
- continue;
- }
- // Can be reconstructed from the background thread.
- CHECK_NOT_NULL(refs_->Remove(key));
- }
- }
-}
-
ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
GetOrCreateDataFlags flags) {
RefsMap::Entry* entry = refs_->Lookup(object.address());
@@ -2405,14 +1795,13 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
kUnserializedReadOnlyHeapObject);
}
-#define CREATE_DATA(Name, Kind) \
- if (object->Is##Name()) { \
- CreateDataFunctor<Kind, Name##Data, Name> f; \
- if (!f(this, refs_, object, &entry, &object_data)) { \
- CHECK_WITH_MSG(!crash_on_error, #Name "Ref construction failed"); \
- return nullptr; \
- } \
- /* NOLINTNEXTLINE(readability/braces) */ \
+#define CREATE_DATA(Name) \
+ if (object->Is##Name()) { \
+ RefsMap::Entry* entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<ref_traits<Name>::data_type>( \
+ this, &entry->value, Handle<Name>::cast(object), \
+ ObjectDataKindFor(ref_traits<Name>::ref_serialization_kind)); \
+ /* NOLINTNEXTLINE(readability/braces) */ \
} else
HEAP_BROKER_OBJECT_LIST(CREATE_DATA)
#undef CREATE_DATA
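The new CREATE_DATA macro above dispatches through ref_traits<Name>, so each broker object type picks up both its data class and its ObjectDataKind from a single table instead of per-kind CreateDataFunctor specializations. As an illustrative aside (not part of the patch), here is a minimal standalone C++ sketch of that trait-based dispatch; ref_traits<> itself is defined in heap-refs.h and is not shown in this diff, so the type names below are simplified stand-ins:

#include <memory>

enum class RefSerializationKind { kNeverSerialized, kBackgroundSerialized };
enum ObjectDataKind {
  kNeverSerializedHeapObject,
  kBackgroundSerializedHeapObject,
};

constexpr ObjectDataKind ObjectDataKindFor(RefSerializationKind kind) {
  return kind == RefSerializationKind::kBackgroundSerialized
             ? kBackgroundSerializedHeapObject
             : kNeverSerializedHeapObject;
}

struct ObjectData {
  explicit ObjectData(ObjectDataKind kind) : kind(kind) {}
  virtual ~ObjectData() = default;
  ObjectDataKind kind;
};
struct MapData : ObjectData { using ObjectData::ObjectData; };
struct StringData : ObjectData { using ObjectData::ObjectData; };

struct Map {};
struct String {};

// Each ref type advertises its data class and its serialization kind.
template <typename T> struct ref_traits;
template <> struct ref_traits<Map> {
  using data_type = MapData;
  static constexpr RefSerializationKind ref_serialization_kind =
      RefSerializationKind::kBackgroundSerialized;
};
template <> struct ref_traits<String> {
  using data_type = StringData;
  static constexpr RefSerializationKind ref_serialization_kind =
      RefSerializationKind::kNeverSerialized;
};

// One uniform creation path, mirroring the shape of the new CREATE_DATA macro.
template <typename T>
std::unique_ptr<ObjectData> CreateData() {
  return std::make_unique<typename ref_traits<T>::data_type>(
      ObjectDataKindFor(ref_traits<T>::ref_serialization_kind));
}

int main() {
  auto map_data = CreateData<Map>();        // kBackgroundSerializedHeapObject
  auto string_data = CreateData<String>();  // kNeverSerializedHeapObject
  return map_data->kind == kBackgroundSerializedHeapObject &&
                 string_data->kind == kNeverSerializedHeapObject
             ? 0
             : 1;
}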
@@ -2425,7 +1814,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
return object_data;
}
-#define DEFINE_IS_AND_AS(Name, ...) \
+#define DEFINE_IS_AND_AS(Name) \
bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
Name##Ref ObjectRef::As##Name() const { \
DCHECK(Is##Name()); \
@@ -2450,26 +1839,39 @@ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
- // TODO(jgruber): Consider supporting transitions other than for JSArray
- // initial maps (e.g. by walking transitions concurrently and finding an
- // existing map that fits).
-
const ElementsKind current_kind = elements_kind();
if (kind == current_kind) return *this;
+ base::Optional<Map> maybe_result = Map::TryAsElementsKind(
+ broker()->isolate(), object(), kind, ConcurrencyMode::kConcurrent);
+
+#ifdef DEBUG
+ // If starting from an initial JSArray map, TryAsElementsKind must succeed
+ // and return the expected transitioned JSArray map.
NativeContextRef native_context = broker()->target_native_context();
- if (!equals(native_context.GetInitialJSArrayMap(current_kind))) return {};
+ if (equals(native_context.GetInitialJSArrayMap(current_kind))) {
+ CHECK_EQ(Map::TryAsElementsKind(broker()->isolate(), object(), kind,
+ ConcurrencyMode::kConcurrent)
+ .value(),
+ *native_context.GetInitialJSArrayMap(kind).object());
+ }
+#endif // DEBUG
- return native_context.GetInitialJSArrayMap(kind);
+ if (!maybe_result.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "MapRef::AsElementsKind " << *this);
+ return {};
+ }
+ return MakeRefAssumeMemoryFence(broker(), maybe_result.value());
}
-void MapRef::SerializeForElementStore() {
+void MapRef::SerializeForElementStore(NotConcurrentInliningTag tag) {
if (data()->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeForElementStore(broker());
+ data()->AsMap()->SerializeForElementStore(broker(), tag);
}
-void MapData::SerializeForElementStore(JSHeapBroker* broker) {
+void MapData::SerializeForElementStore(JSHeapBroker* broker,
+ NotConcurrentInliningTag tag) {
if (serialized_for_element_store_) return;
serialized_for_element_store_ = true;
@@ -2479,7 +1881,7 @@ void MapData::SerializeForElementStore(JSHeapBroker* broker) {
// method should go away anyway once the compiler is fully concurrent.
MapRef map(broker, this);
do {
- map.SerializePrototype();
+ map.SerializePrototype(tag);
map = map.prototype().value().map();
} while (map.IsJSObjectMap() && map.is_stable() &&
IsFastElementsKind(map.elements_kind()));
@@ -2514,10 +1916,29 @@ bool MapRef::supports_fast_array_resize() const {
return data()->AsMap()->supports_fast_array_resize();
}
-int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
+namespace {
+
+void RecordConsistentJSFunctionViewDependencyIfNeeded(
+ const JSHeapBroker* broker, const JSFunctionRef& ref, JSFunctionData* data,
+ JSFunctionData::UsedField used_field) {
+ if (!broker->is_concurrent_inlining()) return;
+ if (!data->has_any_used_field()) {
+ // Deduplicate dependencies.
+ broker->dependencies()->DependOnConsistentJSFunctionView(ref);
+ }
+ data->set_used_field(used_field);
+}
+
+} // namespace
+
+int JSFunctionRef::InitialMapInstanceSizeWithMinSlack(
+ CompilationDependencies* dependencies) const {
if (data_->should_access_heap()) {
return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
}
+ RecordConsistentJSFunctionViewDependencyIfNeeded(
+ broker(), *this, data()->AsJSFunction(),
+ JSFunctionData::kInitialMapInstanceSizeWithMinSlack);
return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
}
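The helper RecordConsistentJSFunctionViewDependencyIfNeeded introduced above records the "consistent JSFunction view" compilation dependency only once per function data, then marks which fields the compiler actually read. A minimal sketch of that once-per-function deduplication (assumed names; a plain bit set stands in for JSFunctionData::UsedField):

#include <cstdint>
#include <iostream>

enum UsedField : uint32_t {
  kHasFeedbackVector = 1u << 0,
  kInitialMap        = 1u << 1,
  kInstancePrototype = 1u << 2,
};

struct FunctionData {
  uint32_t used_fields = 0;
  bool has_any_used_field() const { return used_fields != 0; }
  void set_used_field(UsedField f) { used_fields |= f; }
};

struct Dependencies {
  int recorded = 0;
  void DependOnConsistentView() { ++recorded; }  // one dependency per function
};

void RecordIfNeeded(Dependencies& deps, FunctionData& data, UsedField field) {
  // Only the first read of any field installs the dependency.
  if (!data.has_any_used_field()) deps.DependOnConsistentView();
  data.set_used_field(field);
}

int main() {
  Dependencies deps;
  FunctionData fn;
  RecordIfNeeded(deps, fn, kInitialMap);
  RecordIfNeeded(deps, fn, kHasFeedbackVector);  // no second dependency
  std::cout << deps.recorded << "\n";            // prints 1
}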
@@ -2549,19 +1970,12 @@ OddballType MapRef::oddball_type() const {
}
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
- if (data_->should_access_heap()) {
- // These should all be available because we request the cell for each
- // CreateClosure bytecode.
- return MakeRef(broker(), object()->closure_feedback_cell(index));
- }
-
- return FeedbackCellRef(
- broker(),
- data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
+ return MakeRefAssumeMemoryFence(broker(),
+ object()->closure_feedback_cell(index));
}
base::Optional<ObjectRef> JSObjectRef::raw_properties_or_hash() const {
- if (data_->should_access_heap()) {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return TryMakeRef(broker(), object()->raw_properties_or_hash());
}
return ObjectRef(broker(), data()->AsJSObject()->raw_properties_or_hash());
@@ -2571,53 +1985,57 @@ base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt(
FieldIndex index) const {
CHECK(index.is_inobject());
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- DisallowGarbageCollection no_gc;
- Map current_map = object()->map(kAcquireLoad);
-
- // If the map changed in some prior GC epoch, our {index} could be
- // outside the valid bounds of the cached map.
- if (*map().object() != current_map) {
- TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
- return {};
- }
+ Handle<Object> value;
+ {
+ DisallowGarbageCollection no_gc;
+ Map current_map = object()->map(kAcquireLoad);
+
+ // If the map changed in some prior GC epoch, our {index} could be
+ // outside the valid bounds of the cached map.
+ if (*map().object() != current_map) {
+ TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
+ return {};
+ }
- base::Optional<Object> value =
- object()->RawInobjectPropertyAt(current_map, index);
- if (!value.has_value()) {
- TRACE_BROKER_MISSING(broker(),
- "Unable to safely read property in " << *this);
- return {};
+ base::Optional<Object> maybe_value =
+ object()->RawInobjectPropertyAt(current_map, index);
+ if (!maybe_value.has_value()) {
+ TRACE_BROKER_MISSING(broker(),
+ "Unable to safely read property in " << *this);
+ return {};
+ }
+ value = broker()->CanonicalPersistentHandle(maybe_value.value());
}
- return TryMakeRef(broker(), value.value());
+ return TryMakeRef(broker(), value);
}
JSObjectData* object_data = data()->AsJSObject();
return ObjectRef(broker(),
object_data->GetInobjectField(index.property_index()));
}
-void JSObjectRef::SerializeAsBoilerplateRecursive() {
+void JSObjectRef::SerializeAsBoilerplateRecursive(
+ NotConcurrentInliningTag tag) {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeAsBoilerplateRecursive(broker());
+ data()->AsJSObject()->SerializeAsBoilerplateRecursive(broker(), tag);
}
-void AllocationSiteRef::SerializeRecursive() {
- if (!data_->should_access_heap()) {
- data()->AsAllocationSite()->Serialize(broker());
- }
-
+void AllocationSiteRef::SerializeRecursive(NotConcurrentInliningTag tag) {
+ DCHECK(data_->should_access_heap());
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ DCHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
if (boilerplate().has_value()) {
- boilerplate()->SerializeAsBoilerplateRecursive();
+ boilerplate()->SerializeAsBoilerplateRecursive(tag);
}
if (nested_site().IsAllocationSite()) {
- nested_site().AsAllocationSite().SerializeRecursive();
+ nested_site().AsAllocationSite().SerializeRecursive(tag);
}
}
-void JSObjectRef::SerializeElements() {
+void JSObjectRef::SerializeElements(NotConcurrentInliningTag tag) {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeElements(broker());
+ data()->AsJSObject()->SerializeElements(broker(), tag);
}
bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) {
@@ -2629,14 +2047,7 @@ bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) {
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
- DCHECK(result.is_inobject());
- return result;
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- FieldIndex result = descriptors->GetFieldIndexFor(descriptor_index);
+ FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
DCHECK(result.is_inobject());
return result;
}
@@ -2671,17 +2082,12 @@ bool MapRef::IsPrimitiveMap() const {
MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- CHECK(!is_deprecated());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // TODO(solanes, v8:7790): Consider caching the result of the field owner on
- // the descriptor array. It would be useful for same map as well as any
- // other map sharing that descriptor array.
- return MapRef(broker(), broker()->GetOrCreateData(object()->FindFieldOwner(
- broker()->isolate(), descriptor_index)));
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return MapRef(broker(), descriptors->FindFieldOwner(descriptor_index));
+ // TODO(solanes, v8:7790): Consider caching the result of the field owner on
+ // the descriptor array. It would be useful for same map as well as any
+ // other map sharing that descriptor array.
+ return MakeRefAssumeMemoryFence(
+ broker(),
+ object()->FindFieldOwner(broker()->isolate(), descriptor_index));
}
ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
@@ -2691,11 +2097,24 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
uint32_t index, SerializationPolicy policy) const {
- // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
- // concurrent inlining when we have the infrastructure to safely do so.
- if (broker()->is_concurrent_inlining()) return base::nullopt;
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
+ if (broker()->is_concurrent_inlining()) {
+ String maybe_char;
+ auto result = ConcurrentLookupIterator::TryGetOwnChar(
+ &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
+ index);
+
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
+ << *this << " at index " << index);
+ return {};
+ }
+
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return TryMakeRef(broker(), maybe_char);
+ }
+
+ CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
+ return GetOwnElementFromHeap(broker(), object(), index, true);
}
bool StringRef::SupportedStringKind() const {
@@ -2749,7 +2168,18 @@ int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); }
base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
- return TryMakeRef(broker(), object()->get(i, kRelaxedLoad));
+ Handle<Object> value;
+ {
+ DisallowGarbageCollection no_gc;
+ CHECK_GE(i, 0);
+ value = broker()->CanonicalPersistentHandle(object()->get(i, kAcquireLoad));
+ if (i >= object()->length(kAcquireLoad)) {
+ // Right-trimming happened.
+ CHECK_LT(i, length());
+ return {};
+ }
+ }
+ return TryMakeRef(broker(), value);
}
Float64 FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(int i) const {
@@ -2805,7 +2235,7 @@ int BytecodeArrayRef::handler_table_size() const {
}
// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
-// kSerialized only for methods that we identified to be safe.
+// kBackgroundSerialized only for methods that we identified to be safe.
#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return MakeRef(broker(), result::cast(object()->name())); \
@@ -2816,9 +2246,9 @@ int BytecodeArrayRef::handler_table_size() const {
}
// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
-// broker()->is_concurrent_inlining() is true (even for kSerialized). This is
-// because we identified the method to be safe to use direct heap access, but
-// the holder##Data class still needs to be serialized.
+// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
+// This is because we identified the method to be safe to use direct heap
+// access, but the holder##Data class still needs to be serialized.
#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
result##Ref holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
@@ -2835,19 +2265,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-#define HEAP_ACCESSOR(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- return MakeRef(broker(), result::cast(object()->name())); \
- }
-
#define HEAP_ACCESSOR_C(holder, result, name) \
result holder##Ref::name() const { return object()->name(); }
-BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
-BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
-BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
-BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
-BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
+ObjectRef AllocationSiteRef::nested_site() const {
+ return MakeRefAssumeMemoryFence(broker(), object()->nested_site());
+}
+
+HEAP_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
+HEAP_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
+HEAP_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
+HEAP_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64)
@@ -2862,18 +2290,12 @@ BytecodeArrayRef::incoming_new_target_or_generator_register() const {
return object()->incoming_new_target_or_generator_register();
}
-BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
-
BIMODAL_ACCESSOR(HeapObject, Map, map)
-BIMODAL_ACCESSOR_C(HeapNumber, double, value)
+HEAP_ACCESSOR_C(HeapNumber, double, value)
uint64_t HeapNumberRef::value_as_bits() const {
- if (data_->should_access_heap()) {
- return object()->value_as_bits(kRelaxedLoad);
- }
-
- return ObjectRef::data()->AsHeapNumber()->value_as_bits();
+ return object()->value_as_bits(kRelaxedLoad);
}
base::Optional<JSReceiverRef> JSBoundFunctionRef::bound_target_function()
@@ -2906,18 +2328,6 @@ FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
// Immutable after initialization.
BIMODAL_ACCESSOR_WITH_FLAG_C(JSDataView, size_t, byte_length)
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
-BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
-BIMODAL_ACCESSOR(JSFunction, Context, context)
-BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
-BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
-BIMODAL_ACCESSOR(JSFunction, Object, prototype)
-BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
-BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
-BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
-
BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
Map::Bits2::ElementsKindBits)
BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
@@ -3002,38 +2412,47 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
DCHECK(has_call_code());
- DisallowGarbageCollection no_gc;
- HeapObject signature = object()->signature();
- if (signature.IsUndefined()) {
- return HolderLookupResult(CallOptimization::kHolderIsReceiver);
- }
- auto expected_receiver_type = FunctionTemplateInfo::cast(signature);
- if (expected_receiver_type.IsTemplateFor(*receiver_map.object())) {
- return HolderLookupResult(CallOptimization::kHolderIsReceiver);
+ Handle<FunctionTemplateInfo> expected_receiver_type;
+ {
+ DisallowGarbageCollection no_gc;
+ HeapObject signature = object()->signature();
+ if (signature.IsUndefined()) {
+ return HolderLookupResult(CallOptimization::kHolderIsReceiver);
+ }
+ expected_receiver_type = broker()->CanonicalPersistentHandle(
+ FunctionTemplateInfo::cast(signature));
+ if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) {
+ return HolderLookupResult(CallOptimization::kHolderIsReceiver);
+ }
+
+ if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
if (policy == SerializationPolicy::kSerializeIfNeeded) {
- receiver_map.SerializePrototype();
+ receiver_map.SerializePrototype(NotConcurrentInliningTag{broker()});
}
base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
if (!prototype.has_value()) return not_found;
if (prototype->IsNull()) return not_found;
- JSObject raw_prototype = JSObject::cast(*prototype->object());
- if (!expected_receiver_type.IsTemplateFor(raw_prototype.map())) {
+ if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) {
return not_found;
}
return HolderLookupResult(CallOptimization::kHolderFound,
prototype->AsJSObject());
}
-HEAP_ACCESSOR(CallHandlerInfo, Object, data)
+ObjectRef CallHandlerInfoRef::data() const {
+ return MakeRefAssumeMemoryFence(broker(), object()->data());
+}
HEAP_ACCESSOR_C(ScopeInfo, int, ContextLength)
HEAP_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
HEAP_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
-HEAP_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
+
+ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
+ return MakeRefAssumeMemoryFence(broker(), object()->OuterScopeInfo());
+}
HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id)
@@ -3062,14 +2481,11 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
}
base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
- if (data_->should_access_heap()) {
- // Note that we use the synchronized accessor.
- Object value = object()->value(kAcquireLoad);
- if (!value.IsFeedbackVector()) return base::nullopt;
- return TryMakeRef(broker(), FeedbackVector::cast(value));
- }
- ObjectData* vector = ObjectRef::data()->AsFeedbackCell()->value();
- return FeedbackVectorRef(broker(), vector->AsFeedbackVector());
+ DisallowGarbageCollection no_gc;
+ DCHECK(data_->should_access_heap());
+ Object value = object()->value(kAcquireLoad);
+ if (!value.IsFeedbackVector()) return base::nullopt;
+ return MakeRefAssumeMemoryFence(broker(), FeedbackVector::cast(value));
}
base::Optional<ObjectRef> MapRef::GetStrongValue(
@@ -3079,13 +2495,9 @@ base::Optional<ObjectRef> MapRef::GetStrongValue(
}
DescriptorArrayRef MapRef::instance_descriptors() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return MakeRefAssumeMemoryFence(
- broker(),
- object()->instance_descriptors(broker()->isolate(), kAcquireLoad));
- }
-
- return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors());
+ return MakeRefAssumeMemoryFence(
+ broker(),
+ object()->instance_descriptors(broker()->isolate(), kAcquireLoad));
}
base::Optional<HeapObjectRef> MapRef::prototype() const {
@@ -3101,10 +2513,10 @@ base::Optional<HeapObjectRef> MapRef::prototype() const {
return HeapObjectRef(broker(), prototype_data);
}
-void MapRef::SerializeRootMap() {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) return;
+void MapRef::SerializeRootMap(NotConcurrentInliningTag tag) {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeRootMap(broker());
+ data()->AsMap()->SerializeRootMap(broker(), tag);
}
// TODO(solanes, v8:7790): Remove base::Optional from the return type when
@@ -3230,7 +2642,7 @@ ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
bool StringRef::IsSeqString() const { return object()->IsSeqString(); }
-void NativeContextRef::Serialize() {
+void NativeContextRef::Serialize(NotConcurrentInliningTag tag) {
// TODO(jgruber): Disable visitation if should_access_heap() once all
// NativeContext element refs can be created on background threads. Until
// then, we *must* iterate them and create refs at serialization-time (even
@@ -3241,10 +2653,7 @@ void NativeContextRef::Serialize() {
ObjectData* member_data = broker()->GetOrCreateData(object()->name()); \
if (member_data->IsMap() && !InstanceTypeChecker::IsContext( \
member_data->AsMap()->instance_type())) { \
- member_data->AsMap()->SerializeConstructor(broker()); \
- } \
- if (member_data->IsJSFunction()) { \
- member_data->AsJSFunction()->Serialize(broker()); \
+ member_data->AsMap()->SerializeConstructor(broker(), tag); \
} \
}
BROKER_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
@@ -3254,7 +2663,7 @@ void NativeContextRef::Serialize() {
i <= Context::LAST_FUNCTION_MAP_INDEX; i++) {
MapData* member_data = broker()->GetOrCreateData(object()->get(i))->AsMap();
if (!InstanceTypeChecker::IsContext(member_data->instance_type())) {
- member_data->SerializeConstructor(broker());
+ member_data->SerializeConstructor(broker(), tag);
}
}
}
@@ -3599,12 +3008,8 @@ HeapObjectType HeapObjectRef::GetHeapObjectType() const {
base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
if (!PointsToLiteral()) return {};
- if (data_->should_access_heap()) {
- return TryMakeRef(broker(), object()->boilerplate(kAcquireLoad));
- }
- ObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
- if (boilerplate == nullptr) return {};
- return JSObjectRef(broker(), boilerplate);
+ DCHECK(data_->should_access_heap());
+ return TryMakeRef(broker(), object()->boilerplate(kAcquireLoad));
}
base::Optional<FixedArrayBaseRef> JSObjectRef::elements(
@@ -3627,81 +3032,45 @@ int FixedArrayBaseRef::length() const {
PropertyDetails DescriptorArrayRef::GetPropertyDetails(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return object()->GetDetails(descriptor_index);
- }
- return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index);
+ return object()->GetDetails(descriptor_index);
}
NameRef DescriptorArrayRef::GetPropertyKey(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- NameRef result = MakeRef(broker(), object()->GetKey(descriptor_index));
- CHECK(result.IsUniqueName());
- return result;
- }
- return NameRef(broker(),
- data()->AsDescriptorArray()->GetPropertyKey(descriptor_index));
+ NameRef result = MakeRef(broker(), object()->GetKey(descriptor_index));
+ CHECK(result.IsUniqueName());
+ return result;
}
ObjectRef DescriptorArrayRef::GetFieldType(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return MakeRef<Object>(broker(), object()->GetFieldType(descriptor_index));
- }
- return ObjectRef(broker(),
- data()->AsDescriptorArray()->GetFieldType(descriptor_index));
+ return MakeRef(broker(),
+ Object::cast(object()->GetFieldType(descriptor_index)));
}
base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- HeapObject heap_object;
- if (!object()
- ->GetValue(descriptor_index)
- .GetHeapObjectIfStrong(&heap_object)) {
- return {};
- }
- // Since the descriptors in the descriptor array can be changed in-place
- // via DescriptorArray::Replace, we might get a value that we haven't seen
- // before.
- return TryMakeRef(broker(), heap_object);
+ HeapObject heap_object;
+ if (!object()
+ ->GetValue(descriptor_index)
+ .GetHeapObjectIfStrong(&heap_object)) {
+ return {};
}
- ObjectData* value =
- data()->AsDescriptorArray()->GetStrongValue(descriptor_index);
- if (!value) return base::nullopt;
- return ObjectRef(broker(), value);
+ // Since the descriptors in the descriptor array can be changed in-place
+ // via DescriptorArray::Replace, we might get a value that we haven't seen
+ // before.
+ return TryMakeRef(broker(), heap_object);
}
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- if (value()) {
- FeedbackVectorRef vector = *value();
- if (vector.serialized()) {
- return vector.shared_function_info();
- }
- }
- return base::nullopt;
-}
-
-void FeedbackVectorRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsFeedbackVector()->Serialize(broker());
-}
-
-bool FeedbackVectorRef::serialized() const {
- if (data_->should_access_heap()) return true;
- return data()->AsFeedbackVector()->serialized();
+ base::Optional<FeedbackVectorRef> feedback_vector = value();
+ if (!feedback_vector.has_value()) return {};
+ return feedback_vector->shared_function_info();
}
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
- if (data_->should_access_heap()) {
- return MakeRef(broker(), object()->shared_function_info());
- }
-
- return SharedFunctionInfoRef(
- broker(), data()->AsFeedbackVector()->shared_function_info());
+ return MakeRef(broker(), object()->shared_function_info());
}
bool NameRef::IsUniqueName() const {
@@ -3709,7 +3078,7 @@ bool NameRef::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-void RegExpBoilerplateDescriptionRef::Serialize() {
+void RegExpBoilerplateDescriptionRef::Serialize(NotConcurrentInliningTag) {
// TODO(jgruber,v8:7790): Remove once member types are also never serialized.
// Until then, we have to call these functions once on the main thread to
// trigger serialization.
@@ -3717,26 +3086,16 @@ void RegExpBoilerplateDescriptionRef::Serialize() {
}
Handle<Object> ObjectRef::object() const {
-#ifdef DEBUG
- if (broker()->mode() == JSHeapBroker::kSerialized &&
- data_->used_status == ObjectData::Usage::kUnused) {
- data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
- }
-#endif // DEBUG
return data_->object();
}
#ifdef DEBUG
-#define DEF_OBJECT_GETTER(T, ...) \
+#define DEF_OBJECT_GETTER(T) \
Handle<T> T##Ref::object() const { \
- if (broker()->mode() == JSHeapBroker::kSerialized && \
- data_->used_status == ObjectData::Usage::kUnused) { \
- data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \
- } \
return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
}
#else
-#define DEF_OBJECT_GETTER(T, ...) \
+#define DEF_OBJECT_GETTER(T) \
Handle<T> T##Ref::object() const { \
return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
}
@@ -3750,66 +3109,98 @@ JSHeapBroker* ObjectRef::broker() const { return broker_; }
ObjectData* ObjectRef::data() const {
switch (broker()->mode()) {
case JSHeapBroker::kDisabled:
- CHECK_NE(data_->kind(), kSerializedHeapObject);
return data_;
case JSHeapBroker::kSerializing:
CHECK_NE(data_->kind(), kUnserializedHeapObject);
return data_;
case JSHeapBroker::kSerialized:
-#ifdef DEBUG
- data_->used_status = ObjectData::Usage::kDataUsed;
-#endif // DEBUG
+ case JSHeapBroker::kRetired:
CHECK_NE(data_->kind(), kUnserializedHeapObject);
return data_;
- case JSHeapBroker::kRetired:
- UNREACHABLE();
}
}
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line) {
- TRACE_MISSING(broker, "data in function " << function << " at line " << line);
- return AdvancedReducer::NoChange();
+template <class T>
+typename TinyRef<T>::RefType TinyRef<T>::AsRef(JSHeapBroker* broker) const {
+ if (data_->kind() == kUnserializedHeapObject &&
+ broker->mode() != JSHeapBroker::kDisabled) {
+ // Gotta reconstruct to avoid returning a stale unserialized ref.
+ return MakeRefAssumeMemoryFence<T>(broker,
+ Handle<T>::cast(data_->object()));
+ }
+ return TryMakeRef<T>(broker, data_).value();
}
-void JSFunctionRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSFunction()->Serialize(broker());
+template <class T>
+Handle<T> TinyRef<T>::object() const {
+ return Handle<T>::cast(data_->object());
}
-void JSFunctionRef::SerializeCodeAndFeedback() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSFunction()->SerializeCodeAndFeedback(broker());
+#define V(Name) \
+ template class TinyRef<Name>; \
+ /* TinyRef should contain only one pointer. */ \
+ STATIC_ASSERT(sizeof(TinyRef<Name>) == kSystemPointerSize);
+HEAP_BROKER_OBJECT_LIST(V)
+#undef V
+
+Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
+ const char* function, int line) {
+ TRACE_MISSING(broker, "data in function " << function << " at line " << line);
+ return AdvancedReducer::NoChange();
}
-bool JSBoundFunctionRef::Serialize() {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
+ if (data_->should_access_heap()) {
return true;
}
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsJSBoundFunction()->Serialize(broker());
-}
+ return data()->AsJSBoundFunction()->Serialize(broker(), tag);
+}
+
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
+ Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \
+ const { \
+ IF_ACCESS_FROM_HEAP(Result, Name); \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker(), *this, data()->AsJSFunction(), UsedField); \
+ return Result##Ref(broker(), data()->AsJSFunction()->Name()); \
+ }
+
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(Result, Name, UsedField) \
+ Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker(), *this, data()->AsJSFunction(), UsedField); \
+ return data()->AsJSFunction()->Name(); \
+ }
+
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_feedback_vector,
+ JSFunctionData::kHasFeedbackVector)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_initial_map,
+ JSFunctionData::kHasInitialMap)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_instance_prototype,
+ JSFunctionData::kHasInstancePrototype)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(
+ bool, PrototypeRequiresRuntimeLookup,
+ JSFunctionData::kPrototypeRequiresRuntimeLookup)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Map, initial_map,
+ JSFunctionData::kInitialMap)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Object, instance_prototype,
+ JSFunctionData::kInstancePrototype)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackCell, raw_feedback_cell,
+ JSFunctionData::kFeedbackCell)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackVector, feedback_vector,
+ JSFunctionData::kFeedbackVector)
-bool JSFunctionRef::serialized() const {
- if (data_->should_access_heap()) return true;
- if (data_->AsJSFunction()->serialized()) return true;
- TRACE_BROKER_MISSING(broker(), "data for JSFunction " << this);
- return false;
-}
+BIMODAL_ACCESSOR(JSFunction, Context, context)
+BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
+BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
-bool JSFunctionRef::serialized_code_and_feedback() const {
- if (data_->should_access_heap()) return true;
- return data()->AsJSFunction()->serialized_code_and_feedback();
-}
+#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
+#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C
CodeRef JSFunctionRef::code() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
- }
-
- return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
+ return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
}
base::Optional<FunctionTemplateInfoRef>
@@ -3824,16 +3215,12 @@ int SharedFunctionInfoRef::context_header_size() const {
}
ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
- return MakeRef(broker(), object()->scope_info());
+ return MakeRefAssumeMemoryFence(broker(), object()->scope_info(kAcquireLoad));
}
-void JSObjectRef::SerializeObjectCreateMap() {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return;
- }
- CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
- broker()->mode() == JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeObjectCreateMap(broker());
+void JSObjectRef::SerializeObjectCreateMap(NotConcurrentInliningTag tag) {
+ if (data_->should_access_heap()) return;
+ data()->AsJSObject()->SerializeObjectCreateMap(broker(), tag);
}
base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
@@ -3864,55 +3251,30 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
return MapRef(broker(), map_data->AsMap());
}
-bool MapRef::TrySerializeOwnDescriptor(InternalIndex descriptor_index) {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return true;
- }
- CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
- broker()->mode() == JSHeapBroker::kSerializing);
- return data()->AsMap()->TrySerializeOwnDescriptor(broker(), descriptor_index);
-}
-
-void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
- CHECK(TrySerializeOwnDescriptor(descriptor_index));
+void MapRef::SerializeBackPointer(NotConcurrentInliningTag tag) {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeBackPointer(broker(), tag);
}
-bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+bool MapRef::TrySerializePrototype(NotConcurrentInliningTag tag) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return true;
}
- ObjectData* maybe_desc_array_data = data()->AsMap()->instance_descriptors();
- if (!maybe_desc_array_data) return false;
- if (maybe_desc_array_data->should_access_heap()) return true;
- DescriptorArrayData* desc_array_data =
- maybe_desc_array_data->AsDescriptorArray();
- return desc_array_data->serialized_descriptor(descriptor_index);
-}
-
-void MapRef::SerializeBackPointer() {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) return;
- CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
- broker()->mode() == JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeBackPointer(broker());
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ return data()->AsMap()->TrySerializePrototype(broker(), tag);
}
-bool MapRef::TrySerializePrototype() {
- if (data_->should_access_heap()) return true;
- CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
- broker()->mode() == JSHeapBroker::kSerializing);
- return data()->AsMap()->TrySerializePrototype(broker());
+void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
+ CHECK(TrySerializePrototype(tag));
}
-void MapRef::SerializePrototype() { CHECK(TrySerializePrototype()); }
-
-void JSTypedArrayRef::Serialize() {
+void JSTypedArrayRef::Serialize(NotConcurrentInliningTag tag) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// Nothing to do.
} else {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker());
+ data()->AsJSTypedArray()->Serialize(broker(), tag);
}
}
@@ -3924,14 +3286,14 @@ bool JSTypedArrayRef::serialized() const {
return false;
}
-bool PropertyCellRef::Serialize() const {
+bool PropertyCellRef::Cache() const {
if (data_->should_access_heap()) return true;
CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
broker()->mode() == JSHeapBroker::kSerialized);
- return data()->AsPropertyCell()->Serialize(broker());
+ return data()->AsPropertyCell()->Cache(broker());
}
-void FunctionTemplateInfoRef::SerializeCallCode() {
+void FunctionTemplateInfoRef::SerializeCallCode(NotConcurrentInliningTag tag) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
// CallHandlerInfo::data may still hold a serialized heap object, so we
// have to make the broker aware of it.
@@ -3973,17 +3335,13 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
}
unsigned CodeRef::GetInlinedBytecodeSize() const {
- if (data_->should_access_heap()) {
- unsigned value = object()->inlined_bytecode_size();
- if (value > 0) {
- // Don't report inlined bytecode size if the code object was already
- // deoptimized.
- value = object()->marked_for_deoptimization() ? 0 : value;
- }
- return value;
+ unsigned value = object()->inlined_bytecode_size();
+ if (value > 0) {
+ // Don't report inlined bytecode size if the code object was already
+ // deoptimized.
+ value = object()->marked_for_deoptimization() ? 0 : value;
}
-
- return ObjectRef::data()->AsCode()->inlined_bytecode_size();
+ return value;
}
#undef BIMODAL_ACCESSOR
@@ -3992,7 +3350,6 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR_WITH_FLAG
#undef BIMODAL_ACCESSOR_WITH_FLAG_B
#undef BIMODAL_ACCESSOR_WITH_FLAG_C
-#undef HEAP_ACCESSOR
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
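Throughout the heap-refs.cc hunks above, main-thread-only serialization methods gain a NotConcurrentInliningTag parameter (declared in the heap-refs.h hunk below), so callers must prove at the call site that concurrent inlining is disabled. A small self-contained sketch of the tag-parameter pattern; the real constructor body is not shown in this diff, so the precondition check here is an assumption:

#include <cassert>

struct Broker {
  bool concurrent_inlining;
};

class NotConcurrentInliningTag final {
 public:
  explicit NotConcurrentInliningTag(Broker* broker) {
    // The real constructor presumably asserts this precondition; mimicked here.
    assert(!broker->concurrent_inlining);
  }
};

struct MapData {
  // Only reachable from code that could legitimately construct the tag.
  void SerializeConstructor(Broker* broker, NotConcurrentInliningTag) {
    (void)broker;  // main-thread-only serialization would happen here
  }
};

int main() {
  Broker broker{/*concurrent_inlining=*/false};
  MapData map;
  map.SerializeConstructor(&broker, NotConcurrentInliningTag{&broker});
}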
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index b72fac53f5..d580671f6d 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -57,6 +57,13 @@ inline bool IsAnyStore(AccessMode mode) {
enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
+// Clarifies in function signatures that a method may only be called when
+// concurrent inlining is disabled.
+class NotConcurrentInliningTag final {
+ public:
+ explicit NotConcurrentInliningTag(JSHeapBroker* broker);
+};
+
enum class OddballType : uint8_t {
kNone, // Not an Oddball.
kBoolean, // True or False.
@@ -68,70 +75,73 @@ enum class OddballType : uint8_t {
};
enum class RefSerializationKind {
- // Will skip serialization when --concurrent-inlining is on. Otherwise, they
- // might get serialized. (The cake is a lie.)
+ // Skips serialization.
kNeverSerialized,
// Can be serialized on demand from the background thread.
kBackgroundSerialized,
- kSerialized,
};
// This list is sorted such that subtypes appear before their supertypes.
// DO NOT VIOLATE THIS PROPERTY!
-#define HEAP_BROKER_OBJECT_LIST(V) \
- /* Subtypes of JSObject */ \
- V(JSArray, RefSerializationKind::kBackgroundSerialized) \
- V(JSBoundFunction, RefSerializationKind::kBackgroundSerialized) \
- V(JSDataView, RefSerializationKind::kBackgroundSerialized) \
- V(JSFunction, RefSerializationKind::kSerialized) \
- V(JSGlobalObject, RefSerializationKind::kBackgroundSerialized) \
- V(JSGlobalProxy, RefSerializationKind::kBackgroundSerialized) \
- V(JSTypedArray, RefSerializationKind::kBackgroundSerialized) \
- /* Subtypes of Context */ \
- V(NativeContext, RefSerializationKind::kNeverSerialized) \
- /* Subtypes of FixedArray */ \
- V(ObjectBoilerplateDescription, RefSerializationKind::kNeverSerialized) \
- V(ScriptContextTable, RefSerializationKind::kBackgroundSerialized) \
- /* Subtypes of String */ \
- V(InternalizedString, RefSerializationKind::kNeverSerialized) \
- /* Subtypes of FixedArrayBase */ \
- V(BytecodeArray, RefSerializationKind::kNeverSerialized) \
- V(FixedArray, RefSerializationKind::kBackgroundSerialized) \
- V(FixedDoubleArray, RefSerializationKind::kNeverSerialized) \
- /* Subtypes of Name */ \
- V(String, RefSerializationKind::kNeverSerialized) \
- V(Symbol, RefSerializationKind::kNeverSerialized) \
- /* Subtypes of JSReceiver */ \
- V(JSObject, RefSerializationKind::kBackgroundSerialized) \
- /* Subtypes of HeapObject */ \
- V(AccessorInfo, RefSerializationKind::kNeverSerialized) \
- V(AllocationSite, RefSerializationKind::kNeverSerialized) \
- V(ArrayBoilerplateDescription, RefSerializationKind::kNeverSerialized) \
- V(BigInt, RefSerializationKind::kBackgroundSerialized) \
- V(CallHandlerInfo, RefSerializationKind::kNeverSerialized) \
- V(Cell, RefSerializationKind::kNeverSerialized) \
- V(Code, RefSerializationKind::kNeverSerialized) \
- V(CodeDataContainer, RefSerializationKind::kNeverSerialized) \
- V(Context, RefSerializationKind::kNeverSerialized) \
- V(DescriptorArray, RefSerializationKind::kNeverSerialized) \
- V(FeedbackCell, RefSerializationKind::kNeverSerialized) \
- V(FeedbackVector, RefSerializationKind::kNeverSerialized) \
- V(FixedArrayBase, RefSerializationKind::kBackgroundSerialized) \
- V(FunctionTemplateInfo, RefSerializationKind::kNeverSerialized) \
- V(HeapNumber, RefSerializationKind::kNeverSerialized) \
- V(JSReceiver, RefSerializationKind::kBackgroundSerialized) \
- V(Map, RefSerializationKind::kBackgroundSerialized) \
- V(Name, RefSerializationKind::kNeverSerialized) \
- V(PropertyCell, RefSerializationKind::kBackgroundSerialized) \
- V(RegExpBoilerplateDescription, RefSerializationKind::kNeverSerialized) \
- V(ScopeInfo, RefSerializationKind::kNeverSerialized) \
- V(SharedFunctionInfo, RefSerializationKind::kNeverSerialized) \
- V(SourceTextModule, RefSerializationKind::kNeverSerialized) \
- V(TemplateObjectDescription, RefSerializationKind::kNeverSerialized) \
- /* Subtypes of Object */ \
- V(HeapObject, RefSerializationKind::kBackgroundSerialized)
-
-#define FORWARD_DECL(Name, ...) class Name##Ref;
+#define HEAP_BROKER_OBJECT_LIST_BASE(BACKGROUND_SERIALIZED, NEVER_SERIALIZED) \
+ /* Subtypes of JSObject */ \
+ BACKGROUND_SERIALIZED(JSArray) \
+ BACKGROUND_SERIALIZED(JSBoundFunction) \
+ BACKGROUND_SERIALIZED(JSDataView) \
+ BACKGROUND_SERIALIZED(JSFunction) \
+ BACKGROUND_SERIALIZED(JSGlobalObject) \
+ BACKGROUND_SERIALIZED(JSGlobalProxy) \
+ BACKGROUND_SERIALIZED(JSTypedArray) \
+ /* Subtypes of Context */ \
+ NEVER_SERIALIZED(NativeContext) \
+ /* Subtypes of FixedArray */ \
+ NEVER_SERIALIZED(ObjectBoilerplateDescription) \
+ BACKGROUND_SERIALIZED(ScriptContextTable) \
+ /* Subtypes of String */ \
+ NEVER_SERIALIZED(InternalizedString) \
+ /* Subtypes of FixedArrayBase */ \
+ NEVER_SERIALIZED(BytecodeArray) \
+ BACKGROUND_SERIALIZED(FixedArray) \
+ NEVER_SERIALIZED(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ NEVER_SERIALIZED(String) \
+ NEVER_SERIALIZED(Symbol) \
+ /* Subtypes of JSReceiver */ \
+ BACKGROUND_SERIALIZED(JSObject) \
+ /* Subtypes of HeapObject */ \
+ NEVER_SERIALIZED(AccessorInfo) \
+ NEVER_SERIALIZED(AllocationSite) \
+ NEVER_SERIALIZED(ArrayBoilerplateDescription) \
+ BACKGROUND_SERIALIZED(BigInt) \
+ NEVER_SERIALIZED(CallHandlerInfo) \
+ NEVER_SERIALIZED(Cell) \
+ NEVER_SERIALIZED(Code) \
+ NEVER_SERIALIZED(CodeDataContainer) \
+ NEVER_SERIALIZED(Context) \
+ NEVER_SERIALIZED(DescriptorArray) \
+ NEVER_SERIALIZED(FeedbackCell) \
+ NEVER_SERIALIZED(FeedbackVector) \
+ BACKGROUND_SERIALIZED(FixedArrayBase) \
+ NEVER_SERIALIZED(FunctionTemplateInfo) \
+ NEVER_SERIALIZED(HeapNumber) \
+ BACKGROUND_SERIALIZED(JSReceiver) \
+ BACKGROUND_SERIALIZED(Map) \
+ NEVER_SERIALIZED(Name) \
+ BACKGROUND_SERIALIZED(PropertyCell) \
+ NEVER_SERIALIZED(RegExpBoilerplateDescription) \
+ NEVER_SERIALIZED(ScopeInfo) \
+ NEVER_SERIALIZED(SharedFunctionInfo) \
+ NEVER_SERIALIZED(SourceTextModule) \
+ NEVER_SERIALIZED(TemplateObjectDescription) \
+ /* Subtypes of Object */ \
+ BACKGROUND_SERIALIZED(HeapObject)
+
+#define HEAP_BROKER_OBJECT_LIST(V) HEAP_BROKER_OBJECT_LIST_BASE(V, V)
+#define IGNORE_CASE(...)
+#define HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
+ HEAP_BROKER_OBJECT_LIST_BASE(V, IGNORE_CASE)
+
+#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
@@ -140,14 +150,32 @@ class ObjectRef;
template <class T>
struct ref_traits;
-#define REF_TRAITS(Name, Kind) \
- template <> \
- struct ref_traits<Name> { \
- using ref_type = Name##Ref; \
- static constexpr RefSerializationKind ref_serialization_kind = Kind; \
+#define FORWARD_DECL(Name) class Name##Data;
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+#define BACKGROUND_SERIALIZED_REF_TRAITS(Name) \
+ template <> \
+ struct ref_traits<Name> { \
+ using ref_type = Name##Ref; \
+ using data_type = Name##Data; \
+ static constexpr RefSerializationKind ref_serialization_kind = \
+ RefSerializationKind::kBackgroundSerialized; \
+ };
+
+#define NEVER_SERIALIZED_REF_TRAITS(Name) \
+ template <> \
+ struct ref_traits<Name> { \
+ using ref_type = Name##Ref; \
+ using data_type = ObjectData; \
+ static constexpr RefSerializationKind ref_serialization_kind = \
+ RefSerializationKind::kNeverSerialized; \
};
-HEAP_BROKER_OBJECT_LIST(REF_TRAITS)
-#undef REF_TYPE
+
+HEAP_BROKER_OBJECT_LIST_BASE(BACKGROUND_SERIALIZED_REF_TRAITS,
+ NEVER_SERIALIZED_REF_TRAITS)
+#undef NEVER_SERIALIZED_REF_TRAITS
+#undef BACKGROUND_SERIALIZED_REF_TRAITS
template <>
struct ref_traits<Object> {
@@ -159,6 +187,39 @@ struct ref_traits<Object> {
RefSerializationKind::kNeverSerialized;
};
+// A ref without the broker_ field, used when storage size is important.
+template <class T>
+class TinyRef {
+ private:
+ using RefType = typename ref_traits<T>::ref_type;
+
+ public:
+ explicit TinyRef(const RefType& ref) : TinyRef(ref.data_) {}
+ RefType AsRef(JSHeapBroker* broker) const;
+ static base::Optional<RefType> AsOptionalRef(JSHeapBroker* broker,
+ base::Optional<TinyRef<T>> ref) {
+ if (!ref.has_value()) return {};
+ return ref->AsRef(broker);
+ }
+ Handle<T> object() const;
+
+ private:
+ explicit TinyRef(ObjectData* data) : data_(data) { DCHECK_NOT_NULL(data); }
+ ObjectData* const data_;
+};
+
+#define V(Name) using Name##TinyRef = TinyRef<Name>;
+HEAP_BROKER_OBJECT_LIST(V)
+#undef V
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+using CodeTRef = CodeDataContainerRef;
+using CodeTTinyRef = CodeDataContainerTinyRef;
+#else
+using CodeTRef = CodeRef;
+using CodeTTinyRef = CodeTinyRef;
+#endif
+
class V8_EXPORT_PRIVATE ObjectRef {
public:
ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
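
[Editor's note on the hunk above] TinyRef stores only the ObjectData pointer and asks for the broker again when a full ref is needed, which saves space wherever many refs are embedded in compiler data structures. The sketch below shows just that size/rehydration trade-off; FullRef, Broker, and the payload field are invented for illustration and do not mirror the real ObjectRef API.

#include <cassert>
#include <cstdio>

struct Broker {};                   // stand-in for JSHeapBroker
struct ObjectData { int payload = 42; };

// Full ref: carries both the broker and the data pointer.
class FullRef {
 public:
  FullRef(Broker* broker, ObjectData* data) : broker_(broker), data_(data) {}
  int payload() const { return data_->payload; }
  Broker* broker_;
  ObjectData* data_;
};

// Tiny ref: same data pointer, but the broker is supplied only when the
// full ref is materialized, halving what has to be stored per ref.
class TinyRef {
 public:
  explicit TinyRef(const FullRef& ref) : data_(ref.data_) { assert(data_); }
  FullRef AsRef(Broker* broker) const { return FullRef(broker, data_); }
 private:
  ObjectData* const data_;
};

int main() {
  Broker broker;
  ObjectData data;
  FullRef full(&broker, &data);
  TinyRef tiny(full);                                   // store compactly
  std::printf("%d\n", tiny.AsRef(&broker).payload());   // rehydrate on use
}
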
@@ -173,11 +234,11 @@ class V8_EXPORT_PRIVATE ObjectRef {
bool IsSmi() const;
int AsSmi() const;
-#define HEAP_IS_METHOD_DECL(Name, ...) bool Is##Name() const;
+#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
-#define HEAP_AS_METHOD_DECL(Name, ...) Name##Ref As##Name() const;
+#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
@@ -203,10 +264,6 @@ class V8_EXPORT_PRIVATE ObjectRef {
}
};
-#ifdef DEBUG
- bool IsNeverSerializedHeapObject() const;
-#endif // DEBUG
-
protected:
JSHeapBroker* broker() const;
ObjectData* data() const;
@@ -220,12 +277,18 @@ class V8_EXPORT_PRIVATE ObjectRef {
friend class JSHeapBroker;
friend class JSObjectData;
friend class StringData;
+ template <class T>
+ friend class TinyRef;
friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
JSHeapBroker* broker_;
};
+template <class T>
+using ZoneRefUnorderedSet =
+ ZoneUnorderedSet<T, ObjectRef::Hash, ObjectRef::Equal>;
+
// Temporary class that carries information from a Map. We'd like to remove
// this class and use MapRef instead, but we can't as long as we support the
// kDisabled broker mode. That's because obtaining the MapRef via
@@ -293,13 +356,12 @@ class PropertyCellRef : public HeapObjectRef {
Handle<PropertyCell> object() const;
- // Can be called from a background thread.
- V8_WARN_UNUSED_RESULT bool Serialize() const;
- void SerializeAsProtector() const {
- bool serialized = Serialize();
+ V8_WARN_UNUSED_RESULT bool Cache() const;
+ void CacheAsProtector() const {
+ bool cached = Cache();
// A protector always holds a Smi value and its cell type never changes, so
- // Serialize can't fail.
- CHECK(serialized);
+ // Cache can't fail.
+ CHECK(cached);
}
PropertyDetails property_details() const;
@@ -365,13 +427,13 @@ class JSObjectRef : public JSReceiverRef {
// relaxed read. This is to ease the transition to unserialized (or
// background-serialized) elements.
base::Optional<FixedArrayBaseRef> elements(RelaxedLoadTag) const;
- void SerializeElements();
+ void SerializeElements(NotConcurrentInliningTag tag);
bool IsElementsTenured(const FixedArrayBaseRef& elements);
- void SerializeObjectCreateMap();
+ void SerializeObjectCreateMap(NotConcurrentInliningTag tag);
base::Optional<MapRef> GetObjectCreateMap() const;
- void SerializeAsBoilerplateRecursive();
+ void SerializeAsBoilerplateRecursive(NotConcurrentInliningTag tag);
};
class JSDataViewRef : public JSObjectRef {
@@ -389,7 +451,7 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- bool Serialize();
+ bool Serialize(NotConcurrentInliningTag tag);
// TODO(neis): Make return types non-optional once JSFunction is no longer
// fg-serialized.
@@ -404,27 +466,29 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
Handle<JSFunction> object() const;
- bool has_feedback_vector() const;
- bool has_initial_map() const;
- bool has_prototype() const;
- bool PrototypeRequiresRuntimeLookup() const;
+ // Returns true, iff the serialized JSFunctionData contents are consistent
+ // with the state of the underlying JSFunction object. Must be called from
+ // the main thread.
+ bool IsConsistentWithHeapState() const;
- void Serialize();
- bool serialized() const;
-
- // The following are available only after calling Serialize().
- ObjectRef prototype() const;
- MapRef initial_map() const;
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
- int InitialMapInstanceSizeWithMinSlack() const;
- void SerializeCodeAndFeedback();
- bool serialized_code_and_feedback() const;
+ bool has_feedback_vector(CompilationDependencies* dependencies) const;
+ bool has_initial_map(CompilationDependencies* dependencies) const;
+ bool PrototypeRequiresRuntimeLookup(
+ CompilationDependencies* dependencies) const;
+ bool has_instance_prototype(CompilationDependencies* dependencies) const;
+ ObjectRef instance_prototype(CompilationDependencies* dependencies) const;
+ MapRef initial_map(CompilationDependencies* dependencies) const;
+ int InitialMapInstanceSizeWithMinSlack(
+ CompilationDependencies* dependencies) const;
+ FeedbackVectorRef feedback_vector(
+ CompilationDependencies* dependencies) const;
+ FeedbackCellRef raw_feedback_cell(
+ CompilationDependencies* dependencies) const;
- FeedbackVectorRef feedback_vector() const;
- FeedbackCellRef raw_feedback_cell() const;
CodeRef code() const;
};
@@ -434,7 +498,7 @@ class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
Handle<RegExpBoilerplateDescription> object() const;
- void Serialize();
+ void Serialize(NotConcurrentInliningTag tag);
FixedArrayRef data() const;
StringRef source() const;
@@ -489,6 +553,7 @@ class ContextRef : public HeapObjectRef {
V(JSFunction, symbol_function) \
V(JSGlobalObject, global_object) \
V(JSGlobalProxy, global_proxy_object) \
+ V(JSObject, initial_array_prototype) \
V(JSObject, promise_prototype) \
V(Map, async_function_object_map) \
V(Map, block_context_map) \
@@ -524,7 +589,7 @@ class NativeContextRef : public ContextRef {
Handle<NativeContext> object() const;
- void Serialize();
+ void Serialize(NotConcurrentInliningTag tag);
#define DECL_ACCESSOR(type, name) type##Ref name() const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
@@ -580,10 +645,7 @@ class FeedbackVectorRef : public HeapObjectRef {
Handle<FeedbackVector> object() const;
SharedFunctionInfoRef shared_function_info() const;
- double invocation_count() const;
- void Serialize();
- bool serialized() const;
FeedbackCellRef GetClosureFeedbackCell(int index) const;
};
@@ -614,7 +676,7 @@ class AllocationSiteRef : public HeapObjectRef {
AllocationType GetAllocationType() const;
ObjectRef nested_site() const;
- void SerializeRecursive();
+ void SerializeRecursive(NotConcurrentInliningTag tag);
base::Optional<JSObjectRef> boilerplate() const;
ElementsKind GetElementsKind() const;
@@ -675,24 +737,21 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
- void SerializeBackPointer();
+ void SerializeBackPointer(NotConcurrentInliningTag tag);
HeapObjectRef GetBackPointer() const;
- void SerializePrototype();
+ void SerializePrototype(NotConcurrentInliningTag tag);
// TODO(neis): We should be able to remove TrySerializePrototype once
// concurrent-inlining is always on. Then we can also change the return type
// of prototype() back to HeapObjectRef.
- bool TrySerializePrototype();
+ bool TrySerializePrototype(NotConcurrentInliningTag tag);
base::Optional<HeapObjectRef> prototype() const;
- void SerializeForElementStore();
+ void SerializeForElementStore(NotConcurrentInliningTag tag);
bool HasOnlyStablePrototypesWithFastElements(
ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
- bool TrySerializeOwnDescriptor(InternalIndex descriptor_index);
- void SerializeOwnDescriptor(InternalIndex descriptor_index);
- bool serialized_own_descriptor(InternalIndex descriptor_index) const;
MapRef FindFieldOwner(InternalIndex descriptor_index) const;
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
@@ -703,11 +762,9 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
DescriptorArrayRef instance_descriptors() const;
- void SerializeRootMap();
+ void SerializeRootMap(NotConcurrentInliningTag tag);
base::Optional<MapRef> FindRootMap() const;
- // Available after calling JSFunctionRef::Serialize on a function that has
- // this map as initial map.
ObjectRef GetConstructor() const;
};
@@ -731,7 +788,7 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
// The following returns true if the CallHandlerInfo is present.
bool has_call_code() const;
- void SerializeCallCode();
+ void SerializeCallCode(NotConcurrentInliningTag tag);
base::Optional<CallHandlerInfoRef> call_code() const;
ZoneVector<Address> c_functions() const;
ZoneVector<const CFunctionInfo*> c_signatures() const;
@@ -858,9 +915,7 @@ class ScopeInfoRef : public HeapObjectRef {
bool HasOuterScopeInfo() const;
bool HasContextExtensionSlot() const;
- // Only serialized via SerializeScopeInfoChain.
ScopeInfoRef OuterScopeInfo() const;
- void SerializeScopeInfoChain();
};
#define BROKER_SFI_FIELDS(V) \
@@ -948,7 +1003,7 @@ class JSTypedArrayRef : public JSObjectRef {
size_t length() const;
void* data_ptr() const;
- void Serialize();
+ void Serialize(NotConcurrentInliningTag tag);
bool serialized() const;
HeapObjectRef buffer() const;
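
[Editor's note on the heap-refs.h diff above] The list rewrite drops the per-entry RefSerializationKind argument in favour of an X-macro that takes two callbacks, so one list can expand differently for background-serialized and never-serialized types, and passing an empty macro filters the list down to a single group. A self-contained sketch of that pattern follows, using made-up type names (Alpha, Beta, Gamma) rather than the real broker list.

#include <iostream>

enum class Kind { kNeverSerialized, kBackgroundSerialized };

struct Alpha {};
struct Beta {};
struct Gamma {};

// One list, two callbacks: each entry picks the callback matching its kind.
#define OBJECT_LIST_BASE(BACKGROUND_SERIALIZED, NEVER_SERIALIZED) \
  BACKGROUND_SERIALIZED(Alpha)                                    \
  NEVER_SERIALIZED(Beta)                                          \
  BACKGROUND_SERIALIZED(Gamma)

// Passing the same macro twice visits every entry.
#define OBJECT_LIST(V) OBJECT_LIST_BASE(V, V)
// Passing an empty macro for one slot filters the list to one group.
#define IGNORE_CASE(...)
#define BACKGROUND_SERIALIZED_OBJECT_LIST(V) OBJECT_LIST_BASE(V, IGNORE_CASE)

template <class T>
struct traits;

// Generate a different traits specialization per group, as the diff does
// for ref_traits<> with BACKGROUND_SERIALIZED_REF_TRAITS / NEVER_SERIALIZED_REF_TRAITS.
#define BACKGROUND_TRAITS(Name)                                 \
  template <>                                                   \
  struct traits<Name> {                                         \
    static constexpr Kind kind = Kind::kBackgroundSerialized;   \
  };
#define NEVER_TRAITS(Name)                                      \
  template <>                                                   \
  struct traits<Name> {                                         \
    static constexpr Kind kind = Kind::kNeverSerialized;        \
  };
OBJECT_LIST_BASE(BACKGROUND_TRAITS, NEVER_TRAITS)
#undef BACKGROUND_TRAITS
#undef NEVER_TRAITS

int main() {
#define PRINT(Name)                                                 \
  std::cout << #Name << ": "                                        \
            << (traits<Name>::kind == Kind::kBackgroundSerialized   \
                    ? "background-serialized"                       \
                    : "never-serialized")                           \
            << "\n";
  OBJECT_LIST(PRINT)
#undef PRINT

  std::cout << "background-serialized only:\n";
#define PRINT_NAME(Name) std::cout << "  " << #Name << "\n";
  BACKGROUND_SERIALIZED_OBJECT_LIST(PRINT_NAME)
#undef PRINT_NAME
}
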
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 6341b99b98..3dcdc6a33e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -59,6 +59,7 @@ class JSCallReducerAssembler : public JSGraphAssembler {
reducer->ZoneForGraphAssembler(),
[reducer](Node* n) { reducer->RevisitForGraphAssembler(n); },
nullptr, kMarkLoopExits),
+ dependencies_(reducer->dependencies()),
node_(node),
outermost_catch_scope_(
CatchScope::Outermost(reducer->ZoneForGraphAssembler())),
@@ -656,9 +657,11 @@ class JSCallReducerAssembler : public JSGraphAssembler {
JSOperatorBuilder* javascript() const { return jsgraph()->javascript(); }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+
private:
+ CompilationDependencies* const dependencies_;
Node* const node_;
-
CatchScope outermost_catch_scope_;
Node* outermost_handler_;
CatchScope* catch_scope_;
@@ -831,7 +834,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
int slot_count) {
return AddNode<Context>(graph()->NewNode(
javascript()->CreateFunctionContext(
- native_context.scope_info().object(),
+ native_context.scope_info(),
slot_count - Context::MIN_CONTEXT_SLOTS, FUNCTION_SCOPE),
outer_context, effect(), control()));
}
@@ -848,11 +851,10 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
- Handle<CodeT> code =
- broker_->CanonicalPersistentHandle(ToCodeT(*callable.code()));
+ CodeTRef code = MakeRef(broker_, ToCodeT(*callable.code()));
return AddNode<JSFunction>(graph()->NewNode(
- javascript()->CreateClosure(shared.object(), code),
- HeapConstant(feedback_cell), context, effect(), control()));
+ javascript()->CreateClosure(shared, code), HeapConstant(feedback_cell),
+ context, effect(), control()));
}
void CallPromiseExecutor(TNode<Object> executor, TNode<JSFunction> resolve,
@@ -1117,9 +1119,9 @@ TNode<Object> JSCallReducerAssembler::CopyNode() {
TNode<JSArray> JSCallReducerAssembler::CreateArrayNoThrow(
TNode<Object> ctor, TNode<Number> size, FrameState frame_state) {
- return AddNode<JSArray>(graph()->NewNode(
- javascript()->CreateArray(1, MaybeHandle<AllocationSite>()), ctor, ctor,
- size, ContextInput(), frame_state, effect(), control()));
+ return AddNode<JSArray>(
+ graph()->NewNode(javascript()->CreateArray(1, base::nullopt), ctor, ctor,
+ size, ContextInput(), frame_state, effect(), control()));
}
TNode<JSArray> JSCallReducerAssembler::AllocateEmptyJSArray(
ElementsKind kind, const NativeContextRef& native_context) {
@@ -2418,8 +2420,8 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
node->RemoveInput(n.FeedbackVectorIndex());
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
- NodeProperties::ChangeOp(
- node, javascript()->CreateArray(arity, MaybeHandle<AllocationSite>()));
+ NodeProperties::ChangeOp(node,
+ javascript()->CreateArray(arity, base::nullopt));
return Changed(node);
}
@@ -2611,17 +2613,16 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// definitely a constructor or not a constructor.
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
- MapRef first_receiver_map = MakeRef(broker(), receiver_maps[0]);
+ MapRef first_receiver_map = receiver_maps[0];
bool const is_constructor = first_receiver_map.is_constructor();
base::Optional<HeapObjectRef> const prototype =
first_receiver_map.prototype();
if (!prototype.has_value()) return inference.NoChange();
- for (Handle<Map> const map : receiver_maps) {
- MapRef receiver_map = MakeRef(broker(), map);
+ for (const MapRef& receiver_map : receiver_maps) {
base::Optional<HeapObjectRef> map_prototype = receiver_map.prototype();
if (!map_prototype.has_value()) return inference.NoChange();
@@ -2653,12 +2654,6 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
JSFunctionOrBoundFunction::kLengthDescriptorIndex);
const InternalIndex kNameIndex(
JSFunctionOrBoundFunction::kNameDescriptorIndex);
- if (!receiver_map.serialized_own_descriptor(kLengthIndex) ||
- !receiver_map.serialized_own_descriptor(kNameIndex)) {
- TRACE_BROKER_MISSING(broker(),
- "serialized descriptors on map " << receiver_map);
- return inference.NoChange();
- }
ReadOnlyRoots roots(isolate());
StringRef length_string = MakeRef(broker(), roots.length_string_handle());
StringRef name_string = MakeRef(broker(), roots.name_string_handle());
@@ -2719,7 +2714,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
DCHECK_EQ(cursor, input_count);
Node* value = effect =
graph()->NewNode(javascript()->CreateBoundFunction(
- arity_with_bound_this - kBoundThis, map.object()),
+ arity_with_bound_this - kBoundThis, map),
input_count, inputs);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -2739,7 +2734,6 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
HeapObjectMatcher m(target);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- if (!function.serialized()) return NoChange();
context = jsgraph()->Constant(function.context());
} else {
context = effect = graph()->NewNode(
@@ -2801,20 +2795,20 @@ Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
}
Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
- Node* effect = NodeProperties::GetEffectInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
// Try to determine the {object} map.
MapInference inference(broker(), object, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& object_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& object_maps = inference.GetMaps();
- MapRef candidate_map = MakeRef(broker(), object_maps[0]);
+ MapRef candidate_map = object_maps[0];
base::Optional<HeapObjectRef> candidate_prototype = candidate_map.prototype();
if (!candidate_prototype.has_value()) return inference.NoChange();
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
- MapRef object_map = MakeRef(broker(), object_maps[i]);
+ MapRef object_map = object_maps[i];
base::Optional<HeapObjectRef> map_prototype = object_map.prototype();
if (!map_prototype.has_value()) return inference.NoChange();
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
@@ -3188,13 +3182,13 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
}
namespace {
+
bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
- MapHandles const& receiver_maps,
+ ZoneVector<MapRef> const& receiver_maps,
ElementsKind* kind_return) {
DCHECK_NE(0, receiver_maps.size());
- *kind_return = MakeRef(broker, receiver_maps[0]).elements_kind();
- for (auto receiver_map : receiver_maps) {
- MapRef map = MakeRef(broker, receiver_map);
+ *kind_return = receiver_maps[0].elements_kind();
+ for (const MapRef& map : receiver_maps) {
if (!map.supports_fast_array_iteration() ||
!UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
return false;
@@ -3204,12 +3198,11 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
}
bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
- MapHandles const& receiver_maps,
+ ZoneVector<MapRef> const& receiver_maps,
std::vector<ElementsKind>* kinds,
bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
- for (auto receiver_map : receiver_maps) {
- MapRef map = MakeRef(broker, receiver_map);
+ for (const MapRef& map : receiver_maps) {
if (!map.supports_fast_array_resize()) return false;
// TODO(turbofan): We should also handle fast holey double elements once
// we got the hole NaN mess sorted out in TurboFan/V8.
@@ -3249,7 +3242,7 @@ class IteratingArrayBuiltinHelper {
// Try to determine the {receiver} map.
if (!inference_.HaveMaps()) return;
- MapHandles const& receiver_maps = inference_.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference_.GetMaps();
if (!CanInlineArrayIteratingBuiltin(broker, receiver_maps,
&elements_kind_)) {
@@ -3610,6 +3603,7 @@ FastApiCallFunctionVector CanOptimizeFastCall(
optimize_to_fast_call =
optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
#endif
+
if (optimize_to_fast_call) {
result.push_back({functions[i], c_signature});
}
@@ -3671,8 +3665,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// Try to infer the {receiver} maps from the graph.
MapInference inference(broker(), receiver, effect);
if (inference.HaveMaps()) {
- MapHandles const& receiver_maps = inference.GetMaps();
- MapRef first_receiver_map = MakeRef(broker(), receiver_maps[0]);
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
+ MapRef first_receiver_map = receiver_maps[0];
// See if we can constant-fold the compatible receiver checks.
HolderLookupResult api_holder =
@@ -3705,7 +3699,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
function_template_info.accept_any_receiver());
for (size_t i = 1; i < receiver_maps.size(); ++i) {
- MapRef receiver_map = MakeRef(broker(), receiver_maps[i]);
+ MapRef receiver_map = receiver_maps[i];
HolderLookupResult holder_i =
function_template_info.LookupHolderOfExpectedType(receiver_map);
@@ -4061,10 +4055,6 @@ JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments(
}
}
- // TODO(jgruber,v8:8888): Attempt to remove this restriction. The reason it
- // currently exists is because we cannot create code dependencies in NCI code.
- if (broker()->is_native_context_independent()) return NoChange();
-
// For call/construct with spread, we need to also install a code
// dependency on the array iterator lookup protector cell to ensure
// that no one messed with the %ArrayIteratorPrototype%.next method.
@@ -4202,9 +4192,10 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
if (feedback.IsInsufficient()) return NoChange();
AllocationSiteRef site = feedback.AsLiteral().value();
- base::Optional<JSArrayRef> boilerplate_array =
- site.boilerplate()->AsJSArray();
- int const array_length = boilerplate_array->GetBoilerplateLength().AsSmi();
+ if (!site.boilerplate().has_value()) return NoChange();
+
+ JSArrayRef boilerplate_array = site.boilerplate()->AsJSArray();
+ int const array_length = boilerplate_array.GetBoilerplateLength().AsSmi();
// We'll replace the arguments_list input with {array_length} element loads.
new_argument_count = argument_count - 1 + array_length;
@@ -4217,7 +4208,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
}
// Determine the array's map.
- MapRef array_map = boilerplate_array->map();
+ MapRef array_map = boilerplate_array.map();
if (!array_map.supports_fast_array_iteration()) {
return NoChange();
}
@@ -4277,16 +4268,15 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
}
NodeProperties::ChangeOp(
- node, javascript()->Call(
- JSCallNode::ArityForArgc(new_argument_count), frequency,
- feedback_source, ConvertReceiverMode::kNullOrUndefined,
- speculation_mode, CallFeedbackRelation::kUnrelated));
+ node,
+ javascript()->Call(JSCallNode::ArityForArgc(new_argument_count),
+ frequency, feedback_source, ConvertReceiverMode::kAny,
+ speculation_mode, CallFeedbackRelation::kUnrelated));
NodeProperties::ReplaceEffectInput(node, effect);
return Changed(node).FollowedBy(ReduceJSCall(node));
}
bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
- if (!function.serialized()) return false;
// TODO(neis): Add a way to check if function template info isn't serialized
// and add a warning in such cases. Currently we can't tell if function
// template info doesn't exist or wasn't serialized.
@@ -4310,7 +4300,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
- if (!function.serialized()) return NoChange();
// Don't inline cross native context.
if (!function.native_context().equals(native_context())) {
@@ -4380,7 +4369,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Same if the {target} is the result of a CheckClosure operation.
if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& p = JSCreateClosureNode{target}.Parameters();
- return ReduceJSCall(node, MakeRef(broker(), p.shared_info()));
+ return ReduceJSCall(node, p.shared_info(broker()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
if (cell.shared_function_info().has_value()) {
@@ -4471,13 +4460,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (feedback_cell.value().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
- FeedbackVectorRef feedback_vector = *feedback_cell.value();
- if (!feedback_vector.serialized()) {
- TRACE_BROKER_MISSING(
- broker(), "feedback vector, not serialized: " << feedback_vector);
- return NoChange();
- }
-
Node* target_closure = effect =
graph()->NewNode(simplified()->CheckClosure(feedback_cell.object()),
target, effect, control);
@@ -4969,8 +4951,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
node->ReplaceInput(n.NewTargetIndex(), array_function);
node->RemoveInput(n.FeedbackVectorIndex());
NodeProperties::ChangeOp(
- node, javascript()->CreateArray(
- arity, feedback_target->AsAllocationSite().object()));
+ node, javascript()->CreateArray(arity,
+ feedback_target->AsAllocationSite()));
return Changed(node);
} else if (feedback_target.has_value() &&
!HeapObjectMatcher(new_target).HasResolvedValue() &&
@@ -5012,13 +4994,13 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
- if (!function.serialized()) return NoChange();
// Do not reduce constructors with break points.
// If this state changes during background compilation, the compilation
// job will be aborted from the main thread (see
// Debug::PrepareFunctionForDebugExecution()).
- if (function.shared().HasBreakInfo()) return NoChange();
+ SharedFunctionInfoRef sfi = function.shared();
+ if (sfi.HasBreakInfo()) return NoChange();
// Don't inline cross native context.
if (!function.native_context().equals(native_context())) {
@@ -5026,9 +5008,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
// Check for known builtin functions.
- Builtin builtin = function.shared().HasBuiltinId()
- ? function.shared().builtin_id()
- : Builtin::kNoBuiltinId;
+ Builtin builtin =
+ sfi.HasBuiltinId() ? sfi.builtin_id() : Builtin::kNoBuiltinId;
switch (builtin) {
case Builtin::kArrayConstructor: {
// TODO(bmeurer): Deal with Array subclasses here.
@@ -5037,7 +5018,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
node->ReplaceInput(n.NewTargetIndex(), new_target);
node->RemoveInput(n.FeedbackVectorIndex());
NodeProperties::ChangeOp(
- node, javascript()->CreateArray(arity, Handle<AllocationSite>()));
+ node, javascript()->CreateArray(arity, base::nullopt));
return Changed(node);
}
case Builtin::kObjectConstructor: {
@@ -5464,7 +5445,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
std::vector<ElementsKind> kinds;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) {
@@ -5601,7 +5582,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
std::vector<ElementsKind> kinds;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
@@ -5748,7 +5729,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
std::vector<ElementsKind> kinds;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
@@ -5985,14 +5966,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
// Check that the maps are of JSArray (and more).
// TODO(turbofan): Consider adding special case for the common pattern
// `slice.call(arguments)`, for example jQuery makes heavy use of that.
bool can_be_holey = false;
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map = MakeRef(broker(), map);
+ for (const MapRef& receiver_map : receiver_maps) {
if (!receiver_map.supports_fast_array_iteration()) {
return inference.NoChange();
}
@@ -6136,23 +6116,21 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
IterationKind const iteration_kind =
CreateArrayIteratorParametersOf(iterator->op()).kind();
Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
- Node* iterator_effect = NodeProperties::GetEffectInput(iterator);
+ Effect iterator_effect{NodeProperties::GetEffectInput(iterator)};
MapInference inference(broker(), iterated_object, iterator_effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& iterated_object_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& iterated_object_maps = inference.GetMaps();
// Check that various {iterated_object_maps} have compatible elements kinds.
- ElementsKind elements_kind =
- MakeRef(broker(), iterated_object_maps[0]).elements_kind();
+ ElementsKind elements_kind = iterated_object_maps[0].elements_kind();
if (IsTypedArrayElementsKind(elements_kind)) {
// TurboFan doesn't support loading from BigInt typed arrays yet.
if (elements_kind == BIGUINT64_ELEMENTS ||
elements_kind == BIGINT64_ELEMENTS) {
return inference.NoChange();
}
- for (Handle<Map> map : iterated_object_maps) {
- MapRef iterated_object_map = MakeRef(broker(), map);
+ for (const MapRef& iterated_object_map : iterated_object_maps) {
if (iterated_object_map.elements_kind() != elements_kind) {
return inference.NoChange();
}
@@ -6416,16 +6394,7 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Effect effect = n.effect();
Control control = n.control();
- if (n.ArgumentCount() < 1) {
- effect = graph()->NewNode(simplified()->CheckString(p.feedback()), receiver,
- effect, control);
-
- Node* value = jsgraph()->FalseConstant();
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
-
- Node* search_string = n.Argument(0);
+ Node* search_string = n.ArgumentOr(0, jsgraph()->UndefinedConstant());
Node* position = n.ArgumentOr(1, jsgraph()->ZeroConstant());
HeapObjectMatcher m(search_string);
@@ -6433,51 +6402,59 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsString()) {
StringRef str = target_ref.AsString();
- if (str.length().has_value() && str.length().value() == 1) {
+ if (str.length().has_value()) {
receiver = effect = graph()->NewNode(
simplified()->CheckString(p.feedback()), receiver, effect, control);
position = effect = graph()->NewNode(
simplified()->CheckSmi(p.feedback()), position, effect, control);
- Node* string_length =
- graph()->NewNode(simplified()->StringLength(), receiver);
- Node* unsigned_position = graph()->NewNode(
- simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
-
- Node* check = graph()->NewNode(simplified()->NumberLessThan(),
- unsigned_position, string_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
- check, control);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse = jsgraph()->FalseConstant();
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- Node* masked_position =
- graph()->NewNode(simplified()->PoisonIndex(), unsigned_position);
- Node* string_first = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_position, etrue, if_true);
-
- Node* search_first = jsgraph()->Constant(str.GetFirstChar().value());
- vtrue = graph()->NewNode(simplified()->NumberEqual(), string_first,
- search_first);
+ if (str.length().value() == 0) {
+ Node* value = jsgraph()->TrueConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
+ if (str.length().value() == 1) {
+ Node* string_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+ Node* unsigned_position = graph()->NewNode(
+ simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
+
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(),
+ unsigned_position, string_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
+ check, control);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->FalseConstant();
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ Node* masked_position = graph()->NewNode(
+ simplified()->PoisonIndex(), unsigned_position);
+ Node* string_first = etrue =
+ graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ masked_position, etrue, if_true);
+
+ Node* search_first =
+ jsgraph()->Constant(str.GetFirstChar().value());
+ vtrue = graph()->NewNode(simplified()->NumberEqual(), string_first,
+ search_first);
+ }
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
}
}
}
@@ -6617,10 +6594,6 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
}
Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
- // TODO(jgruber): We could reduce here when generating native context
- // independent code, if LowerJSCreateStringIterator were implemented in
- // generic lowering.
- if (broker()->is_native_context_independent()) return NoChange();
JSCallNode n(node);
CallParameters const& p = n.Parameters();
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -6745,11 +6718,6 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
- // TODO(jgruber): We could reduce here when generating native context
- // independent code, if LowerJSCreatePromise were implemented in generic
- // lowering.
- if (broker()->is_native_context_independent()) return NoChange();
-
PromiseBuiltinReducerAssembler a(this, node, broker());
// We only inline when we have the executor.
@@ -6764,12 +6732,11 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
if (!inference->HaveMaps()) return false;
- MapHandles const& receiver_maps = inference->GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference->GetMaps();
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map = MakeRef(broker(), map);
+ for (const MapRef& receiver_map : receiver_maps) {
if (!receiver_map.IsJSPromiseMap()) return false;
base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
if (!prototype.has_value() ||
@@ -6827,9 +6794,8 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
- Handle<CodeT> code =
- broker()->CanonicalPersistentHandle(ToCodeT(*callable.code()));
- return graph()->NewNode(javascript()->CreateClosure(shared.object(), code),
+ CodeTRef code = MakeRef(broker(), ToCodeT(*callable.code()));
+ return graph()->NewNode(javascript()->CreateClosure(shared, code),
jsgraph()->HeapConstant(feedback_cell), context,
effect, control);
}
@@ -6849,7 +6815,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
MapInference inference(broker(), receiver, effect);
if (!DoPromiseChecks(&inference)) return inference.NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
if (!dependencies()->DependOnPromiseHookProtector()) {
return inference.NoChange();
@@ -6881,7 +6847,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Allocate shared context for the closures below.
context = etrue =
graph()->NewNode(javascript()->CreateFunctionContext(
- native_context().scope_info().object(),
+ native_context().scope_info(),
PromiseBuiltins::kPromiseFinallyContextLength -
Context::MIN_CONTEXT_SLOTS,
FUNCTION_SCOPE),
@@ -6927,7 +6893,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// of the call to "then" below.
{
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : receiver_maps) maps.insert(map, graph()->zone());
+ for (const MapRef& map : receiver_maps) {
+ maps.insert(map.object(), graph()->zone());
+ }
effect = graph()->NewNode(simplified()->MapGuard(maps), receiver, effect,
control);
}
@@ -7007,7 +6975,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// doesn't escape to user JavaScript. So bake this information
// into the graph such that subsequent passes can use the
// information for further optimizations.
- MapRef promise_map = native_context().promise_function().initial_map();
+ MapRef promise_map =
+ native_context().promise_function().initial_map(dependencies());
effect = graph()->NewNode(
simplified()->MapGuard(ZoneHandleSet<Map>(promise_map.object())), promise,
effect, control);
@@ -7215,8 +7184,8 @@ Reduction JSCallReducer::ReduceMapPrototypeGet(Node* node) {
JSCallNode n(node);
if (n.ArgumentCount() != 1) return NoChange();
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
Node* key = NodeProperties::GetValueInput(node, 2);
MapInference inference(broker(), receiver, effect);
@@ -7262,8 +7231,8 @@ Reduction JSCallReducer::ReduceMapPrototypeHas(Node* node) {
JSCallNode n(node);
if (n.ArgumentCount() != 1) return NoChange();
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
Node* key = NodeProperties::GetValueInput(node, 2);
MapInference inference(broker(), receiver, effect);
@@ -7305,8 +7274,8 @@ Reduction JSCallReducer::ReduceCollectionIteration(
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
InstanceType type = InstanceTypeForCollectionKind(collection_kind);
MapInference inference(broker(), receiver, effect);
@@ -7325,8 +7294,8 @@ Reduction JSCallReducer::ReduceCollectionPrototypeSize(
Node* node, CollectionKind collection_kind) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
InstanceType type = InstanceTypeForCollectionKind(collection_kind);
MapInference inference(broker(), receiver, effect);
@@ -7376,12 +7345,10 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
{
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
- receiver_instance_type =
- MakeRef(broker(), receiver_maps[0]).instance_type();
+ ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
+ receiver_instance_type = receiver_maps[0].instance_type();
for (size_t i = 1; i < receiver_maps.size(); ++i) {
- if (MakeRef(broker(), receiver_maps[i]).instance_type() !=
- receiver_instance_type) {
+ if (receiver_maps[i].instance_type() != receiver_instance_type) {
return inference.NoChange();
}
}
@@ -7653,8 +7620,8 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps() ||
@@ -7901,8 +7868,8 @@ Reduction JSCallReducer::ReduceGlobalIsNaN(Node* node) {
// ES6 section 20.3.4.10 Date.prototype.getTime ( )
Reduction JSCallReducer::ReduceDatePrototypeGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(JS_DATE_TYPE)) {
@@ -7969,62 +7936,45 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Only the initial JSRegExp map is valid here, since the following lastIndex
// check as well as the lowered builtin call rely on a known location of the
// lastIndex field.
- Handle<Map> regexp_initial_map =
- native_context().regexp_function().initial_map().object();
+ MapRef regexp_initial_map =
+ native_context().regexp_function().initial_map(dependencies());
MapInference inference(broker(), regexp, effect);
if (!inference.Is(regexp_initial_map)) return inference.NoChange();
- MapHandles const& regexp_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& regexp_maps = inference.GetMaps();
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- if (broker()->is_concurrent_inlining()) {
- // Obtain precomputed access infos from the broker.
- for (auto map : regexp_maps) {
- MapRef map_ref = MakeRef(broker(), map);
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map_ref, MakeRef(broker(), isolate()->factory()->exec_string()),
- AccessMode::kLoad, dependencies());
- access_infos.push_back(access_info);
- }
- } else {
- // Compute property access info for "exec" on {resolution}.
- access_info_factory.ComputePropertyAccessInfos(
- MapHandles(regexp_maps.begin(), regexp_maps.end()),
- factory()->exec_string(), AccessMode::kLoad, &access_infos);
+
+ for (const MapRef& map : regexp_maps) {
+ access_infos.push_back(broker()->GetPropertyAccessInfo(
+ map, MakeRef(broker(), isolate()->factory()->exec_string()),
+ AccessMode::kLoad, dependencies()));
}
PropertyAccessInfo ai_exec =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
if (ai_exec.IsInvalid()) return inference.NoChange();
+ if (!ai_exec.IsFastDataConstant()) return inference.NoChange();
- // If "exec" has been modified on {regexp}, we can't do anything.
- if (ai_exec.IsFastDataConstant()) {
- Handle<JSObject> holder;
- // Do not reduce if the exec method is not on the prototype chain.
- if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
-
- JSObjectRef holder_ref = MakeRef(broker(), holder);
+ // Do not reduce if the exec method is not on the prototype chain.
+ base::Optional<JSObjectRef> holder = ai_exec.holder();
+ if (!holder.has_value()) return inference.NoChange();
- // Bail out if the exec method is not the original one.
- base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
- ai_exec.field_representation(), ai_exec.field_index(), dependencies());
- if (!constant.has_value() ||
- !constant->equals(native_context().regexp_exec_function())) {
- return inference.NoChange();
- }
-
- // Add proper dependencies on the {regexp}s [[Prototype]]s.
- dependencies()->DependOnStablePrototypeChains(
- ai_exec.lookup_start_object_maps(), kStartAtPrototype,
- MakeRef(broker(), holder));
- } else {
- // TODO(v8:11457) Support dictionary mode protoypes here.
+ // Bail out if the exec method is not the original one.
+ base::Optional<ObjectRef> constant = holder->GetOwnFastDataProperty(
+ ai_exec.field_representation(), ai_exec.field_index(), dependencies());
+ if (!constant.has_value() ||
+ !constant->equals(native_context().regexp_exec_function())) {
return inference.NoChange();
}
+ // Add proper dependencies on the {regexp}s [[Prototype]]s.
+ dependencies()->DependOnStablePrototypeChains(
+ ai_exec.lookup_start_object_maps(), kStartAtPrototype, holder.value());
+
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -8116,6 +8066,10 @@ Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
return NoChange();
}
+CompilationDependencies* JSCallReducer::dependencies() const {
+ return broker()->dependencies();
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
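
[Editor's note on the js-call-reducer.cc diff above] The ReduceStringPrototypeStartsWith change adds a constant-folding tier for a statically known search string: an empty search string folds to true, and a single-character search string reduces to one bounds check plus one character comparison, with the position clamped at zero (NumberMax against the zero constant). The sketch below spells out the scalar logic those graph nodes encode; the function and parameter names are illustrative, and longer search strings are assumed to stay on the generic path.

#include <cassert>
#include <string>

// Mirrors the reduction: only reached when the search string's length is
// known at compile time to be 0 or 1.
bool StartsWithShortConstant(const std::string& receiver,
                             const std::string& search, double position) {
  assert(search.size() <= 1);
  if (search.empty()) return true;  // empty prefix: always true

  // Position is clamped below at zero; an out-of-range position simply
  // fails the bounds check.
  double unsigned_position = position < 0 ? 0 : position;
  if (!(unsigned_position < static_cast<double>(receiver.size()))) {
    return false;  // the "if_false" branch: nothing to compare against
  }
  // The "if_true" branch: a single character-code comparison.
  return receiver[static_cast<size_t>(unsigned_position)] == search[0];
}

int main() {
  assert(StartsWithShortConstant("hello", "", 3));     // empty prefix
  assert(StartsWithShortConstant("hello", "h", 0));    // match at 0
  assert(!StartsWithShortConstant("hello", "e", 0));   // mismatch
  assert(StartsWithShortConstant("hello", "l", 2));    // match at offset
  assert(!StartsWithShortConstant("hello", "h", 99));  // out of range
}
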
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index b1ad8b5ba8..e9b09e3515 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -48,14 +48,12 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
using Flags = base::Flags<Flag>;
JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
- Zone* temp_zone, Flags flags,
- CompilationDependencies* dependencies)
+ Zone* temp_zone, Flags flags)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
temp_zone_(temp_zone),
- flags_(flags),
- dependencies_(dependencies) {}
+ flags_(flags) {}
const char* reducer_name() const override { return "JSCallReducer"; }
@@ -72,6 +70,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
bool has_wasm_calls() const { return has_wasm_calls_; }
+ CompilationDependencies* dependencies() const;
+
private:
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node,
@@ -256,13 +256,11 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
Flags flags() const { return flags_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
Zone* const temp_zone_;
Flags const flags_;
- CompilationDependencies* const dependencies_;
std::set<Node*> waitlist_;
// For preventing infinite recursion via ReduceJSCallWithArrayLikeOrSpread.
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index ecccf7e373..414977eb7d 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -389,12 +389,12 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
DCHECK(closure_type.AsHeapConstant()->Ref().IsJSFunction());
JSFunctionRef js_function =
closure_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!js_function.has_initial_map()) return NoChange();
+ if (!js_function.has_initial_map(dependencies())) return NoChange();
SlackTrackingPrediction slack_tracking_prediction =
dependencies()->DependOnInitialMapInstanceSizePrediction(js_function);
- MapRef initial_map = js_function.initial_map();
+ MapRef initial_map = js_function.initial_map(dependencies());
DCHECK(initial_map.instance_type() == JS_GENERATOR_OBJECT_TYPE ||
initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -618,13 +618,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- base::Optional<AllocationSiteRef> site_ref;
- {
- Handle<AllocationSite> site;
- if (p.site().ToHandle(&site)) {
- site_ref = MakeRef(broker(), site);
- }
- }
+ base::Optional<AllocationSiteRef> site_ref = p.site(broker());
AllocationType allocation = AllocationType::kYoung;
base::Optional<MapRef> initial_map =
@@ -652,7 +646,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else {
PropertyCellRef array_constructor_protector =
MakeRef(broker(), factory()->array_constructor_protector());
- array_constructor_protector.SerializeAsProtector();
+ array_constructor_protector.CacheAsProtector();
can_inline_call = array_constructor_protector.value().AsSmi() ==
Protectors::kProtectorValid;
}
@@ -879,7 +873,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- MapRef const map = MakeRef(broker(), p.map());
+ MapRef const map = p.map(broker());
Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
Node* bound_this = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -920,9 +914,9 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
JSCreateClosureNode n(node);
CreateClosureParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared = MakeRef(broker(), p.shared_info());
+ SharedFunctionInfoRef shared = p.shared_info(broker());
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- HeapObjectRef code = MakeRef(broker(), p.code());
+ HeapObjectRef code = p.code(broker());
Effect effect = n.effect();
Control control = n.control();
Node* context = n.context();
@@ -1060,7 +1054,8 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
- MapRef promise_map = native_context().promise_function().initial_map();
+ MapRef promise_map =
+ native_context().promise_function().initial_map(dependencies());
AllocationBuilder a(jsgraph(), effect, graph()->start());
a.Allocate(promise_map.instance_size());
@@ -1140,7 +1135,7 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Retrieve the initial map for the object.
- MapRef map = native_context().object_function().initial_map();
+ MapRef map = native_context().object_function().initial_map(dependencies());
DCHECK(!map.is_dictionary_map());
DCHECK(!map.IsInobjectSlackTrackingInProgress());
Node* js_object_map = jsgraph()->Constant(map);
@@ -1203,7 +1198,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef scope_info = MakeRef(broker(), parameters.scope_info());
+ ScopeInfoRef scope_info = parameters.scope_info(broker());
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
@@ -1243,7 +1238,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
Node* extension = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1264,7 +1259,7 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
Node* exception = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1285,7 +1280,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
int const context_length = scope_info.ContextLength();
// Use inline allocation for block contexts up to a size limit.
@@ -1313,10 +1308,12 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
}
namespace {
+
base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
HeapObjectRef prototype) {
MapRef standard_map =
- broker->target_native_context().object_function().initial_map();
+ broker->target_native_context().object_function().initial_map(
+ broker->dependencies());
if (prototype.equals(standard_map.prototype().value())) {
return standard_map;
}
@@ -1329,6 +1326,7 @@ base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
}
return base::Optional<MapRef>();
}
+
} // namespace
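
GetObjectCreateMap above returns an optional map: the Object.create fast path only applies when the requested prototype matches a map the compiler can prove, otherwise the caller falls back to a slower path. A rough stand-alone sketch of that optional-returning lookup, with plain integers standing in for prototype and map handles (illustrative only; the real code also consults the prototype's own map before giving up):

#include <cassert>
#include <optional>

struct Map {
  int prototype;
};

// Stand-in for the native context's Object function initial map.
constexpr Map kStandardMap{/*prototype=*/42};

// Mirrors the shape of GetObjectCreateMap: return the standard map when the
// prototype matches, otherwise give up with an empty optional.
std::optional<Map> GetObjectCreateMap(int prototype) {
  if (prototype == kStandardMap.prototype) return kStandardMap;
  return std::nullopt;
}

int main() {
  assert(GetObjectCreateMap(42).has_value());
  assert(!GetObjectCreateMap(7).has_value());
}
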
Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
@@ -1886,7 +1884,8 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
Node* JSCreateLowering::AllocateLiteralRegExp(
Node* effect, Node* control, RegExpBoilerplateDescriptionRef boilerplate) {
- MapRef initial_map = native_context().regexp_function().initial_map();
+ MapRef initial_map =
+ native_context().regexp_function().initial_map(dependencies());
// Sanity check that JSRegExp object layout hasn't changed.
STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index cc5d6aa69c..bbc47e45ad 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -239,15 +239,10 @@ namespace {
// some cases - unlike the full builtin, the megamorphic builtin does fewer
// checks and does not collect feedback.
bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
+ base::Optional<NameRef> name,
JSHeapBroker* broker) {
- if (broker->is_native_context_independent()) {
- // The decision to use the megamorphic load builtin is made based on
- // current feedback, and is thus context-dependent. It cannot be used when
- // generating NCI code.
- return false;
- }
-
- ProcessedFeedback const& feedback = broker->GetFeedback(source);
+ ProcessedFeedback const& feedback =
+ broker->GetFeedbackForPropertyAccess(source, AccessMode::kLoad, name);
if (feedback.kind() == ProcessedFeedback::kElementAccess) {
return feedback.AsElementAccess().transition_groups().empty();
@@ -263,6 +258,7 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
}
UNREACHABLE();
}
+
} // namespace
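
The rewritten ShouldUseMegamorphicLoadBuiltin above consults the recorded property-access feedback directly instead of bailing out for NCI code, and picks the megamorphic IC only when the feedback shows nothing left to exploit (the visible hunk shows the element-access arm; the other arms below are guesses). A minimal stand-alone sketch of that decision shape, using toy enums rather than the real ProcessedFeedback and JSHeapBroker types:

#include <cassert>

// Toy stand-ins for processed-feedback kinds; the real classes carry much
// more state and live in the compiler's heap-broker layer.
enum class FeedbackKind { kInsufficient, kNamedAccess, kElementAccess };

struct Feedback {
  FeedbackKind kind;
  bool element_transition_groups_empty;  // Only meaningful for kElementAccess.
  bool named_maps_empty;                 // Only meaningful for kNamedAccess.
};

// Use the megamorphic builtin only when the feedback has no maps or
// transition groups left, i.e. the site already behaves megamorphically.
bool ShouldUseMegamorphicLoadBuiltin(const Feedback& feedback) {
  switch (feedback.kind) {
    case FeedbackKind::kElementAccess:
      return feedback.element_transition_groups_empty;
    case FeedbackKind::kNamedAccess:
      return feedback.named_maps_empty;  // Guessed arm; not shown in the hunk.
    case FeedbackKind::kInsufficient:
      return false;
  }
  return false;
}

int main() {
  assert(ShouldUseMegamorphicLoadBuiltin({FeedbackKind::kElementAccess, true, false}));
  assert(!ShouldUseMegamorphicLoadBuiltin({FeedbackKind::kInsufficient, false, false}));
}
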
void JSGenericLowering::LowerJSHasProperty(Node* node) {
@@ -290,14 +286,14 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
n->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
- node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
? Builtin::kKeyedLoadICTrampoline_Megamorphic
: Builtin::kKeyedLoadICTrampoline);
} else {
n->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
- node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
? Builtin::kKeyedLoadIC_Megamorphic
: Builtin::kKeyedLoadIC);
}
@@ -311,25 +307,25 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 1);
if (!p.feedback().IsValid()) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
ReplaceWithBuiltinCall(node, Builtin::kGetProperty);
} else if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithBuiltinCall(
- node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtin::kLoadICTrampoline_Megamorphic
- : Builtin::kLoadICTrampoline);
+ ReplaceWithBuiltinCall(node, ShouldUseMegamorphicLoadBuiltin(
+ p.feedback(), p.name(broker()), broker())
+ ? Builtin::kLoadICTrampoline_Megamorphic
+ : Builtin::kLoadICTrampoline);
} else {
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithBuiltinCall(
- node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtin::kLoadIC_Megamorphic
- : Builtin::kLoadIC);
+ ReplaceWithBuiltinCall(node, ShouldUseMegamorphicLoadBuiltin(
+ p.feedback(), p.name(broker()), broker())
+ ? Builtin::kLoadIC_Megamorphic
+ : Builtin::kLoadIC);
}
}
@@ -354,7 +350,7 @@ void JSGenericLowering::LowerJSLoadNamedFromSuper(Node* node) {
// be double-checked that the FeedbackVector parameter will be the
// UndefinedConstant.
DCHECK(p.feedback().IsValid());
- node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kLoadSuperIC);
@@ -369,13 +365,13 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 0);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
ReplaceWithBuiltinCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable =
@@ -434,16 +430,16 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 2);
if (!p.feedback().IsValid()) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
} else if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreICTrampoline);
} else {
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreIC);
@@ -459,13 +455,13 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 2);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable = CodeFactory::StoreOwnIC(isolate());
ReplaceWithBuiltinCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
@@ -481,12 +477,12 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 1);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreGlobalICTrampoline);
} else {
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreGlobalIC);
@@ -591,12 +587,9 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
Node* stub_arity = jsgraph()->Int32Constant(arity);
- MaybeHandle<AllocationSite> const maybe_site = p.site();
- Handle<AllocationSite> site;
- DCHECK_IMPLIES(broker()->is_native_context_independent(),
- maybe_site.is_null());
- Node* type_info = maybe_site.ToHandle(&site) ? jsgraph()->HeapConstant(site)
- : jsgraph()->UndefinedConstant();
+ base::Optional<AllocationSiteRef> const site = p.site(broker());
+ Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
+ : jsgraph()->UndefinedConstant();
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
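
The new p.site(broker()) accessor returns a base::Optional<AllocationSiteRef> instead of a MaybeHandle, so the lowering can branch on has_value() as shown above. A small stand-alone illustration of the same pattern using std::optional; Constant and UndefinedConstant are toy stand-ins, not the JSGraph API:

#include <iostream>
#include <optional>
#include <string>

// Toy stand-ins for jsgraph()->Constant(...) / jsgraph()->UndefinedConstant().
std::string Constant(const std::string& site) { return "Constant(" + site + ")"; }
std::string UndefinedConstant() { return "UndefinedConstant()"; }

int main() {
  std::optional<std::string> site;  // No allocation site recorded.
  // Same shape as the type_info selection in LowerJSCreateArray above.
  std::string type_info = site.has_value() ? Constant(*site) : UndefinedConstant();
  std::cout << type_info << "\n";  // Prints UndefinedConstant().

  site = "AllocationSite@0x1234";
  type_info = site.has_value() ? Constant(*site) : UndefinedConstant();
  std::cout << type_info << "\n";  // Prints Constant(AllocationSite@0x1234).
}
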
@@ -640,9 +633,9 @@ void JSGenericLowering::LowerJSRegExpTest(Node* node) {
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
JSCreateClosureNode n(node);
CreateClosureParameters const& p = n.Parameters();
- Handle<SharedFunctionInfo> const shared_info = p.shared_info();
+ SharedFunctionInfoRef shared_info = p.shared_info(broker());
STATIC_ASSERT(n.FeedbackCellIndex() == 0);
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(shared_info));
node->RemoveInput(4); // control
// Use the FastNewClosure builtin only for functions allocated in new space.
@@ -656,7 +649,7 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- Handle<ScopeInfo> scope_info = parameters.scope_info();
+ ScopeInfoRef scope_info = parameters.scope_info(broker());
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -664,11 +657,11 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
Callable callable =
CodeFactory::FastNewFunctionContext(isolate(), scope_type);
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info));
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
ReplaceWithBuiltinCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
}
}
@@ -704,7 +697,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 0);
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker())));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
// Use the CreateShallowArrayLiteral builtin only for shallow boilerplates
@@ -720,8 +713,8 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
JSGetTemplateObjectNode n(node);
GetTemplateObjectParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared = MakeRef(broker(), p.shared());
- TemplateObjectDescriptionRef description = MakeRef(broker(), p.description());
+ SharedFunctionInfoRef shared = p.shared(broker());
+ TemplateObjectDescriptionRef description = p.description(broker());
DCHECK_EQ(node->op()->ControlInputCount(), 1);
node->RemoveInput(NodeProperties::FirstControlIndex(node));
@@ -755,7 +748,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 0);
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker())));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
  // Use the CreateShallowObjectLiteral builtin only for shallow boilerplates
@@ -789,47 +782,30 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
STATIC_ASSERT(n.FeedbackVectorIndex() == 0);
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker())));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithBuiltinCall(node, Builtin::kCreateRegExpLiteral);
}
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
+ node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
- Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
+ node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
- Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
+ ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
+ node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kPushBlockContext);
}
-namespace {
-
-bool CollectCallAndConstructFeedback(JSHeapBroker* broker) {
- // Call and construct feedback is a special case. Besides shape feedback, we
- // also increment the call count, which is later used to make inlining
- // decisions. The call count is only comparable/reliable if it is incremented
- // for all calls inside a function. This is not the case in default turbofan
- // mode, in which many calls may be inlined and will thus never reach generic
- // lowering (where we insert the feedback-collecting builtin call).
- // Therefore it should only be collected in native context independent code,
- // where we 1. know every call will reach generic lowering, and 2. we must
- // collect full feedback to properly tier up later.
- return broker->is_native_context_independent();
-}
-
-} // namespace
-
// TODO(jgruber,v8:8888): Should this collect feedback?
void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
ConstructForwardVarargsParameters p =
@@ -861,57 +837,22 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
static constexpr int kReceiver = 1;
- static constexpr int kMaybeFeedbackVector = 1;
- if (CollectFeedbackInGenericLowering() &&
- CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) {
- const int stack_argument_count =
- arg_count + kReceiver + kMaybeFeedbackVector;
- Callable callable =
- Builtins::CallableFor(isolate(), Builtin::kConstruct_WithFeedback);
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
- kMaybeFeedbackVector);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
- Node* receiver = jsgraph()->UndefinedConstant();
- Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
- // Register argument inputs are followed by stack argument inputs (such as
- // feedback_vector). Both are listed in ascending order. Note that
- // the receiver is implicitly placed on the stack and is thus inserted
- // between explicitly-specified register and stack arguments.
- // TODO(jgruber): Implement a simpler way to specify these mutations.
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, slot);
- node->InsertInput(zone(), 5, feedback_vector);
- node->InsertInput(zone(), 6, receiver);
- // After: {code, target, new_target, arity, slot, vector, receiver,
- // ...args}.
-
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- const int stack_argument_count = arg_count + kReceiver;
- Callable callable = Builtins::CallableFor(isolate(), Builtin::kConstruct);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* receiver = jsgraph()->UndefinedConstant();
- node->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, receiver);
+ const int stack_argument_count = arg_count + kReceiver;
+ Callable callable = Builtins::CallableFor(isolate(), Builtin::kConstruct);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(n.FeedbackVectorIndex());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
- // After: {code, target, new_target, arity, receiver, ...args}.
+ // After: {code, target, new_target, arity, receiver, ...args}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
@@ -923,58 +864,25 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
static constexpr int kReceiver = 1;
static constexpr int kArgumentList = 1;
- static constexpr int kMaybeFeedbackVector = 1;
-
- if (CollectFeedbackInGenericLowering() &&
- CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) {
- const int stack_argument_count =
- arg_count - kArgumentList + kReceiver + kMaybeFeedbackVector;
- Callable callable = Builtins::CallableFor(
- isolate(), Builtin::kConstructWithArrayLike_WithFeedback);
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
- kMaybeFeedbackVector);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = jsgraph()->UndefinedConstant();
- Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
- Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
- // Register argument inputs are followed by stack argument inputs (such as
- // feedback_vector). Both are listed in ascending order. Note that
- // the receiver is implicitly placed on the stack and is thus inserted
- // between explicitly-specified register and stack arguments.
- // TODO(jgruber): Implement a simpler way to specify these mutations.
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 4, slot);
- node->InsertInput(zone(), 5, feedback_vector);
- node->InsertInput(zone(), 6, receiver);
- // After: {code, target, new_target, arguments_list, slot, vector,
- // receiver}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- const int stack_argument_count = arg_count - kArgumentList + kReceiver;
- Callable callable =
- Builtins::CallableFor(isolate(), Builtin::kConstructWithArrayLike);
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = jsgraph()->UndefinedConstant();
- node->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 4, receiver);
+ const int stack_argument_count = arg_count - kArgumentList + kReceiver;
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtin::kConstructWithArrayLike);
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(n.FeedbackVectorIndex());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 4, receiver);
- // After: {code, target, new_target, arguments_list, receiver}.
+ // After: {code, target, new_target, arguments_list, receiver}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
@@ -986,80 +894,34 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
static constexpr int kReceiver = 1;
static constexpr int kTheSpread = 1; // Included in `arg_count`.
- static constexpr int kMaybeFeedbackVector = 1;
-
- if (CollectFeedbackInGenericLowering() &&
- CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) {
- const int stack_argument_count =
- arg_count + kReceiver + kMaybeFeedbackVector;
- Callable callable = Builtins::CallableFor(
- isolate(), Builtin::kConstructWithSpread_WithFeedback);
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
- kTheSpread + kMaybeFeedbackVector);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
-
- // The single available register is needed for `slot`, thus `spread` remains
- // on the stack here.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
- Node* receiver = jsgraph()->UndefinedConstant();
- Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
- Node* spread = node->RemoveInput(n.LastArgumentIndex());
-
- // Register argument inputs are followed by stack argument inputs (such as
- // feedback_vector). Both are listed in ascending order. Note that
- // the receiver is implicitly placed on the stack and is thus inserted
- // between explicitly-specified register and stack arguments.
- // TODO(jgruber): Implement a simpler way to specify these mutations.
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, slot);
- // Arguments in the stack should be inserted in reversed order, ie, the last
- // arguments defined in the interface descriptor should be inserted first.
- DCHECK_EQ(callable.descriptor().GetStackArgumentOrder(),
- StackArgumentOrder::kJS);
- node->InsertInput(zone(), 5, feedback_vector);
- node->InsertInput(zone(), 6, spread);
- node->InsertInput(zone(), 7, receiver);
- // After: {code, target, new_target, arity, slot, vector, spread, receiver,
- // ...args}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- const int stack_argument_count = arg_count + kReceiver - kTheSpread;
- Callable callable = CodeFactory::ConstructWithSpread(isolate());
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ const int stack_argument_count = arg_count + kReceiver - kTheSpread;
+ Callable callable = CodeFactory::ConstructWithSpread(isolate());
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
- // We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
- Node* receiver = jsgraph()->UndefinedConstant();
- DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex());
- node->RemoveInput(n.FeedbackVectorIndex());
- Node* spread = node->RemoveInput(n.LastArgumentIndex());
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex());
+ node->RemoveInput(n.FeedbackVectorIndex());
+ Node* spread = node->RemoveInput(n.LastArgumentIndex());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, spread);
- node->InsertInput(zone(), 5, receiver);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, spread);
+ node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arity, spread, receiver, ...args}.
+ // After: {code, target, new_target, arity, spread, receiver, ...args}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
-// TODO(jgruber,v8:8888): Should this collect feedback?
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
@@ -1082,34 +944,17 @@ void JSGenericLowering::LowerJSCall(Node* node) {
int const arg_count = p.arity_without_implicit_args();
ConvertReceiverMode const mode = p.convert_mode();
- Node* feedback_vector = n.feedback_vector();
node->RemoveInput(n.FeedbackVectorIndex());
- if (CollectFeedbackInGenericLowering() &&
- CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) {
- Callable callable = CodeFactory::Call_WithFeedback(isolate(), mode);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count + 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
- node->InsertInput(zone(), 3, slot);
- node->InsertInput(zone(), 4, feedback_vector);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- Callable callable = CodeFactory::Call(isolate(), mode);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count + 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
+ Callable callable = CodeFactory::Call(isolate(), mode);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
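
LowerJSCall above, like the other call lowerings in this file, rewrites the node's input list in place: the feedback vector input is removed, then the stub code is inserted at index 0 and the arity at index 2, producing layouts like the ones spelled out in the nearby "After:" comments. The same insert/remove bookkeeping on an ordinary vector, purely to illustrate how the input indices shift (not the real Node API, and assuming the node starts as {target, receiver, ...args, feedback_vector}):

#include <cassert>
#include <string>
#include <vector>

using Inputs = std::vector<std::string>;

void InsertInput(Inputs& inputs, size_t index, const std::string& value) {
  inputs.insert(inputs.begin() + index, value);
}

void RemoveInput(Inputs& inputs, size_t index) {
  inputs.erase(inputs.begin() + index);
}

int main() {
  // Before lowering: {target, receiver, ...args, feedback_vector}.
  Inputs inputs = {"target", "receiver", "arg0", "arg1", "feedback_vector"};

  RemoveInput(inputs, inputs.size() - 1);  // Drop the feedback vector.
  InsertInput(inputs, 0, "code");          // Stub code goes first.
  InsertInput(inputs, 2, "arity");         // Arity lands after the target.

  // After: {code, target, arity, receiver, ...args}.
  assert((inputs == Inputs{"code", "target", "arity", "receiver", "arg0", "arg1"}));
}
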
void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
@@ -1122,55 +967,25 @@ void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
static constexpr int kArgumentsList = 1;
static constexpr int kReceiver = 1;
- if (CollectFeedbackInGenericLowering() &&
- CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) {
- const int stack_argument_count = arg_count - kArgumentsList + kReceiver;
- Callable callable = Builtins::CallableFor(
- isolate(), Builtin::kCallWithArrayLike_WithFeedback);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = n.receiver();
- Node* arguments_list = n.Argument(0);
- Node* feedback_vector = n.feedback_vector();
- Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
-
- // Shuffling inputs.
- // Before: {target, receiver, arguments_list, vector}.
-
- node->ReplaceInput(1, arguments_list);
- node->ReplaceInput(2, feedback_vector);
- node->ReplaceInput(3, receiver);
-
- // Now: {target, arguments_list, vector, receiver}.
-
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 3, slot);
-
- // After: {code, target, arguments_list, slot, vector, receiver}.
-
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- const int stack_argument_count = arg_count - kArgumentsList + kReceiver;
- Callable callable = CodeFactory::CallWithArrayLike(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = n.receiver();
- Node* arguments_list = n.Argument(0);
+ const int stack_argument_count = arg_count - kArgumentsList + kReceiver;
+ Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = n.receiver();
+ Node* arguments_list = n.Argument(0);
- // Shuffling inputs.
- // Before: {target, receiver, arguments_list, vector}.
+ // Shuffling inputs.
+ // Before: {target, receiver, arguments_list, vector}.
- node->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, stub_code);
- node->ReplaceInput(2, arguments_list);
- node->ReplaceInput(3, receiver);
+ node->RemoveInput(n.FeedbackVectorIndex());
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, arguments_list);
+ node->ReplaceInput(3, receiver);
- // After: {code, target, arguments_list, receiver}.
+ // After: {code, target, arguments_list, receiver}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
@@ -1182,73 +997,33 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
static constexpr int kReceiver = 1;
static constexpr int kTheSpread = 1;
- static constexpr int kMaybeFeedbackVector = 1;
-
- if (CollectFeedbackInGenericLowering() &&
- CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) {
- const int stack_argument_count =
- arg_count - kTheSpread + kReceiver + kMaybeFeedbackVector;
- Callable callable =
- Builtins::CallableFor(isolate(), Builtin::kCallWithSpread_WithFeedback);
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
- kMaybeFeedbackVector);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
-
- // We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
-
- // Register argument inputs are followed by stack argument inputs (such as
- // feedback_vector). Both are listed in ascending order. Note that
- // the receiver is implicitly placed on the stack and is thus inserted
- // between explicitly-specified register and stack arguments.
- // TODO(jgruber): Implement a simpler way to specify these mutations.
-
- // Shuffling inputs.
- // Before: {target, receiver, ...args, spread, vector}.
- Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
- Node* spread = node->RemoveInput(n.LastArgumentIndex());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
- node->InsertInput(zone(), 3, spread);
- node->InsertInput(zone(), 4, slot);
- node->InsertInput(zone(), 5, feedback_vector);
- // After: {code, target, arity, spread, slot, vector, receiver, ...args}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- const int stack_argument_count = arg_count - kTheSpread + kReceiver;
- Callable callable = CodeFactory::CallWithSpread(isolate());
- // If this fails, we might need to update the parameter reordering code
- // to ensure that the additional arguments passed via stack are pushed
- // between top of stack and JS arguments.
- DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), stack_argument_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ const int stack_argument_count = arg_count - kTheSpread + kReceiver;
+ Callable callable = CodeFactory::CallWithSpread(isolate());
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
- // We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
- // Shuffling inputs.
- // Before: {target, receiver, ...args, spread, vector}.
+ // Shuffling inputs.
+ // Before: {target, receiver, ...args, spread, vector}.
- node->RemoveInput(n.FeedbackVectorIndex());
- Node* spread = node->RemoveInput(n.LastArgumentIndex());
+ node->RemoveInput(n.FeedbackVectorIndex());
+ Node* spread = node->RemoveInput(n.LastArgumentIndex());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
- node->InsertInput(zone(), 3, spread);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, spread);
- // After: {code, target, arity, spread, receiver, ...args}.
+ // After: {code, target, arity, spread, receiver, ...args}.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 024e4f147f..dc34bcae6d 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -19,6 +19,7 @@
#include "src/objects/feedback-cell.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
@@ -54,8 +55,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
feedback_(zone()),
property_access_infos_(zone()),
minimorphic_property_access_infos_(zone()),
- typed_array_string_tags_(zone()),
- serialized_functions_(zone()) {
+ typed_array_string_tags_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
@@ -124,10 +124,6 @@ void JSHeapBroker::Retire() {
CHECK_EQ(mode_, kSerialized);
TRACE(this, "Retiring");
mode_ = kRetired;
-
-#ifdef DEBUG
- PrintRefsAnalysis();
-#endif // DEBUG
}
void JSHeapBroker::SetTargetNativeContextRef(
@@ -170,40 +166,6 @@ StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
}
}
-bool JSHeapBroker::ShouldBeSerializedForCompilation(
- const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback,
- const HintsVector& arguments) const {
- if (serialized_functions_.size() >= kMaxSerializedFunctionsCacheSize) {
- TRACE_BROKER_MISSING(this,
- "opportunity - serialized functions cache is full.");
- return false;
- }
- SerializedFunction function{shared, feedback};
- auto matching_functions = serialized_functions_.equal_range(function);
- return std::find_if(matching_functions.first, matching_functions.second,
- [&arguments](const auto& entry) {
- return entry.second == arguments;
- }) == matching_functions.second;
-}
-
-void JSHeapBroker::SetSerializedForCompilation(
- const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback,
- const HintsVector& arguments) {
- SerializedFunction function{shared, feedback};
- serialized_functions_.insert({function, arguments});
- TRACE(this, "Set function " << shared << " with " << feedback
- << " as serialized for compilation");
-}
-
-bool JSHeapBroker::IsSerializedForCompilation(
- const SharedFunctionInfoRef& shared,
- const FeedbackVectorRef& feedback) const {
- if (mode() == kDisabled) return true;
-
- SerializedFunction function = {shared, feedback};
- return serialized_functions_.find(function) != serialized_functions_.end();
-}
-
bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
return IsArrayOrObjectPrototype(object.object());
}
@@ -285,36 +247,37 @@ ElementAccessFeedback::transition_groups() const {
}
ElementAccessFeedback const& ElementAccessFeedback::Refine(
- ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const {
+ JSHeapBroker* broker, ZoneVector<MapRef> const& inferred_maps) const {
ElementAccessFeedback& refined_feedback =
- *zone->New<ElementAccessFeedback>(zone, keyed_mode(), slot_kind());
+ *broker->zone()->New<ElementAccessFeedback>(broker->zone(), keyed_mode(),
+ slot_kind());
if (inferred_maps.empty()) return refined_feedback;
- ZoneUnorderedSet<Handle<Map>, Handle<Map>::hash, Handle<Map>::equal_to>
- inferred(zone);
+ ZoneRefUnorderedSet<MapRef> inferred(broker->zone());
inferred.insert(inferred_maps.begin(), inferred_maps.end());
for (auto const& group : transition_groups()) {
DCHECK(!group.empty());
- TransitionGroup new_group(zone);
+ TransitionGroup new_group(broker->zone());
for (size_t i = 1; i < group.size(); ++i) {
- Handle<Map> source = group[i];
+ MapRef source = MakeRefAssumeMemoryFence(broker, *group[i]);
if (inferred.find(source) != inferred.end()) {
- new_group.push_back(source);
+ new_group.push_back(source.object());
}
}
- Handle<Map> target = group.front();
+ MapRef target = MakeRefAssumeMemoryFence(broker, *group.front());
bool const keep_target =
inferred.find(target) != inferred.end() || new_group.size() > 1;
if (keep_target) {
- new_group.push_back(target);
+ new_group.push_back(target.object());
// The target must be at the front, the order of sources doesn't matter.
std::swap(new_group[0], new_group[new_group.size() - 1]);
}
if (!new_group.empty()) {
- DCHECK(new_group.size() == 1 || new_group.front().equals(target));
+ DCHECK(new_group.size() == 1 ||
+ new_group.front().equals(target.object()));
refined_feedback.transition_groups_.push_back(std::move(new_group));
}
}
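
ElementAccessFeedback::Refine above now works with broker-owned MapRefs: it intersects every transition group with the set of maps actually inferred for the receiver, keeps the group's target only if it was inferred or at least two sources survive, and keeps the target at the front of the group. A compact stand-alone rendering of that filtering logic, with plain ints standing in for map handles (illustrative, not the V8 data structures):

#include <cassert>
#include <unordered_set>
#include <vector>

using Map = int;
using TransitionGroup = std::vector<Map>;  // group[0] is the transition target.

std::vector<TransitionGroup> Refine(const std::vector<TransitionGroup>& groups,
                                    const std::unordered_set<Map>& inferred) {
  std::vector<TransitionGroup> refined;
  if (inferred.empty()) return refined;
  for (const TransitionGroup& group : groups) {
    TransitionGroup new_group;
    // Keep only the sources (indices 1..n-1) that were actually inferred.
    for (size_t i = 1; i < group.size(); ++i) {
      if (inferred.count(group[i])) new_group.push_back(group[i]);
    }
    Map target = group.front();
    bool keep_target = inferred.count(target) > 0 || new_group.size() > 1;
    if (keep_target) {
      new_group.push_back(target);
      // The target must be at the front; the order of sources doesn't matter.
      std::swap(new_group.front(), new_group.back());
    }
    if (!new_group.empty()) refined.push_back(std::move(new_group));
  }
  return refined;
}

int main() {
  // One group: target 10 with sources 11 and 12; only 11 was inferred.
  auto refined = Refine({{10, 11, 12}}, {11});
  assert(refined.size() == 1);
  assert(refined[0] == (TransitionGroup{11}));  // Target dropped, lone source kept.
}
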
@@ -378,8 +341,8 @@ bool GlobalAccessFeedback::immutable() const {
base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
if (IsPropertyCell()) {
- bool cell_serialized = property_cell().Serialize();
- CHECK(cell_serialized); // Can't fail on the main thread.
+ bool cell_cached = property_cell().Cache();
+ CHECK(cell_cached); // Can't fail on the main thread.
return property_cell().value();
} else if (IsScriptContextSlot() && immutable()) {
return script_context().get(slot_index());
@@ -468,7 +431,7 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
- ZoneVector<Handle<Map>> const& maps, bool has_migration_target_maps)
+ ZoneVector<MapRef> const& maps, bool has_migration_target_maps)
: ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
name_(name),
handler_(handler),
@@ -478,7 +441,7 @@ MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
}
NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
- ZoneVector<Handle<Map>> const& maps,
+ ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind)
: ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) {
DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) ||
@@ -510,46 +473,24 @@ ProcessedFeedback const& JSHeapBroker::GetFeedback(
FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
FeedbackSource const& source) const {
- if (is_concurrent_inlining_) {
- ProcessedFeedback const& processed = GetFeedback(source);
- return processed.slot_kind();
- }
+ if (HasFeedback(source)) return GetFeedback(source).slot_kind();
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
return nexus.kind();
}
bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
- return is_concurrent_inlining_ ? GetFeedback(source).IsInsufficient()
- : FeedbackNexus(source.vector, source.slot,
- feedback_nexus_config())
- .IsUninitialized();
+ if (HasFeedback(source)) return GetFeedback(source).IsInsufficient();
+ return FeedbackNexus(source.vector, source.slot, feedback_nexus_config())
+ .IsUninitialized();
}
namespace {
-// Update deprecated maps, drop unupdatable ones and abandoned prototype maps.
-void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
- auto in = maps->begin();
- auto out = in;
- auto end = maps->end();
-
- for (; in != end; ++in) {
- Handle<Map> map = *in;
- if (Map::TryUpdate(isolate, map).ToHandle(&map) &&
- !map->is_abandoned_prototype_map()) {
- DCHECK(!map->is_deprecated());
- *out = map;
- ++out;
- }
- }
-
- // Remove everything between the last valid map and the end of the vector.
- maps->erase(out, end);
-}
-
+using MapRefAndHandler = std::pair<MapRef, MaybeObjectHandle>;
MaybeObjectHandle TryGetMinimorphicHandler(
- std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind,
- Handle<NativeContext> native_context, bool is_turboprop) {
+ ZoneVector<MapRefAndHandler> const& maps_and_handlers,
+ FeedbackSlotKind kind, NativeContextRef const& native_context,
+ bool is_turboprop) {
if (!is_turboprop || !FLAG_turbo_dynamic_map_checks || !IsLoadICKind(kind)) {
return MaybeObjectHandle();
}
@@ -560,14 +501,14 @@ MaybeObjectHandle TryGetMinimorphicHandler(
// polymorphic loads currently we don't inline the builtins even without
// dynamic map checks.
if (maps_and_handlers.size() == 1 &&
- *maps_and_handlers[0].first ==
- native_context->initial_array_prototype().map()) {
+ maps_and_handlers[0].first.equals(
+ native_context.initial_array_prototype().map())) {
return MaybeObjectHandle();
}
MaybeObjectHandle initial_handler;
- for (MapAndHandler map_and_handler : maps_and_handlers) {
- auto map = map_and_handler.first;
+ for (const MapRefAndHandler& map_and_handler : maps_and_handlers) {
+ MapRef map = map_and_handler.first;
MaybeObjectHandle handler = map_and_handler.second;
if (handler.is_null()) return MaybeObjectHandle();
DCHECK(!handler->IsCleared());
@@ -577,7 +518,7 @@ MaybeObjectHandle TryGetMinimorphicHandler(
LoadHandler::Kind::kField) {
return MaybeObjectHandle();
}
- CHECK(!map->IsJSGlobalProxyMap());
+ CHECK(!map.object()->IsJSGlobalProxyMap());
if (initial_handler.is_null()) {
initial_handler = handler;
} else if (!handler.is_identical_to(initial_handler)) {
@@ -587,21 +528,15 @@ MaybeObjectHandle TryGetMinimorphicHandler(
return initial_handler;
}
-bool HasMigrationTargets(const MapHandles& maps) {
- for (Handle<Map> map : maps) {
- if (map->is_migration_target()) return true;
+bool HasMigrationTargets(const ZoneVector<MapRef>& maps) {
+ for (const MapRef& map : maps) {
+ if (map.is_migration_target()) return true;
}
return false;
}
} // namespace
-bool JSHeapBroker::CanUseFeedback(const FeedbackNexus& nexus) const {
- // TODO(jgruber,v8:8888): Currently, nci code does not use any
- // feedback. This restriction will be relaxed in the future.
- return !is_native_context_independent() && !nexus.IsUninitialized();
-}
-
const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback(
FeedbackSlotKind kind) const {
return *zone()->New<InsufficientFeedback>(kind);
@@ -612,29 +547,45 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
base::Optional<NameRef> static_name) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
FeedbackSlotKind kind = nexus.kind();
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(kind);
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(kind);
- std::vector<MapAndHandler> maps_and_handlers;
- nexus.ExtractMapsAndFeedback(&maps_and_handlers);
- MapHandles maps;
- for (auto const& entry : maps_and_handlers) {
- maps.push_back(entry.first);
+ ZoneVector<MapRefAndHandler> maps_and_handlers(zone());
+ ZoneVector<MapRef> maps(zone());
+ {
+ std::vector<MapAndHandler> maps_and_handlers_unfiltered;
+ nexus.ExtractMapsAndFeedback(&maps_and_handlers_unfiltered);
+
+ for (const MapAndHandler& map_and_handler : maps_and_handlers_unfiltered) {
+ MapRef map = MakeRefAssumeMemoryFence(this, *map_and_handler.first);
+ // May change concurrently at any time - must be guarded by a dependency
+ // if non-deprecation is important.
+ if (map.is_deprecated()) {
+ // TODO(ishell): support fast map updating if we enable it.
+ CHECK(!FLAG_fast_map_update);
+ base::Optional<Map> maybe_map = MapUpdater::TryUpdateNoLock(
+ isolate(), *map.object(), ConcurrencyMode::kConcurrent);
+ if (maybe_map.has_value()) {
+ map = MakeRefAssumeMemoryFence(this, maybe_map.value());
+ } else {
+ continue; // Couldn't update the deprecated map.
+ }
+ }
+ if (map.is_abandoned_prototype_map()) continue;
+ maps_and_handlers.push_back({map, map_and_handler.second});
+ maps.push_back(map);
+ }
}
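
The inline loop just above replaces the old FilterRelevantReceiverMaps helper: deprecated maps are updated through MapUpdater::TryUpdateNoLock where possible, un-updatable and abandoned-prototype maps are dropped, and the surviving (map, handler) pairs feed the rest of the feedback processing. A small stand-alone sketch of that filter, with plain structs instead of MapRef and MaybeObjectHandle (all names and the TryUpdate behavior are illustrative):

#include <cassert>
#include <optional>
#include <vector>

struct Map {
  int id;
  bool deprecated = false;
  bool abandoned_prototype = false;
};

// Stand-in for MapUpdater::TryUpdateNoLock: a deprecated map may or may not
// have a usable updated version.
std::optional<Map> TryUpdate(const Map& map) {
  if (map.id % 2 == 0) return Map{map.id + 1, false, false};
  return std::nullopt;
}

std::vector<Map> FilterReceiverMaps(const std::vector<Map>& raw) {
  std::vector<Map> result;
  for (Map map : raw) {
    if (map.deprecated) {
      std::optional<Map> updated = TryUpdate(map);
      if (!updated.has_value()) continue;  // Couldn't update the deprecated map.
      map = *updated;
    }
    if (map.abandoned_prototype) continue;  // Never useful as feedback.
    result.push_back(map);
  }
  return result;
}

int main() {
  std::vector<Map> raw = {{1, false, false},   // kept as-is
                          {2, true, false},    // deprecated but updatable: kept as 3
                          {5, true, false},    // deprecated, not updatable: dropped
                          {7, false, true}};   // abandoned prototype: dropped
  std::vector<Map> filtered = FilterReceiverMaps(raw);
  assert(filtered.size() == 2);
  assert(filtered[0].id == 1 && filtered[1].id == 3);
}
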
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
MaybeObjectHandle handler = TryGetMinimorphicHandler(
- maps_and_handlers, kind, target_native_context().object(),
- is_turboprop());
+ maps_and_handlers, kind, target_native_context(), is_turboprop());
if (!handler.is_null()) {
return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
- *name, kind, handler.object(),
- ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()),
+ *name, kind, CanonicalPersistentHandle(handler.object()), maps,
HasMigrationTargets(maps));
}
- FilterRelevantReceiverMaps(isolate(), &maps);
-
// If no maps were found for a non-megamorphic access, then our maps died
// and we should soft-deopt.
if (maps.empty() && nexus.ic_state() != MEGAMORPHIC) {
@@ -644,8 +595,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
if (name.has_value()) {
// We rely on this invariant in JSGenericLowering.
DCHECK_IMPLIES(maps.empty(), nexus.ic_state() == MEGAMORPHIC);
- return *zone()->New<NamedAccessFeedback>(
- *name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind);
+ return *zone()->New<NamedAccessFeedback>(*name, maps, kind);
} else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) {
return ProcessFeedbackMapsForElementAccess(
maps, KeyedAccessMode::FromNexus(nexus), kind);
@@ -661,58 +611,52 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof ||
nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict);
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) {
return *zone()->New<GlobalAccessFeedback>(nexus.kind());
}
- Handle<Object> feedback_value(nexus.GetFeedback()->GetHeapObjectOrSmi(),
- isolate());
+ Handle<Object> feedback_value =
+ CanonicalPersistentHandle(nexus.GetFeedback()->GetHeapObjectOrSmi());
if (feedback_value->IsSmi()) {
// The wanted name belongs to a script-scope variable and the feedback
// tells us where to find its value.
- int number = feedback_value->Number();
+ int const number = feedback_value->Number();
int const script_context_index =
FeedbackNexus::ContextIndexBits::decode(number);
int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number);
- bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number);
- Handle<Context> context = ScriptContextTable::GetContext(
- isolate(), target_native_context().script_context_table().object(),
- script_context_index);
- {
- ObjectRef contents =
- MakeRef(this, handle(context->get(context_slot_index), isolate()));
- CHECK(!contents.equals(
- MakeRef<Object>(this, isolate()->factory()->the_hole_value())));
- }
- ContextRef context_ref = MakeRef(this, context);
- if (immutable) {
- context_ref.get(context_slot_index);
- }
- return *zone()->New<GlobalAccessFeedback>(context_ref, context_slot_index,
- immutable, nexus.kind());
+ ContextRef context = MakeRefAssumeMemoryFence(
+ this,
+ target_native_context().script_context_table().object()->get_context(
+ script_context_index, kAcquireLoad));
+
+ base::Optional<ObjectRef> contents = context.get(context_slot_index);
+ if (contents.has_value()) CHECK(!contents->IsTheHole());
+
+ return *zone()->New<GlobalAccessFeedback>(
+ context, context_slot_index,
+ FeedbackNexus::ImmutabilityBit::decode(number), nexus.kind());
}
CHECK(feedback_value->IsPropertyCell());
// The wanted name belongs (or did belong) to a property on the global
// object and the feedback is the cell holding its value.
- PropertyCellRef cell =
- MakeRef(this, Handle<PropertyCell>::cast(feedback_value));
- MakeRef(this,
- Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad));
- return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind());
+ return *zone()->New<GlobalAccessFeedback>(
+ MakeRefAssumeMemoryFence(this,
+ Handle<PropertyCell>::cast(feedback_value)),
+ nexus.kind());
}
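
In the script-context branch above, the Smi feedback value packs three pieces of information: the script context index, the slot index within that context, and an immutability bit, decoded via FeedbackNexus::ContextIndexBits, SlotIndexBits, and ImmutabilityBit. A stand-alone sketch of the same bit-field packing with made-up field widths (the real layouts are defined by FeedbackNexus and may differ):

#include <cassert>
#include <cstdint>

// Hypothetical layout: bits 0..11 = slot index, bits 12..23 = context index,
// bit 24 = immutability.
constexpr uint32_t kSlotBits = 12;
constexpr uint32_t kContextBits = 12;

constexpr uint32_t Encode(uint32_t context_index, uint32_t slot_index, bool immutable) {
  return slot_index | (context_index << kSlotBits) |
         (static_cast<uint32_t>(immutable) << (kSlotBits + kContextBits));
}

constexpr uint32_t DecodeSlot(uint32_t packed) {
  return packed & ((1u << kSlotBits) - 1);
}
constexpr uint32_t DecodeContext(uint32_t packed) {
  return (packed >> kSlotBits) & ((1u << kContextBits) - 1);
}
constexpr bool DecodeImmutable(uint32_t packed) {
  return ((packed >> (kSlotBits + kContextBits)) & 1u) != 0;
}

int main() {
  uint32_t packed = Encode(/*context_index=*/3, /*slot_index=*/17, /*immutable=*/true);
  assert(DecodeContext(packed) == 3);
  assert(DecodeSlot(packed) == 17);
  assert(DecodeImmutable(packed));
}
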
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
FeedbackSource const& source) const {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
DCHECK_NE(hint, BinaryOperationHint::kNone); // Not uninitialized.
return *zone()->New<BinaryOperationFeedback>(hint, nexus.kind());
@@ -721,7 +665,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
FeedbackSource const& source) const {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
CompareOperationHint hint = nexus.GetCompareOperationFeedback();
DCHECK_NE(hint, CompareOperationHint::kNone); // Not uninitialized.
return *zone()->New<CompareOperationFeedback>(hint, nexus.kind());
@@ -730,7 +674,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
FeedbackSource const& source) const {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
ForInHint hint = nexus.GetForInFeedback();
DCHECK_NE(hint, ForInHint::kNone); // Not uninitialized.
return *zone()->New<ForInFeedback>(hint, nexus.kind());
@@ -739,14 +683,14 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
base::Optional<JSObjectRef> optional_constructor;
{
MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback();
Handle<JSObject> constructor;
if (maybe_constructor.ToHandle(&constructor)) {
- optional_constructor = MakeRef(this, constructor);
+ optional_constructor = MakeRefAssumeMemoryFence(this, *constructor);
}
}
return *zone()->New<InstanceOfFeedback>(optional_constructor, nexus.kind());
@@ -755,63 +699,67 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
if (!nexus.GetFeedback()->GetHeapObject(&object)) {
return NewInsufficientFeedback(nexus.kind());
}
- AllocationSiteRef site = MakeRef(this, AllocationSite::cast(object));
- if (site.PointsToLiteral()) site.SerializeRecursive();
+ AllocationSiteRef site =
+ MakeRefAssumeMemoryFence(this, AllocationSite::cast(object));
+ if (!is_concurrent_inlining() && site.PointsToLiteral()) {
+ site.SerializeRecursive(NotConcurrentInliningTag{this});
+ }
return *zone()->New<LiteralFeedback>(site, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
if (!nexus.GetFeedback()->GetHeapObject(&object)) {
return NewInsufficientFeedback(nexus.kind());
}
- RegExpBoilerplateDescriptionRef boilerplate = MakeRef(
- this, handle(RegExpBoilerplateDescription::cast(object), isolate()));
- boilerplate.Serialize();
+ RegExpBoilerplateDescriptionRef boilerplate = MakeRefAssumeMemoryFence(
+ this, RegExpBoilerplateDescription::cast(object));
+ if (!is_concurrent_inlining()) {
+ boilerplate.Serialize(NotConcurrentInliningTag{this});
+ }
return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
if (!nexus.GetFeedback()->GetHeapObject(&object)) {
return NewInsufficientFeedback(nexus.kind());
}
- JSArrayRef array = MakeRef(this, handle(JSArray::cast(object), isolate()));
+ JSArrayRef array = MakeRefAssumeMemoryFence(this, JSArray::cast(object));
return *zone()->New<TemplateObjectFeedback>(array, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
+ if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
base::Optional<HeapObjectRef> target_ref;
{
MaybeObject maybe_target = nexus.GetFeedback();
HeapObject target_object;
if (maybe_target->GetHeapObject(&target_object)) {
- // TryMakeRef is used because the GC predicate may fail if the
- // JSFunction was allocated too recently to be store-ordered.
- target_ref = TryMakeRef(this, handle(target_object, isolate()));
+ target_ref = MakeRefAssumeMemoryFence(this, target_object);
}
}
+
float frequency = nexus.ComputeCallFrequency();
SpeculationMode mode = nexus.GetSpeculationMode();
CallFeedbackContent content = nexus.GetCallFeedbackContent();
@@ -821,9 +769,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
FeedbackSource const& source) {
- ProcessedFeedback const& feedback =
- is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForBinaryOperation(source);
+ ProcessedFeedback const& feedback = ProcessFeedbackForBinaryOperation(source);
return feedback.IsInsufficient() ? BinaryOperationHint::kNone
: feedback.AsBinaryOperation().value();
}
@@ -831,67 +777,19 @@ BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation(
FeedbackSource const& source) {
ProcessedFeedback const& feedback =
- is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForCompareOperation(source);
+ ProcessFeedbackForCompareOperation(source);
return feedback.IsInsufficient() ? CompareOperationHint::kNone
: feedback.AsCompareOperation().value();
}
ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
- ProcessedFeedback const& feedback = is_concurrent_inlining_
- ? GetFeedback(source)
- : ProcessFeedbackForForIn(source);
+ ProcessedFeedback const& feedback = ProcessFeedbackForForIn(source);
return feedback.IsInsufficient() ? ForInHint::kNone
: feedback.AsForIn().value();
}
-ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
- FeedbackSource const& source, AccessMode mode,
- base::Optional<NameRef> static_name) {
- return is_concurrent_inlining_
- ? GetFeedback(source)
- : ProcessFeedbackForPropertyAccess(source, mode, static_name);
-}
-
-ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
- FeedbackSource const& source) {
- return is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForInstanceOf(source);
-}
-
-ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
- FeedbackSource const& source) {
- return is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForCall(source);
-}
-
-ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
- FeedbackSource const& source) {
- return is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForGlobalAccess(source);
-}
-
ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source) {
- return is_concurrent_inlining_
- ? GetFeedback(source)
- : ProcessFeedbackForArrayOrObjectLiteral(source);
-}
-
-ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
- FeedbackSource const& source) {
- return is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForRegExpLiteral(source);
-}
-
-ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
- FeedbackSource const& source) {
- return is_concurrent_inlining_ ? GetFeedback(source)
- : ProcessFeedbackForTemplateObject(source);
-}
-
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
- FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback =
ReadFeedbackForArrayOrObjectLiteral(source);
@@ -899,7 +797,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
return feedback;
}
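The renamed GetFeedbackFor* entry points above and below all share the same memoizing shape: consult the broker's per-compilation cache, fall back to the matching ReadFeedbackFor* helper, and record the result (the SetFeedback step sits in context elided by these hunks). A minimal sketch of that shape, with "X" standing for any feedback kind; only HasFeedback, GetFeedback and SetFeedback are taken from this patch, the rest is illustrative:

// Sketch only -- not part of the patch. "X" is a placeholder feedback kind.
ProcessedFeedback const& JSHeapBroker::GetFeedbackForX(
    FeedbackSource const& source) {
  if (HasFeedback(source)) return GetFeedback(source);  // cached on repeat calls
  ProcessedFeedback const& feedback = ReadFeedbackForX(source);  // slow path
  SetFeedback(source, &feedback);  // memoize for later lookups
  return feedback;
}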
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForRegExpLiteral(source);
@@ -907,7 +805,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
return feedback;
}
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject(
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForTemplateObject(source);
@@ -939,7 +837,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn(
return feedback;
}
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess(
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name) {
if (HasFeedback(source)) return GetFeedback(source);
@@ -949,7 +847,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess(
return feedback;
}
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf(
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForInstanceOf(source);
@@ -957,7 +855,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf(
return feedback;
}
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall(
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForCall(source);
@@ -965,7 +863,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall(
return feedback;
}
-ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess(
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(source);
@@ -974,21 +872,22 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess(
}
ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps, KeyedAccessMode const& keyed_mode,
+ ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind) {
DCHECK(!maps.empty());
// Collect possible transition targets.
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
- for (Handle<Map> map : maps) {
- MapRef map_ref = MakeRef(this, map);
- map_ref.SerializeRootMap();
-
- if (CanInlineElementAccess(map_ref) &&
- IsFastElementsKind(map->elements_kind()) &&
- GetInitialFastElementsKind() != map->elements_kind()) {
- possible_transition_targets.push_back(map);
+ for (MapRef& map : maps) {
+ if (!is_concurrent_inlining()) {
+ map.SerializeRootMap(NotConcurrentInliningTag{this});
+ }
+
+ if (CanInlineElementAccess(map) &&
+ IsFastElementsKind(map.elements_kind()) &&
+ GetInitialFastElementsKind() != map.elements_kind()) {
+ possible_transition_targets.push_back(map.object());
}
}
@@ -1001,21 +900,28 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
ZoneMap<Handle<Map>, TransitionGroup, HandleLess> transition_groups(zone());
// Separate the actual receiver maps and the possible transition sources.
- for (Handle<Map> map : maps) {
+ for (const MapRef& map : maps) {
+ Map transition_target;
+
// Don't generate elements kind transitions from stable maps.
- Map transition_target = map->is_stable()
- ? Map()
- : map->FindElementsKindTransitionedMap(
- isolate(), possible_transition_targets);
+ if (!map.is_stable()) {
+ // The lock is needed for UnusedPropertyFields (called deep inside
+ // FindElementsKindTransitionedMap).
+ MapUpdaterGuardIfNeeded mumd_scope(this);
+
+ transition_target = map.object()->FindElementsKindTransitionedMap(
+ isolate(), possible_transition_targets, ConcurrencyMode::kConcurrent);
+ }
+
if (transition_target.is_null()) {
- TransitionGroup group(1, map, zone());
- transition_groups.insert({map, group});
+ TransitionGroup group(1, map.object(), zone());
+ transition_groups.insert({map.object(), group});
} else {
- Handle<Map> target(transition_target, isolate());
+ Handle<Map> target = CanonicalPersistentHandle(transition_target);
TransitionGroup new_group(1, target, zone());
TransitionGroup& actual_group =
transition_groups.insert({target, new_group}).first->second;
- actual_group.push_back(map);
+ actual_group.push_back(map.object());
}
}
@@ -1052,31 +958,22 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
FeedbackNexus const& nexus) {
Name raw_name = nexus.GetName();
if (raw_name.is_null()) return base::nullopt;
- return MakeRef(this, handle(raw_name, isolate()));
+ return MakeRefAssumeMemoryFence(this, raw_name);
}
PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MapRef map, NameRef name, AccessMode access_mode,
- CompilationDependencies* dependencies, SerializationPolicy policy) {
+ CompilationDependencies* dependencies) {
+ DCHECK_NOT_NULL(dependencies);
+
PropertyAccessTarget target({map, name, access_mode});
auto it = property_access_infos_.find(target);
if (it != property_access_infos_.end()) return it->second;
- if (policy == SerializationPolicy::kAssumeSerialized &&
- !FLAG_turbo_concurrent_get_property_access_info) {
- TRACE_BROKER_MISSING(this, "PropertyAccessInfo for "
- << access_mode << " of property " << name
- << " on map " << map);
- return PropertyAccessInfo::Invalid(zone());
- }
-
- CHECK_NOT_NULL(dependencies);
AccessInfoFactory factory(this, dependencies, zone());
- PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(
- map.object(), name.object(), access_mode);
+ PropertyAccessInfo access_info =
+ factory.ComputePropertyAccessInfo(map, name, access_mode);
if (is_concurrent_inlining_) {
- CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
- mode() == kSerializing);
TRACE(this, "Storing PropertyAccessInfo for "
<< access_mode << " of property " << name << " on map "
<< map);
@@ -1087,17 +984,10 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source, SerializationPolicy policy) {
+ FeedbackSource const& source) {
auto it = minimorphic_property_access_infos_.find(source);
if (it != minimorphic_property_access_infos_.end()) return it->second;
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_BROKER_MISSING(this, "MinimorphicLoadPropertyAccessInfo for slot "
- << source.index() << " "
- << MakeRef<Object>(this, source.vector));
- return MinimorphicLoadPropertyAccessInfo::Invalid();
- }
-
AccessInfoFactory factory(this, nullptr, zone());
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
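Dropping the SerializationPolicy parameter changes every GetPropertyAccessInfo call site; the shape used elsewhere in this patch (see the ReduceJSInstanceOf and ReduceJSResolvePromise hunks below) is roughly:

// Illustrative call shape only; receiver_map, name and dependencies() are the
// caller's own values, as in the js-native-context-specialization.cc hunks.
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
    receiver_map, name, AccessMode::kLoad, dependencies());
if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
  return NoChange();
}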
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index cff68af67a..91b94bebb5 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -15,7 +15,6 @@
#include "src/compiler/heap-refs.h"
#include "src/compiler/processed-feedback.h"
#include "src/compiler/refs-map.h"
-#include "src/compiler/serializer-hints.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/persistent-handles.h"
@@ -119,23 +118,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
- bool is_native_context_independent() const {
- // TODO(jgruber,v8:8888): Remove dependent code.
- return false;
- }
- bool generate_full_feedback_collection() const {
- // NCI code currently collects full feedback.
- DCHECK_IMPLIES(is_native_context_independent(),
- CollectFeedbackInGenericLowering());
- return is_native_context_independent();
- }
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
- // TODO(mvstanton): when the broker gathers feedback on the background
- // thread, this should return a local NexusConfig object which points
- // to the associated LocalHeap.
- return NexusConfig::FromMainThread(isolate());
+ return IsMainThread() ? NexusConfig::FromMainThread(isolate())
+ : NexusConfig::FromBackgroundThread(
+ isolate(), local_isolate()->heap());
}
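With the thread-aware NexusConfig, the ReadFeedbackFor* methods earlier in this patch can construct a FeedbackNexus from either the main or the background thread; the usage they all follow is simply:

// Usage as in the ReadFeedbackFor* methods above (not new code).
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());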
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
@@ -183,12 +171,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool HasFeedback(FeedbackSource const& source) const;
void SetFeedback(FeedbackSource const& source,
ProcessedFeedback const* feedback);
- ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const;
FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
// TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps, KeyedAccessMode const& keyed_mode,
+ ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind);
// Binary, comparison and for-in hints can be fully expressed via
@@ -216,71 +203,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ProcessFeedbackForBinaryOperation(
FeedbackSource const& source);
- ProcessedFeedback const& ProcessFeedbackForCall(FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForCompareOperation(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForForIn(
FeedbackSource const& source);
- ProcessedFeedback const& ProcessFeedbackForGlobalAccess(
- FeedbackSource const& source);
- ProcessedFeedback const& ProcessFeedbackForInstanceOf(
- FeedbackSource const& source);
- ProcessedFeedback const& ProcessFeedbackForPropertyAccess(
- FeedbackSource const& source, AccessMode mode,
- base::Optional<NameRef> static_name);
- ProcessedFeedback const& ProcessFeedbackForArrayOrObjectLiteral(
- FeedbackSource const& source);
- ProcessedFeedback const& ProcessFeedbackForRegExpLiteral(
- FeedbackSource const& source);
- ProcessedFeedback const& ProcessFeedbackForTemplateObject(
- FeedbackSource const& source);
bool FeedbackIsInsufficient(FeedbackSource const& source) const;
base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
- // If {policy} is {kAssumeSerialized} and the broker doesn't know about the
- // combination of {map}, {name}, and {access_mode}, returns Invalid.
PropertyAccessInfo GetPropertyAccessInfo(
MapRef map, NameRef name, AccessMode access_mode,
- CompilationDependencies* dependencies = nullptr,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ CompilationDependencies* dependencies);
MinimorphicLoadPropertyAccessInfo GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- // Used to separate the problem of a concurrent GetPropertyAccessInfo (GPAI)
- // from serialization. GPAI is currently called both during the serialization
- // phase, and on the background thread. While some crucial objects (like
- // JSObject) still must be serialized, we do the following:
- // - Run GPAI during serialization to discover and serialize required objects.
- // - After the serialization phase, clear cached property access infos.
- // - On the background thread, rerun GPAI in a concurrent setting. The cache
- // has been cleared, thus the actual logic runs again.
- // Once all required object kinds no longer require serialization, this
- // should be removed together with all GPAI calls during serialization.
- void ClearCachedPropertyAccessInfos() {
- CHECK(FLAG_turbo_concurrent_get_property_access_info);
- property_access_infos_.clear();
- }
-
- // As above, clear cached ObjectData that can be reconstructed, i.e. is
- // either never-serialized or background-serialized.
- void ClearReconstructibleData();
+ FeedbackSource const& source);
StringRef GetTypedArrayStringTag(ElementsKind kind);
- bool ShouldBeSerializedForCompilation(const SharedFunctionInfoRef& shared,
- const FeedbackVectorRef& feedback,
- const HintsVector& arguments) const;
- void SetSerializedForCompilation(const SharedFunctionInfoRef& shared,
- const FeedbackVectorRef& feedback,
- const HintsVector& arguments);
- bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
- const FeedbackVectorRef& feedback) const;
-
bool IsMainThread() const {
return local_isolate() == nullptr || local_isolate()->is_main_thread();
}
@@ -404,13 +345,23 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool ObjectMayBeUninitialized(Object object) const;
bool ObjectMayBeUninitialized(HeapObject object) const;
+ void set_dependencies(CompilationDependencies* dependencies) {
+ DCHECK_NOT_NULL(dependencies);
+ DCHECK_NULL(dependencies_);
+ dependencies_ = dependencies;
+ }
+ CompilationDependencies* dependencies() const {
+ DCHECK_NOT_NULL(dependencies_);
+ return dependencies_;
+ }
+
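The new dependencies() accessor lets reducers reach the CompilationDependencies through the broker instead of threading a pointer through every call. A small sketch of the intended use, mirroring the js-inlining-heuristic.cc change further down; the helper name here is hypothetical:

// Hypothetical helper for illustration; has_feedback_vector(deps) and
// broker->dependencies() are the APIs introduced by this patch.
bool HasUsableFeedbackVector(JSHeapBroker* broker, JSFunctionRef function) {
  CompilationDependencies* deps = broker->dependencies();
  return function.has_feedback_vector(deps);
}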
private:
friend class HeapObjectRef;
friend class ObjectRef;
friend class ObjectData;
friend class PropertyCellData;
- bool CanUseFeedback(const FeedbackNexus& nexus) const;
+ ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const;
const ProcessedFeedback& NewInsufficientFeedback(FeedbackSlotKind kind) const;
// Bottleneck FeedbackNexus access here, for storage in the broker
@@ -497,21 +448,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ZoneVector<ObjectData*> typed_array_string_tags_;
- struct SerializedFunction {
- SharedFunctionInfoRef shared;
- FeedbackVectorRef feedback;
-
- bool operator<(const SerializedFunction& other) const {
- if (shared.object().address() < other.shared.object().address()) {
- return true;
- }
- if (shared.object().address() == other.shared.object().address()) {
- return feedback.object().address() < other.feedback.object().address();
- }
- return false;
- }
- };
- ZoneMultimap<SerializedFunction, HintsVector> serialized_functions_;
+ CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
// ComputePropertyAccessInfo may call itself recursively. Thus we need to
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 7c5585a4bc..5692d128a7 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -27,26 +27,16 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kCheckClosure: {
- FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(node->op()));
- base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
- if (feedback_vector.has_value()) {
- feedback_vector->Serialize();
- }
- break;
- }
case IrOpcode::kHeapConstant: {
ObjectRef object = MakeRef(broker(), HeapConstantOf(node->op()));
- if (object.IsJSFunction()) object.AsJSFunction().Serialize();
if (object.IsJSObject()) {
- object.AsJSObject().SerializeObjectCreateMap();
+ object.AsJSObject().SerializeObjectCreateMap(
+ NotConcurrentInliningTag{broker()});
}
break;
}
case IrOpcode::kJSCreateArray: {
- CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
- Handle<AllocationSite> site;
- if (p.site().ToHandle(&site)) MakeRef(broker(), site);
+ CreateArrayParametersOf(node->op()).site(broker());
break;
}
case IrOpcode::kJSCreateArguments: {
@@ -56,29 +46,29 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kJSCreateBlockContext: {
- MakeRef(broker(), ScopeInfoOf(node->op()));
+ USE(ScopeInfoOf(broker(), node->op()));
break;
}
case IrOpcode::kJSCreateBoundFunction: {
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
- MakeRef(broker(), p.map());
+ p.map(broker());
break;
}
case IrOpcode::kJSCreateCatchContext: {
- MakeRef(broker(), ScopeInfoOf(node->op()));
+ USE(ScopeInfoOf(broker(), node->op()));
break;
}
case IrOpcode::kJSCreateClosure: {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- MakeRef(broker(), p.shared_info());
- MakeRef(broker(), p.code());
+ p.shared_info(broker());
+ p.code(broker());
break;
}
case IrOpcode::kJSCreateEmptyLiteralArray: {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
+ broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
@@ -90,7 +80,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
// Unary ops are treated as binary ops with respect to feedback.
- broker()->ProcessFeedbackForBinaryOperation(p.feedback());
+ broker()->GetFeedbackForBinaryOperation(p.feedback());
}
break;
}
@@ -109,7 +99,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSShiftRightLogical: {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForBinaryOperation(p.feedback());
+ broker()->GetFeedbackForBinaryOperation(p.feedback());
}
break;
}
@@ -122,64 +112,64 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSStrictEqual: {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForCompareOperation(p.feedback());
+ broker()->GetFeedbackForCompareOperation(p.feedback());
}
break;
}
case IrOpcode::kJSCreateFunctionContext: {
CreateFunctionContextParameters const& p =
CreateFunctionContextParametersOf(node->op());
- MakeRef(broker(), p.scope_info());
+ p.scope_info(broker());
break;
}
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject: {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
+ broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSCreateLiteralRegExp: {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForRegExpLiteral(p.feedback());
+ broker()->GetFeedbackForRegExpLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSGetTemplateObject: {
GetTemplateObjectParameters const& p =
GetTemplateObjectParametersOf(node->op());
- MakeRef(broker(), p.shared());
- MakeRef(broker(), p.description());
- broker()->ProcessFeedbackForTemplateObject(p.feedback());
+ p.shared(broker());
+ p.description(broker());
+ broker()->GetFeedbackForTemplateObject(p.feedback());
break;
}
case IrOpcode::kJSCreateWithContext: {
- MakeRef(broker(), ScopeInfoOf(node->op()));
+ USE(ScopeInfoOf(broker(), node->op()));
break;
}
case IrOpcode::kJSLoadNamed: {
NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name = MakeRef(broker(), p.name());
+ NameRef name = p.name(broker());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
- AccessMode::kLoad, name);
+ broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
+ name);
}
break;
}
case IrOpcode::kJSLoadNamedFromSuper: {
NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name = MakeRef(broker(), p.name());
+ NameRef name = p.name(broker());
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
- AccessMode::kLoad, name);
+ broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
+ name);
}
break;
}
case IrOpcode::kJSStoreNamed: {
NamedAccess const& p = NamedAccessOf(node->op());
- MakeRef(broker(), p.name());
+ p.name(broker());
break;
}
case IrOpcode::kStoreField:
@@ -220,8 +210,8 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
PropertyAccess const& p = PropertyAccessOf(node->op());
AccessMode access_mode = AccessMode::kLoad;
if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
- base::nullopt);
+ broker()->GetFeedbackForPropertyAccess(p.feedback(), access_mode,
+ base::nullopt);
}
break;
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 8449a0b3d5..177f35c7a0 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -37,34 +37,21 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
}
DCHECK(shared.HasBytecodeArray());
- if (!broker->IsSerializedForCompilation(shared, feedback_vector)) {
- TRACE_BROKER_MISSING(
- broker, "data for " << shared << " (not serialized for compilation)");
- TRACE("Cannot consider " << shared << " for inlining with "
- << feedback_vector << " (missing data)");
- return false;
- }
TRACE("Considering " << shared << " for inlining with " << feedback_vector);
return true;
}
bool CanConsiderForInlining(JSHeapBroker* broker,
JSFunctionRef const& function) {
- if (!function.has_feedback_vector()) {
+ if (!function.has_feedback_vector(broker->dependencies())) {
TRACE("Cannot consider " << function
<< " for inlining (no feedback vector)");
return false;
}
- if (!function.serialized() || !function.serialized_code_and_feedback()) {
- TRACE_BROKER_MISSING(
- broker, "data for " << function << " (cannot consider for inlining)");
- TRACE("Cannot consider " << function << " for inlining (missing data)");
- return false;
- }
-
- return CanConsiderForInlining(broker, function.shared(),
- function.feedback_vector());
+ return CanConsiderForInlining(
+ broker, function.shared(),
+ function.feedback_vector(broker->dependencies()));
}
} // namespace
@@ -124,7 +111,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
JSCreateClosureNode n(callee);
CreateClosureParameters const& p = n.Parameters();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- SharedFunctionInfoRef shared_info = MakeRef(broker(), p.shared_info());
+ SharedFunctionInfoRef shared_info = p.shared_info(broker());
out.shared_info = shared_info;
if (feedback_cell.value().has_value() &&
CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
@@ -819,6 +806,10 @@ void JSInliningHeuristic::PrintCandidates() {
Graph* JSInliningHeuristic::graph() const { return jsgraph()->graph(); }
+CompilationDependencies* JSInliningHeuristic::dependencies() const {
+ return broker()->dependencies();
+}
+
CommonOperatorBuilder* JSInliningHeuristic::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 848efd2f57..af8e913a47 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -100,6 +100,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSGraph* jsgraph() const { return jsgraph_; }
// TODO(neis): Make heap broker a component of JSGraph?
JSHeapBroker* broker() const { return broker_; }
+ CompilationDependencies* dependencies() const;
Isolate* isolate() const { return jsgraph_->isolate(); }
SimplifiedOperatorBuilder* simplified() const;
Mode mode() const { return mode_; }
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 150e409651..a17a43ecd2 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -305,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
- if (!function.has_feedback_vector()) {
+ if (!function.has_feedback_vector(broker()->dependencies())) {
return base::nullopt;
}
@@ -355,11 +355,11 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
- CHECK(function.has_feedback_vector());
+ CHECK(function.has_feedback_vector(broker()->dependencies()));
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
- return function.raw_feedback_cell();
+ return function.raw_feedback_cell(broker()->dependencies());
}
if (match.IsJSCreateClosure()) {
@@ -520,20 +520,29 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// always hold true.
CHECK(shared_info->is_compiled());
- if (!broker()->is_concurrent_inlining() && info_->source_positions()) {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
- shared_info->object());
+ if (info_->source_positions()) {
+ if (broker()->is_concurrent_inlining()) {
+ if (!shared_info->object()->AreSourcePositionsAvailable(
+ broker()->local_isolate_or_isolate())) {
+ // This case is expected to be very rare, since we generate source
+ // positions for all functions when debugging or profiling are turned
+ // on (see Isolate::NeedsDetailedOptimizedCodeLineInfo). Source
+ // positions should only be missing here if there is a race between 1)
+ // enabling/disabling the debugger/profiler, and 2) this compile job.
+ // In that case, we simply don't inline.
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because source positions are missing.");
+ return NoChange();
+ }
+ } else {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
+ shared_info->object());
+ }
}
// Determine the target's feedback vector and its context.
Node* context;
FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
- if (!broker()->IsSerializedForCompilation(*shared_info,
- *feedback_cell.value())) {
- TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
- << " because it wasn't serialized for compilation.");
- return NoChange();
- }
TRACE("Inlining " << *shared_info << " into " << outer_shared_info
<< ((exception_target != nullptr) ? " (inside try-block)"
@@ -683,7 +692,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// passed into this node has to be the callees context (loaded above).
if (node->opcode() == IrOpcode::kJSCall &&
is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
- Node* effect = NodeProperties::GetEffectInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) {
CallParameters const& p = CallParametersOf(node->op());
Node* global_proxy = jsgraph()->Constant(
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 30cab3ae26..e03e0d41a3 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -35,19 +35,16 @@ namespace compiler {
namespace {
-bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
- for (auto map : maps) {
- MapRef map_ref = MakeRef(broker, map);
- if (map_ref.IsHeapNumberMap()) return true;
+bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) {
+ for (MapRef map : maps) {
+ if (map.IsHeapNumberMap()) return true;
}
return false;
}
-bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
- ZoneVector<Handle<Map>> const& maps) {
- for (auto map : maps) {
- MapRef map_ref = MakeRef(broker, map);
- if (!map_ref.IsJSArrayMap()) return false;
+bool HasOnlyJSArrayMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) {
+ for (MapRef map : maps) {
+ if (!map.IsJSArrayMap()) return false;
}
return true;
}
@@ -393,42 +390,30 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Check if the right hand side is a known {receiver}, or
// we have feedback from the InstanceOfIC.
- Handle<JSObject> receiver;
+ base::Optional<JSObjectRef> receiver;
HeapObjectMatcher m(constructor);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) {
- receiver = m.Ref(broker()).AsJSObject().object();
+ receiver = m.Ref(broker()).AsJSObject();
} else if (p.feedback().IsValid()) {
ProcessedFeedback const& feedback =
broker()->GetFeedbackForInstanceOf(FeedbackSource(p.feedback()));
if (feedback.IsInsufficient()) return NoChange();
- base::Optional<JSObjectRef> maybe_receiver =
- feedback.AsInstanceOf().value();
- if (!maybe_receiver.has_value()) return NoChange();
- receiver = maybe_receiver->object();
+ receiver = feedback.AsInstanceOf().value();
} else {
return NoChange();
}
- JSObjectRef receiver_ref = MakeRef(broker(), receiver);
- MapRef receiver_map = receiver_ref.map();
+ if (!receiver.has_value()) return NoChange();
- PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone());
- if (broker()->is_concurrent_inlining()) {
- access_info = broker()->GetPropertyAccessInfo(
- receiver_map,
- MakeRef(broker(), isolate()->factory()->has_instance_symbol()),
- AccessMode::kLoad, dependencies());
- } else {
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
- access_info = access_info_factory.ComputePropertyAccessInfo(
- receiver_map.object(), factory()->has_instance_symbol(),
- AccessMode::kLoad);
- }
+ MapRef receiver_map = receiver->map();
+ NameRef name = MakeRef(broker(), isolate()->factory()->has_instance_symbol());
+ PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+ receiver_map, name, AccessMode::kLoad, dependencies());
// TODO(v8:11457) Support dictionary mode holders here.
- if (access_info.IsInvalid() || access_info.HasDictionaryHolder())
+ if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
return NoChange();
+ }
access_info.RecordDependencies(dependencies());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
@@ -456,26 +441,26 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
if (access_info.IsFastDataConstant()) {
- Handle<JSObject> holder;
- bool found_on_proto = access_info.holder().ToHandle(&holder);
- JSObjectRef holder_ref =
- found_on_proto ? MakeRef(broker(), holder) : receiver_ref;
+ base::Optional<JSObjectRef> holder = access_info.holder();
+ bool found_on_proto = holder.has_value();
+ JSObjectRef holder_ref = found_on_proto ? holder.value() : receiver.value();
base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index(),
dependencies());
if (!constant.has_value() || !constant->IsHeapObject() ||
- !constant->AsHeapObject().map().is_callable())
+ !constant->AsHeapObject().map().is_callable()) {
return NoChange();
+ }
if (found_on_proto) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- MakeRef(broker(), holder));
+ holder.value());
}
// Check that {constructor} is actually {receiver}.
- constructor =
- access_builder.BuildCheckValue(constructor, &effect, control, receiver);
+ constructor = access_builder.BuildCheckValue(constructor, &effect, control,
+ receiver->object());
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
@@ -526,19 +511,21 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
JSNativeContextSpecialization::InferHasInPrototypeChainResult
JSNativeContextSpecialization::InferHasInPrototypeChain(
- Node* receiver, Node* effect, HeapObjectRef const& prototype) {
- ZoneHandleSet<Map> receiver_maps;
+ Node* receiver, Effect effect, HeapObjectRef const& prototype) {
+ ZoneRefUnorderedSet<MapRef> receiver_maps(zone());
NodeProperties::InferMapsResult result = NodeProperties::InferMapsUnsafe(
broker(), receiver, effect, &receiver_maps);
if (result == NodeProperties::kNoMaps) return kMayBeInPrototypeChain;
+ ZoneVector<MapRef> receiver_map_refs(zone());
+
// Try to determine either that all of the {receiver_maps} have the given
// {prototype} in their chain, or that none do. If we can't tell, return
// kMayBeInPrototypeChain.
bool all = true;
bool none = true;
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- MapRef map = MakeRef(broker(), receiver_maps[i]);
+ for (MapRef map : receiver_maps) {
+ receiver_map_refs.push_back(map);
if (result == NodeProperties::kUnreliableMaps && !map.is_stable()) {
return kMayBeInPrototypeChain;
}
@@ -558,8 +545,9 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
}
map = map_prototype->map();
// TODO(v8:11457) Support dictionary mode prototypes here.
- if (!map.is_stable() || map.is_dictionary_map())
+ if (!map.is_stable() || map.is_dictionary_map()) {
return kMayBeInPrototypeChain;
+ }
if (map.oddball_type() == OddballType::kNull) {
all = false;
break;
@@ -584,7 +572,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
WhereToStart start = result == NodeProperties::kUnreliableMaps
? kStartAtReceiver
: kStartAtPrototype;
- dependencies()->DependOnStablePrototypeChains(receiver_maps, start,
+ dependencies()->DependOnStablePrototypeChains(receiver_map_refs, start,
last_prototype);
}
@@ -597,7 +585,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
DCHECK_EQ(IrOpcode::kJSHasInPrototypeChain, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* prototype = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
// Check if we can constant-fold the prototype chain walk
// for the given {value} and the {prototype}.
@@ -649,12 +637,12 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Optimize if we currently know the "prototype" property.
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- if (!function.serialized()) return NoChange();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
- if (!function.map().has_prototype_slot() || !function.has_prototype() ||
- function.PrototypeRequiresRuntimeLookup()) {
+ if (!function.map().has_prototype_slot() ||
+ !function.has_instance_prototype(dependencies()) ||
+ function.PrototypeRequiresRuntimeLookup(dependencies())) {
return NoChange();
}
@@ -677,9 +665,9 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
Node* constructor = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
@@ -712,38 +700,32 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
Node* promise = NodeProperties::GetValueInput(node, 0);
Node* resolution = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
// Check if we know something about the {resolution}.
MapInference inference(broker(), resolution, effect);
if (!inference.HaveMaps()) return NoChange();
- MapHandles const& resolution_maps = inference.GetMaps();
+ ZoneVector<MapRef> const& resolution_maps = inference.GetMaps();
// Compute property access info for "then" on {resolution}.
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- if (!broker()->is_concurrent_inlining()) {
- access_info_factory.ComputePropertyAccessInfos(
- resolution_maps, factory()->then_string(), AccessMode::kLoad,
- &access_infos);
- } else {
- // Obtain pre-computed access infos from the broker.
- for (auto map : resolution_maps) {
- MapRef map_ref = MakeRef(broker(), map);
- access_infos.push_back(broker()->GetPropertyAccessInfo(
- map_ref, MakeRef(broker(), isolate()->factory()->then_string()),
- AccessMode::kLoad, dependencies()));
- }
+
+ for (const MapRef& map : resolution_maps) {
+ access_infos.push_back(broker()->GetPropertyAccessInfo(
+ map, MakeRef(broker(), isolate()->factory()->then_string()),
+ AccessMode::kLoad, dependencies()));
}
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
// TODO(v8:11457) Support dictionary mode prototypes here.
- if (access_info.IsInvalid() || access_info.HasDictionaryHolder())
+ if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
return inference.NoChange();
+ }
// Only optimize when {resolution} definitely doesn't have a "then" property.
if (!access_info.IsNotFound()) return inference.NoChange();
@@ -793,7 +775,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* lookup_start_object, Node* receiver, Node* value,
NameRef const& name, AccessMode access_mode, Node* key,
PropertyCellRef const& property_cell, Node* effect) {
- if (!property_cell.Serialize()) {
+ if (!property_cell.Cache()) {
TRACE_BROKER_MISSING(broker(), "usable data for " << property_cell);
return NoChange();
}
@@ -1016,9 +998,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr, nullptr,
- MakeRef(broker(), p.name()), AccessMode::kLoad,
- nullptr, feedback.property_cell());
+ return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, p.name(broker()),
+ AccessMode::kLoad, nullptr,
+ feedback.property_cell());
} else {
DCHECK(feedback.IsMegamorphic());
return NoChange();
@@ -1047,9 +1029,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr, value,
- MakeRef(broker(), p.name()), AccessMode::kStore,
- nullptr, feedback.property_cell());
+ return ReduceGlobalAccess(node, nullptr, nullptr, value, p.name(broker()),
+ AccessMode::kStore, nullptr,
+ feedback.property_cell());
} else {
DCHECK(feedback.IsMegamorphic());
return NoChange();
@@ -1081,11 +1063,7 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
}
MinimorphicLoadPropertyAccessInfo access_info =
- broker()->GetPropertyAccessInfo(
- feedback, source,
- broker()->is_concurrent_inlining()
- ? SerializationPolicy::kAssumeSerialized
- : SerializationPolicy::kSerializeIfNeeded);
+ broker()->GetPropertyAccessInfo(feedback, source);
if (access_info.IsInvalid()) return NoChange();
PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr);
@@ -1095,8 +1073,8 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
}
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : feedback.maps()) {
- maps.insert(map, graph()->zone());
+ for (const MapRef& map : feedback.maps()) {
+ maps.insert(map.object(), graph()->zone());
}
effect = graph()->NewNode(
@@ -1131,9 +1109,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
STATIC_ASSERT(JSLoadNamedFromSuperNode::ReceiverIndex() == 0);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
// receiver = the object we pass to the accessor (if any) as the "this" value.
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1150,9 +1128,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Either infer maps from the graph or use the feedback.
- ZoneVector<Handle<Map>> lookup_start_object_maps(zone());
+ ZoneVector<MapRef> lookup_start_object_maps(zone());
if (!InferMaps(lookup_start_object, effect, &lookup_start_object_maps)) {
- lookup_start_object_maps = feedback.maps();
+ for (const MapRef& map : feedback.maps()) {
+ lookup_start_object_maps.push_back(map);
+ }
}
RemoveImpossibleMaps(lookup_start_object, &lookup_start_object_maps);
@@ -1160,8 +1140,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// contexts' global proxy, and turn that into a direct access to the
// corresponding global object instead.
if (lookup_start_object_maps.size() == 1) {
- MapRef lookup_start_object_map =
- MakeRef(broker(), lookup_start_object_maps[0]);
+ MapRef lookup_start_object_map = lookup_start_object_maps[0];
if (lookup_start_object_map.equals(
native_context().global_proxy_object().map())) {
if (!native_context().GlobalIsDetached()) {
@@ -1180,14 +1159,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
ZoneVector<PropertyAccessInfo> access_infos(zone());
{
ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
- for (Handle<Map> map_handle : lookup_start_object_maps) {
- MapRef map = MakeRef(broker(), map_handle);
+ for (const MapRef& map : lookup_start_object_maps) {
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map, feedback.name(), access_mode, dependencies(),
- broker()->is_concurrent_inlining()
- ? SerializationPolicy::kAssumeSerialized
- : SerializationPolicy::kSerializeIfNeeded);
+ map, feedback.name(), access_mode, dependencies());
access_infos_for_feedback.push_back(access_info);
}
@@ -1246,12 +1221,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- access_builder.BuildCheckMaps(receiver, &efalse, if_false,
- access_info.lookup_start_object_maps());
- }
+ Control if_false{graph()->NewNode(common()->IfFalse(), branch)};
+ Effect efalse = effect;
+ access_builder.BuildCheckMaps(receiver, &efalse, if_false,
+ access_info.lookup_start_object_maps());
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect =
@@ -1319,11 +1292,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* this_value = value;
Node* this_lookup_start_object = lookup_start_object;
Node* this_receiver = receiver;
- Node* this_effect = effect;
- Node* this_control = fallthrough_control;
+ Effect this_effect = effect;
+ Control this_control{fallthrough_control};
// Perform map check on {lookup_start_object}.
- ZoneVector<Handle<Map>> const& lookup_start_object_maps =
+ ZoneVector<MapRef> const& lookup_start_object_maps =
access_info.lookup_start_object_maps();
{
// Whether to insert a dedicated MapGuard node into the
@@ -1345,8 +1318,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
} else {
// Explicitly branch on the {lookup_start_object_maps}.
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : lookup_start_object_maps) {
- maps.insert(map, graph()->zone());
+ for (MapRef map : lookup_start_object_maps) {
+ maps.insert(map.object(), graph()->zone());
}
Node* check = this_effect =
graph()->NewNode(simplified()->CompareMaps(maps),
@@ -1377,8 +1350,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Introduce a MapGuard to learn from this on the effect chain.
if (insert_map_guard) {
ZoneHandleSet<Map> maps;
- for (auto lookup_start_object_map : lookup_start_object_maps) {
- maps.insert(lookup_start_object_map, graph()->zone());
+ for (MapRef map : lookup_start_object_maps) {
+ maps.insert(map.object(), graph()->zone());
}
this_effect =
graph()->NewNode(simplified()->MapGuard(maps),
@@ -1464,7 +1437,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
NamedAccess const& p = n.Parameters();
Node* const receiver = n.object();
- NameRef name = MakeRef(broker(), p.name());
+ NameRef name = p.name(broker());
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
@@ -1474,11 +1447,11 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
name.equals(MakeRef(broker(), factory()->prototype_string()))) {
// Optimize "prototype" property of functions.
JSFunctionRef function = object.AsJSFunction();
- if (!function.serialized()) return NoChange();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
- if (!function.map().has_prototype_slot() || !function.has_prototype() ||
- function.PrototypeRequiresRuntimeLookup()) {
+ if (!function.map().has_prototype_slot() ||
+ !function.has_instance_prototype(dependencies()) ||
+ function.PrototypeRequiresRuntimeLookup(dependencies())) {
return NoChange();
}
ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
@@ -1504,7 +1477,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper(
Node* node) {
JSLoadNamedFromSuperNode n(node);
NamedAccess const& p = n.Parameters();
- NameRef name = MakeRef(broker(), p.name());
+ NameRef name = p.name(broker());
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
@@ -1522,7 +1495,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
Control control = n.control();
// Load iterator property operator
- Handle<Name> iterator_symbol = factory()->iterator_symbol();
+ NameRef iterator_symbol = MakeRef(broker(), factory()->iterator_symbol());
const Operator* load_op =
javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
@@ -1599,17 +1572,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
JSStoreNamedNode n(node);
NamedAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, MakeRef(broker(), p.name()),
- n.value(), FeedbackSource(p.feedback()),
- AccessMode::kStore);
+ return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
+ FeedbackSource(p.feedback()), AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
JSStoreNamedOwnNode n(node);
StoreNamedOwnParameters const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, MakeRef(broker(), p.name()),
- n.value(), FeedbackSource(p.feedback()),
+ return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
+ FeedbackSource(p.feedback()),
AccessMode::kStoreInLiteral);
}
@@ -1642,6 +1614,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
}
namespace {
+
base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Node* receiver) {
HeapObjectMatcher m(receiver);
@@ -1652,34 +1625,34 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
if (typed_array.is_on_heap()) return base::nullopt;
return typed_array;
}
+
} // namespace
void JSNativeContextSpecialization::RemoveImpossibleMaps(
- Node* object, ZoneVector<Handle<Map>>* maps) const {
+ Node* object, ZoneVector<MapRef>* maps) const {
base::Optional<MapRef> root_map = InferRootMap(object);
if (root_map.has_value() && !root_map->is_abandoned_prototype_map()) {
- maps->erase(
- std::remove_if(maps->begin(), maps->end(),
- [root_map, this](Handle<Map> map) {
- MapRef map_ref = MakeRef(broker(), map);
- return map_ref.is_abandoned_prototype_map() ||
- (map_ref.FindRootMap().has_value() &&
- !map_ref.FindRootMap()->equals(*root_map));
- }),
- maps->end());
+ maps->erase(std::remove_if(maps->begin(), maps->end(),
+ [root_map](const MapRef& map) {
+ return map.is_abandoned_prototype_map() ||
+ (map.FindRootMap().has_value() &&
+ !map.FindRootMap()->equals(*root_map));
+ }),
+ maps->end());
}
}
// Possibly refine the feedback using inferred map information from the graph.
ElementAccessFeedback const&
JSNativeContextSpecialization::TryRefineElementAccessFeedback(
- ElementAccessFeedback const& feedback, Node* receiver, Node* effect) const {
+ ElementAccessFeedback const& feedback, Node* receiver,
+ Effect effect) const {
AccessMode access_mode = feedback.keyed_mode().access_mode();
bool use_inference =
access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas;
if (!use_inference) return feedback;
- ZoneVector<Handle<Map>> inferred_maps(zone());
+ ZoneVector<MapRef> inferred_maps(zone());
if (!InferMaps(receiver, effect, &inferred_maps)) return feedback;
RemoveImpossibleMaps(receiver, &inferred_maps);
@@ -1687,7 +1660,7 @@ JSNativeContextSpecialization::TryRefineElementAccessFeedback(
// impossible maps when a target is kept only because more than one of its
// sources was inferred. Think of a way to completely rule out impossible
// maps.
- return feedback.Refine(inferred_maps, zone());
+ return feedback.Refine(broker(), inferred_maps);
}
Reduction JSNativeContextSpecialization::ReduceElementAccess(
@@ -1705,10 +1678,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
JSHasPropertyNode::ObjectIndex() == 0);
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state =
- NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
+ Effect effect{NodeProperties::GetEffectInput(node)};
+ Control control{NodeProperties::GetControlInput(node)};
// TODO(neis): It's odd that we do optimizations below that don't really care
// about the feedback, but we don't do them when the feedback is megamorphic.
@@ -1749,8 +1720,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// the zone allocation of this vector.
ZoneVector<MapRef> prototype_maps(zone());
for (ElementAccessInfo const& access_info : access_infos) {
- for (Handle<Map> map : access_info.lookup_start_object_maps()) {
- MapRef receiver_map = MakeRef(broker(), map);
+ for (MapRef receiver_map : access_info.lookup_start_object_maps()) {
// If the {receiver_map} has a prototype and its elements backing
// store is either holey, or we have a potentially growing store,
// then we need to check that all prototypes have stable maps with
@@ -1793,11 +1763,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ElementAccessInfo access_info = access_infos.front();
// Perform possible elements kind transitions.
- MapRef transition_target =
- MakeRef(broker(), access_info.lookup_start_object_maps().front());
- for (auto source : access_info.transition_sources()) {
+ MapRef transition_target = access_info.lookup_start_object_maps().front();
+ for (MapRef transition_source : access_info.transition_sources()) {
DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
- MapRef transition_source = MakeRef(broker(), source);
effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source.elements_kind(),
@@ -1813,6 +1781,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// elements kind transition above. This is because those operators
// don't have the kNoWrite flag on it, even though they are not
// observable by JavaScript.
+ Node* frame_state =
+ NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
@@ -1841,14 +1811,12 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* this_receiver = receiver;
Node* this_value = value;
Node* this_index = index;
- Node* this_effect = effect;
- Node* this_control = fallthrough_control;
+ Effect this_effect = effect;
+ Control this_control{fallthrough_control};
// Perform possible elements kind transitions.
- MapRef transition_target =
- MakeRef(broker(), access_info.lookup_start_object_maps().front());
- for (auto source : access_info.transition_sources()) {
- MapRef transition_source = MakeRef(broker(), source);
+ MapRef transition_target = access_info.lookup_start_object_maps().front();
+ for (MapRef transition_source : access_info.transition_sources()) {
DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
@@ -1861,7 +1829,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Perform map check(s) on {receiver}.
- ZoneVector<Handle<Map>> const& receiver_maps =
+ ZoneVector<MapRef> const& receiver_maps =
access_info.lookup_start_object_maps();
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
@@ -1872,8 +1840,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
} else {
// Explicitly branch on the {receiver_maps}.
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : receiver_maps) {
- maps.insert(map, graph()->zone());
+ for (MapRef map : receiver_maps) {
+ maps.insert(map.object(), graph()->zone());
}
Node* check = this_effect =
graph()->NewNode(simplified()->CompareMaps(maps), receiver,
@@ -2213,14 +2181,13 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* receiver, ConvertReceiverMode receiver_mode, Node* context,
Node* frame_state, Node** effect, Node** control,
ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
- ObjectRef constant = MakeRef(broker(), access_info.constant());
+ ObjectRef constant = access_info.constant().value();
if (access_info.IsDictionaryProtoAccessorConstant()) {
// For fast mode holders we recorded dependencies in BuildPropertyLoad.
- for (const Handle<Map> map : access_info.lookup_start_object_maps()) {
+ for (const MapRef map : access_info.lookup_start_object_maps()) {
dependencies()->DependOnConstantInDictionaryPrototypeChain(
- MakeRef(broker(), map), MakeRef(broker(), access_info.name()),
- constant, PropertyKind::kAccessor);
+ map, access_info.name(), constant, PropertyKind::kAccessor);
}
}
@@ -2235,10 +2202,9 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
receiver_mode),
target, receiver, feedback, context, frame_state, *effect, *control);
} else {
- Node* holder = access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(MakeRef(
- broker(), access_info.holder().ToHandleChecked()));
+ Node* holder = access_info.holder().has_value()
+ ? jsgraph()->Constant(access_info.holder().value())
+ : receiver;
value = InlineApiCall(receiver, holder, frame_state, nullptr, effect,
control, constant.AsFunctionTemplateInfo());
}
@@ -2258,7 +2224,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* receiver, Node* value, Node* context, Node* frame_state,
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- ObjectRef constant = MakeRef(broker(), access_info.constant());
+ ObjectRef constant = access_info.constant().value();
Node* target = jsgraph()->Constant(constant);
// Introduce the call to the setter function.
if (constant.IsJSFunction()) {
@@ -2270,10 +2236,9 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
target, receiver, value, feedback, context, frame_state, *effect,
*control);
} else {
- Node* holder = access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(MakeRef(
- broker(), access_info.holder().ToHandleChecked()));
+ Node* holder = access_info.holder().has_value()
+ ? jsgraph()->Constant(access_info.holder().value())
+ : receiver;
InlineApiCall(receiver, holder, frame_state, value, effect, control,
constant.AsFunctionTemplateInfo());
}
@@ -2347,12 +2312,11 @@ JSNativeContextSpecialization::BuildPropertyLoad(
Node* effect, Node* control, NameRef const& name,
ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
- Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder) &&
- !access_info.HasDictionaryHolder()) {
+ base::Optional<JSObjectRef> holder = access_info.holder();
+ if (holder.has_value() && !access_info.HasDictionaryHolder()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- MakeRef(broker(), holder));
+ holder.value());
}
// Generate the actual property access.
@@ -2369,8 +2333,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
InlinePropertyGetterCall(receiver, receiver_mode, context, frame_state,
&effect, &control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
- Node* cell =
- jsgraph()->Constant(MakeRef(broker(), access_info.constant()).AsCell());
+ Node* cell = jsgraph()->Constant(access_info.constant().value().AsCell());
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -2402,11 +2365,11 @@ JSNativeContextSpecialization::BuildPropertyTest(
DCHECK(!access_info.HasDictionaryHolder());
// Determine actual holder and perform prototype chain checks.
- Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder)) {
+ base::Optional<JSObjectRef> holder = access_info.holder();
+ if (holder.has_value()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- MakeRef(broker(), holder));
+ holder.value());
}
Node* value = access_info.IsNotFound() ? jsgraph()->FalseConstant()
@@ -2444,13 +2407,13 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info, AccessMode access_mode) {
// Determine actual holder and perform prototype chain checks.
- Handle<JSObject> holder;
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- if (access_info.holder().ToHandle(&holder)) {
+ base::Optional<JSObjectRef> holder = access_info.holder();
+ if (holder.has_value()) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- MakeRef(broker(), holder));
+ holder.value());
}
DCHECK(!access_info.IsNotFound());
@@ -2571,13 +2534,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
} else if (field_representation ==
MachineRepresentation::kTaggedPointer) {
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
+ base::Optional<MapRef> field_map = access_info.field_map();
+ if (field_map.has_value()) {
// Emit a map check for the value.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(field_map)),
- value, effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(field_map->object())),
+ value, effect, control);
} else {
// Ensure that {value} is a HeapObject.
value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
@@ -2603,11 +2567,11 @@ JSNativeContextSpecialization::BuildPropertyStore(
UNREACHABLE();
}
// Check if we need to perform a transitioning store.
- Handle<Map> transition_map;
- if (access_info.transition_map().ToHandle(&transition_map)) {
+ base::Optional<MapRef> transition_map = access_info.transition_map();
+ if (transition_map.has_value()) {
// Check if we need to grow the properties backing store
// with this transitioning store.
- MapRef transition_map_ref = MakeRef(broker(), transition_map);
+ MapRef transition_map_ref = transition_map.value();
MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -2674,7 +2638,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
+ Effect effect{NodeProperties::GetEffectInput(node)};
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAreJSReceiver()) {
@@ -2709,7 +2673,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
- ZoneVector<Handle<Map>> const& receiver_maps =
+ ZoneVector<MapRef> const& receiver_maps =
access_info.lookup_start_object_maps();
if (IsTypedArrayElementsKind(elements_kind)) {
@@ -3451,12 +3415,11 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name,
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
- ZoneVector<Handle<Map>> const& receiver_maps) {
+ ZoneVector<MapRef> const& receiver_maps) {
// Check if all {receiver_maps} have one of the initial Array.prototype
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map = MakeRef(broker(), map);
+ for (MapRef receiver_map : receiver_maps) {
ObjectRef receiver_prototype = receiver_map.prototype().value();
if (!receiver_prototype.IsJSObject() ||
!broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
@@ -3468,25 +3431,24 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
return dependencies()->DependOnNoElementsProtector();
}
-bool JSNativeContextSpecialization::InferMaps(
- Node* object, Node* effect, ZoneVector<Handle<Map>>* maps) const {
- ZoneHandleSet<Map> map_set;
+bool JSNativeContextSpecialization::InferMaps(Node* object, Effect effect,
+ ZoneVector<MapRef>* maps) const {
+ ZoneRefUnorderedSet<MapRef> map_set(broker()->zone());
NodeProperties::InferMapsResult result =
NodeProperties::InferMapsUnsafe(broker(), object, effect, &map_set);
if (result == NodeProperties::kReliableMaps) {
- for (size_t i = 0; i < map_set.size(); ++i) {
- maps->push_back(map_set[i]);
+ for (const MapRef& map : map_set) {
+ maps->push_back(map);
}
return true;
} else if (result == NodeProperties::kUnreliableMaps) {
// For untrusted maps, we can still use the information
// if the maps are stable.
- for (size_t i = 0; i < map_set.size(); ++i) {
- MapRef map = MakeRef(broker(), map_set[i]);
+ for (const MapRef& map : map_set) {
if (!map.is_stable()) return false;
}
- for (size_t i = 0; i < map_set.size(); ++i) {
- maps->push_back(map_set[i]);
+ for (const MapRef& map : map_set) {
+ maps->push_back(map);
}
return true;
}
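Throughout this file the change replaces the MaybeHandle-style holder test (ToHandle with an out-parameter) with an explicit optional ref. A rough before/after sketch, assuming std::optional behaves like base::Optional; HolderLike and UseHolderIfPresent are illustrative names only, not V8 API:

#include <optional>

struct HolderLike {};  // stands in for JSObjectRef

// Old shape (MaybeHandle-style): success is signalled through an out-parameter.
//   Handle<JSObject> holder;
//   if (access_info.holder().ToHandle(&holder)) { Use(holder); }
//
// New shape: the accessor returns an optional ref and the check is explicit.
void UseHolderIfPresent(const std::optional<HolderLike>& holder) {
  if (holder.has_value()) {
    // e.g. dependencies()->DependOnStablePrototypeChains(..., holder.value());
  }
}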
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 0a6d1e9536..7f67a4d67c 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -210,17 +210,16 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
- bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps);
+ bool CanTreatHoleAsUndefined(ZoneVector<MapRef> const& receiver_maps);
- void RemoveImpossibleMaps(Node* object, ZoneVector<Handle<Map>>* maps) const;
+ void RemoveImpossibleMaps(Node* object, ZoneVector<MapRef>* maps) const;
ElementAccessFeedback const& TryRefineElementAccessFeedback(
ElementAccessFeedback const& feedback, Node* receiver,
- Node* effect) const;
+ Effect effect) const;
// Try to infer maps for the given {object} at the current {effect}.
- bool InferMaps(Node* object, Node* effect,
- ZoneVector<Handle<Map>>* maps) const;
+ bool InferMaps(Node* object, Effect effect, ZoneVector<MapRef>* maps) const;
// Try to infer a root map for the {object} independent of the current program
// location.
@@ -235,7 +234,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
kMayBeInPrototypeChain
};
InferHasInPrototypeChainResult InferHasInPrototypeChain(
- Node* receiver, Node* effect, HeapObjectRef const& prototype);
+ Node* receiver, Effect effect, HeapObjectRef const& prototype);
Node* BuildLoadPrototypeFromObject(Node* object, Node* effect, Node* control);
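The signatures above switch several raw Node* parameters to the typed Effect/Control wrappers. A rough idea of such a thin wrapper, written from scratch for illustration only (this is not V8's actual definition):

class Node;  // opaque graph node, as in the compiler

// A zero-overhead wrapper that documents (and type-checks) that a given node
// is used as the effect dependency rather than an arbitrary value.
class EffectLike {
 public:
  explicit EffectLike(Node* node) : node_(node) {}
  operator Node*() const { return node_; }  // still usable where Node* is expected

 private:
  Node* node_;
};

// Usage sketch, mirroring the call sites above:
//   EffectLike effect{NodeProperties::GetEffectInput(node)};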
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 6bd0efd389..a06416b6f2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -28,6 +28,12 @@ constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) {
: Operator::kNoProperties;
}
+template <class T>
+Address AddressOrNull(base::Optional<T> ref) {
+ if (!ref.has_value()) return kNullAddress;
+ return ref->object().address();
+}
+
} // namespace
namespace js_node_wrapper_utils {
@@ -177,15 +183,10 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
return OpParameter<ContextAccess>(op);
}
-CreateFunctionContextParameters::CreateFunctionContextParameters(
- Handle<ScopeInfo> scope_info, int slot_count, ScopeType scope_type)
- : scope_info_(scope_info),
- slot_count_(slot_count),
- scope_type_(scope_type) {}
-
bool operator==(CreateFunctionContextParameters const& lhs,
CreateFunctionContextParameters const& rhs) {
- return lhs.scope_info().location() == rhs.scope_info().location() &&
+ return lhs.scope_info_.object().location() ==
+ rhs.scope_info_.object().location() &&
lhs.slot_count() == rhs.slot_count() &&
lhs.scope_type() == rhs.scope_type();
}
@@ -196,7 +197,7 @@ bool operator!=(CreateFunctionContextParameters const& lhs,
}
size_t hash_value(CreateFunctionContextParameters const& parameters) {
- return base::hash_combine(parameters.scope_info().location(),
+ return base::hash_combine(parameters.scope_info_.object().location(),
parameters.slot_count(),
static_cast<int>(parameters.scope_type()));
}
@@ -214,7 +215,7 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
bool operator==(StoreNamedOwnParameters const& lhs,
StoreNamedOwnParameters const& rhs) {
- return lhs.name().location() == rhs.name().location() &&
+ return lhs.name_.object().location() == rhs.name_.object().location() &&
lhs.feedback() == rhs.feedback();
}
@@ -224,12 +225,12 @@ bool operator!=(StoreNamedOwnParameters const& lhs,
}
size_t hash_value(StoreNamedOwnParameters const& p) {
- return base::hash_combine(p.name().location(),
+ return base::hash_combine(p.name_.object().location(),
FeedbackSource::Hash()(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) {
- return os << Brief(*p.name());
+ return os << Brief(*p.name_.object());
}
StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) {
@@ -264,7 +265,7 @@ FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
}
bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
- return lhs.name().location() == rhs.name().location() &&
+ return lhs.name_.object().location() == rhs.name_.object().location() &&
lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
@@ -276,13 +277,13 @@ bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) {
size_t hash_value(NamedAccess const& p) {
- return base::hash_combine(p.name().location(), p.language_mode(),
+ return base::hash_combine(p.name_.object().location(), p.language_mode(),
FeedbackSource::Hash()(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, NamedAccess const& p) {
- return os << Brief(*p.name()) << ", " << p.language_mode();
+ return os << Brief(*p.name_.object()) << ", " << p.language_mode();
}
@@ -326,7 +327,7 @@ size_t hash_value(PropertyAccess const& p) {
bool operator==(LoadGlobalParameters const& lhs,
LoadGlobalParameters const& rhs) {
- return lhs.name().location() == rhs.name().location() &&
+ return lhs.name_.object().location() == rhs.name_.object().location() &&
lhs.feedback() == rhs.feedback() &&
lhs.typeof_mode() == rhs.typeof_mode();
}
@@ -339,13 +340,14 @@ bool operator!=(LoadGlobalParameters const& lhs,
size_t hash_value(LoadGlobalParameters const& p) {
- return base::hash_combine(p.name().location(),
+ return base::hash_combine(p.name_.object().location(),
static_cast<int>(p.typeof_mode()));
}
std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
- return os << Brief(*p.name()) << ", " << static_cast<int>(p.typeof_mode());
+ return os << Brief(*p.name_.object()) << ", "
+ << static_cast<int>(p.typeof_mode());
}
@@ -358,7 +360,7 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op) {
bool operator==(StoreGlobalParameters const& lhs,
StoreGlobalParameters const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
- lhs.name().location() == rhs.name().location() &&
+ lhs.name_.object().location() == rhs.name_.object().location() &&
lhs.feedback() == rhs.feedback();
}
@@ -370,13 +372,13 @@ bool operator!=(StoreGlobalParameters const& lhs,
size_t hash_value(StoreGlobalParameters const& p) {
- return base::hash_combine(p.language_mode(), p.name().location(),
+ return base::hash_combine(p.language_mode(), p.name_.object().location(),
FeedbackSource::Hash()(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name());
+ return os << p.language_mode() << ", " << Brief(*p.name_.object());
}
@@ -391,11 +393,10 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) {
return OpParameter<CreateArgumentsType>(op);
}
-
bool operator==(CreateArrayParameters const& lhs,
CreateArrayParameters const& rhs) {
return lhs.arity() == rhs.arity() &&
- lhs.site().address() == rhs.site().address();
+ AddressOrNull(lhs.site_) == AddressOrNull(rhs.site_);
}
@@ -406,14 +407,15 @@ bool operator!=(CreateArrayParameters const& lhs,
size_t hash_value(CreateArrayParameters const& p) {
- return base::hash_combine(p.arity(), p.site().address());
+ return base::hash_combine(p.arity(), AddressOrNull(p.site_));
}
std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
os << p.arity();
- Handle<AllocationSite> site;
- if (p.site().ToHandle(&site)) os << ", " << Brief(*site);
+ if (p.site_.has_value()) {
+ os << ", " << Brief(*p.site_->object());
+ }
return os;
}
@@ -477,7 +479,7 @@ const CreateCollectionIteratorParameters& CreateCollectionIteratorParametersOf(
bool operator==(CreateBoundFunctionParameters const& lhs,
CreateBoundFunctionParameters const& rhs) {
return lhs.arity() == rhs.arity() &&
- lhs.map().location() == rhs.map().location();
+ lhs.map_.object().location() == rhs.map_.object().location();
}
bool operator!=(CreateBoundFunctionParameters const& lhs,
@@ -486,13 +488,13 @@ bool operator!=(CreateBoundFunctionParameters const& lhs,
}
size_t hash_value(CreateBoundFunctionParameters const& p) {
- return base::hash_combine(p.arity(), p.map().location());
+ return base::hash_combine(p.arity(), p.map_.object().location());
}
std::ostream& operator<<(std::ostream& os,
CreateBoundFunctionParameters const& p) {
os << p.arity();
- if (!p.map().is_null()) os << ", " << Brief(*p.map());
+ if (!p.map_.object().is_null()) os << ", " << Brief(*p.map_.object());
return os;
}
@@ -504,8 +506,9 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
bool operator==(GetTemplateObjectParameters const& lhs,
GetTemplateObjectParameters const& rhs) {
- return lhs.description().location() == rhs.description().location() &&
- lhs.shared().location() == rhs.shared().location() &&
+ return lhs.description_.object().location() ==
+ rhs.description_.object().location() &&
+ lhs.shared_.object().location() == rhs.shared_.object().location() &&
lhs.feedback() == rhs.feedback();
}
@@ -515,13 +518,15 @@ bool operator!=(GetTemplateObjectParameters const& lhs,
}
size_t hash_value(GetTemplateObjectParameters const& p) {
- return base::hash_combine(p.description().location(), p.shared().location(),
+ return base::hash_combine(p.description_.object().location(),
+ p.shared_.object().location(),
FeedbackSource::Hash()(p.feedback()));
}
std::ostream& operator<<(std::ostream& os,
GetTemplateObjectParameters const& p) {
- return os << Brief(*p.description()) << ", " << Brief(*p.shared());
+ return os << Brief(*p.description_.object()) << ", "
+ << Brief(*p.shared_.object());
}
const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
@@ -533,8 +538,9 @@ const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.allocation() == rhs.allocation() &&
- lhs.code().location() == rhs.code().location() &&
- lhs.shared_info().location() == rhs.shared_info().location();
+ lhs.code_.object().location() == rhs.code_.object().location() &&
+ lhs.shared_info_.object().location() ==
+ rhs.shared_info_.object().location();
}
@@ -545,13 +551,14 @@ bool operator!=(CreateClosureParameters const& lhs,
size_t hash_value(CreateClosureParameters const& p) {
- return base::hash_combine(p.allocation(), p.shared_info().location());
+ return base::hash_combine(p.allocation(), p.code_.object().location(),
+ p.shared_info_.object().location());
}
std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) {
- return os << p.allocation() << ", " << Brief(*p.shared_info()) << ", "
- << Brief(*p.code());
+ return os << p.allocation() << ", " << Brief(*p.shared_info_.object()) << ", "
+ << Brief(*p.code_.object());
}
@@ -563,7 +570,8 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
bool operator==(CreateLiteralParameters const& lhs,
CreateLiteralParameters const& rhs) {
- return lhs.constant().location() == rhs.constant().location() &&
+ return lhs.constant_.object().location() ==
+ rhs.constant_.object().location() &&
lhs.feedback() == rhs.feedback() && lhs.length() == rhs.length() &&
lhs.flags() == rhs.flags();
}
@@ -576,14 +584,15 @@ bool operator!=(CreateLiteralParameters const& lhs,
size_t hash_value(CreateLiteralParameters const& p) {
- return base::hash_combine(p.constant().location(),
+ return base::hash_combine(p.constant_.object().location(),
FeedbackSource::Hash()(p.feedback()), p.length(),
p.flags());
}
std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
- return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags();
+ return os << Brief(*p.constant_.object()) << ", " << p.length() << ", "
+ << p.flags();
}
@@ -983,7 +992,7 @@ const Operator* JSOperatorBuilder::ConstructWithSpread(
parameters); // parameter
}
-const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
+const Operator* JSOperatorBuilder::LoadNamed(const NameRef& name,
const FeedbackSource& feedback) {
static constexpr int kObject = 1;
static constexpr int kFeedbackVector = 1;
@@ -997,7 +1006,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
}
const Operator* JSOperatorBuilder::LoadNamedFromSuper(
- Handle<Name> name, const FeedbackSource& feedback) {
+ const NameRef& name, const FeedbackSource& feedback) {
static constexpr int kReceiver = 1;
static constexpr int kHomeObject = 1;
static constexpr int kFeedbackVector = 1;
@@ -1090,7 +1099,7 @@ int RestoreRegisterIndexOf(const Operator* op) {
}
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
- Handle<Name> name,
+ const NameRef& name,
FeedbackSource const& feedback) {
static constexpr int kObject = 1;
static constexpr int kValue = 1;
@@ -1115,7 +1124,7 @@ const Operator* JSOperatorBuilder::StoreProperty(
}
const Operator* JSOperatorBuilder::StoreNamedOwn(
- Handle<Name> name, FeedbackSource const& feedback) {
+ const NameRef& name, FeedbackSource const& feedback) {
static constexpr int kObject = 1;
static constexpr int kValue = 1;
static constexpr int kFeedbackVector = 1;
@@ -1142,7 +1151,7 @@ const Operator* JSOperatorBuilder::CreateGeneratorObject() {
2, 1, 1, 1, 1, 0); // counts
}
-const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
+const Operator* JSOperatorBuilder::LoadGlobal(const NameRef& name,
const FeedbackSource& feedback,
TypeofMode typeof_mode) {
static constexpr int kFeedbackVector = 1;
@@ -1156,7 +1165,7 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
}
const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
- const Handle<Name>& name,
+ const NameRef& name,
const FeedbackSource& feedback) {
static constexpr int kValue = 1;
static constexpr int kFeedbackVector = 1;
@@ -1235,7 +1244,7 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
}
const Operator* JSOperatorBuilder::CreateArray(
- size_t arity, MaybeHandle<AllocationSite> site) {
+ size_t arity, base::Optional<AllocationSiteRef> site) {
// constructor, new_target, arg1, ..., argN
int const value_input_count = static_cast<int>(arity) + 2;
CreateArrayParameters parameters(arity, site);
@@ -1275,7 +1284,7 @@ const Operator* JSOperatorBuilder::CreateCollectionIterator(
}
const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
- Handle<Map> map) {
+ const MapRef& map) {
// bound_target_function, bound_this, arg1, ..., argN
int const value_input_count = static_cast<int>(arity) + 2;
CreateBoundFunctionParameters parameters(arity, map);
@@ -1287,7 +1296,7 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
}
const Operator* JSOperatorBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, Handle<CodeT> code,
+ const SharedFunctionInfoRef& shared_info, const CodeTRef& code,
AllocationType allocation) {
static constexpr int kFeedbackCell = 1;
static constexpr int kArity = kFeedbackCell;
@@ -1300,7 +1309,7 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
- Handle<ArrayBoilerplateDescription> description,
+ const ArrayBoilerplateDescriptionRef& description,
FeedbackSource const& feedback, int literal_flags, int number_of_elements) {
CreateLiteralParameters parameters(description, feedback, number_of_elements,
literal_flags);
@@ -1334,7 +1343,7 @@ const Operator* JSOperatorBuilder::CreateArrayFromIterable() {
}
const Operator* JSOperatorBuilder::CreateLiteralObject(
- Handle<ObjectBoilerplateDescription> constant_properties,
+ const ObjectBoilerplateDescriptionRef& constant_properties,
FeedbackSource const& feedback, int literal_flags,
int number_of_properties) {
CreateLiteralParameters parameters(constant_properties, feedback,
@@ -1348,8 +1357,8 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
}
const Operator* JSOperatorBuilder::GetTemplateObject(
- Handle<TemplateObjectDescription> description,
- Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback) {
+ const TemplateObjectDescriptionRef& description,
+ const SharedFunctionInfoRef& shared, FeedbackSource const& feedback) {
GetTemplateObjectParameters parameters(description, shared, feedback);
return zone()->New<Operator1<GetTemplateObjectParameters>>( // --
IrOpcode::kJSGetTemplateObject, // opcode
@@ -1388,7 +1397,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
}
const Operator* JSOperatorBuilder::CreateLiteralRegExp(
- Handle<String> constant_pattern, FeedbackSource const& feedback,
+ const StringRef& constant_pattern, FeedbackSource const& feedback,
int literal_flags) {
CreateLiteralParameters parameters(constant_pattern, feedback, -1,
literal_flags);
@@ -1401,7 +1410,7 @@ const Operator* JSOperatorBuilder::CreateLiteralRegExp(
}
const Operator* JSOperatorBuilder::CreateFunctionContext(
- Handle<ScopeInfo> scope_info, int slot_count, ScopeType scope_type) {
+ const ScopeInfoRef& scope_info, int slot_count, ScopeType scope_type) {
CreateFunctionContextParameters parameters(scope_info, slot_count,
scope_type);
return zone()->New<Operator1<CreateFunctionContextParameters>>( // --
@@ -1412,37 +1421,53 @@ const Operator* JSOperatorBuilder::CreateFunctionContext(
}
const Operator* JSOperatorBuilder::CreateCatchContext(
- const Handle<ScopeInfo>& scope_info) {
- return zone()->New<Operator1<Handle<ScopeInfo>>>(
+ const ScopeInfoRef& scope_info) {
+ return zone()->New<Operator1<ScopeInfoTinyRef>>(
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
1, 1, 1, 1, 1, 2, // counts
- scope_info); // parameter
+ ScopeInfoTinyRef{scope_info}); // parameter
}
const Operator* JSOperatorBuilder::CreateWithContext(
- const Handle<ScopeInfo>& scope_info) {
- return zone()->New<Operator1<Handle<ScopeInfo>>>(
+ const ScopeInfoRef& scope_info) {
+ return zone()->New<Operator1<ScopeInfoTinyRef>>(
IrOpcode::kJSCreateWithContext, Operator::kNoProperties, // opcode
"JSCreateWithContext", // name
1, 1, 1, 1, 1, 2, // counts
- scope_info); // parameter
+ ScopeInfoTinyRef{scope_info}); // parameter
}
const Operator* JSOperatorBuilder::CreateBlockContext(
- const Handle<ScopeInfo>& scope_info) {
- return zone()->New<Operator1<Handle<ScopeInfo>>>( // --
+ const ScopeInfoRef& scope_info) {
+ return zone()->New<Operator1<ScopeInfoTinyRef>>( // --
IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
"JSCreateBlockContext", // name
0, 1, 1, 1, 1, 2, // counts
- scope_info); // parameter
+ ScopeInfoTinyRef{scope_info}); // parameter
}
-Handle<ScopeInfo> ScopeInfoOf(const Operator* op) {
+ScopeInfoRef ScopeInfoOf(JSHeapBroker* broker, const Operator* op) {
DCHECK(IrOpcode::kJSCreateBlockContext == op->opcode() ||
IrOpcode::kJSCreateWithContext == op->opcode() ||
IrOpcode::kJSCreateCatchContext == op->opcode());
- return OpParameter<Handle<ScopeInfo>>(op);
+ return OpParameter<ScopeInfoTinyRef>(op).AsRef(broker);
+}
+
+bool operator==(ScopeInfoTinyRef const& lhs, ScopeInfoTinyRef const& rhs) {
+ return lhs.object().location() == rhs.object().location();
+}
+
+bool operator!=(ScopeInfoTinyRef const& lhs, ScopeInfoTinyRef const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(ScopeInfoTinyRef const& ref) {
+ return reinterpret_cast<size_t>(ref.object().location());
+}
+
+std::ostream& operator<<(std::ostream& os, ScopeInfoTinyRef const& ref) {
+ return os << Brief(*ref.object());
}
#undef CACHED_OP_LIST
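The operator parameter classes touched in this file now store broker-independent "tiny refs" and rehydrate a full ref on demand through a broker-taking accessor. A simplified sketch of that shape; every name here (BrokerLike, NameTinyRefLike, NamedAccessLike, the std::string payload) is a stand-in for illustration, not the real heap-ref machinery:

#include <string>
#include <utility>

class BrokerLike;  // stands in for JSHeapBroker; only ever passed by pointer here

// A "tiny ref" remembers just enough to find the object again; the full,
// broker-backed ref is produced on demand instead of being stored.
class NameTinyRefLike {
 public:
  explicit NameTinyRefLike(std::string payload) : payload_(std::move(payload)) {}
  std::string AsRef(BrokerLike*) const { return payload_; }  // placeholder rehydration

 private:
  std::string payload_;
};

// Operator parameters keep the tiny ref private and expose a broker-taking
// accessor, mirroring NamedAccess::name(JSHeapBroker*) above.
class NamedAccessLike {
 public:
  explicit NamedAccessLike(NameTinyRefLike name) : name_(std::move(name)) {}
  std::string name(BrokerLike* broker) const { return name_.AsRef(broker); }

 private:
  const NameTinyRefLike name_;
};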
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 5bc2734023..260e366af8 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -363,28 +363,33 @@ V8_EXPORT_PRIVATE ContextAccess const& ContextAccessOf(Operator const*);
// is used as a parameter by the JSCreateFunctionContext operator.
class CreateFunctionContextParameters final {
public:
- CreateFunctionContextParameters(Handle<ScopeInfo> scope_info, int slot_count,
- ScopeType scope_type);
+ CreateFunctionContextParameters(const ScopeInfoRef& scope_info,
+ int slot_count, ScopeType scope_type)
+ : scope_info_(scope_info),
+ slot_count_(slot_count),
+ scope_type_(scope_type) {}
- Handle<ScopeInfo> scope_info() const { return scope_info_; }
+ ScopeInfoRef scope_info(JSHeapBroker* broker) const {
+ return scope_info_.AsRef(broker);
+ }
int slot_count() const { return slot_count_; }
ScopeType scope_type() const { return scope_type_; }
private:
- Handle<ScopeInfo> scope_info_;
+ const ScopeInfoTinyRef scope_info_;
int const slot_count_;
ScopeType const scope_type_;
-};
-bool operator==(CreateFunctionContextParameters const& lhs,
- CreateFunctionContextParameters const& rhs);
-bool operator!=(CreateFunctionContextParameters const& lhs,
- CreateFunctionContextParameters const& rhs);
+ friend bool operator==(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs);
+ friend bool operator!=(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs);
-size_t hash_value(CreateFunctionContextParameters const& parameters);
+ friend size_t hash_value(CreateFunctionContextParameters const& parameters);
-std::ostream& operator<<(std::ostream& os,
- CreateFunctionContextParameters const& parameters);
+ friend std::ostream& operator<<(
+ std::ostream& os, CreateFunctionContextParameters const& parameters);
+};
CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
Operator const*);
@@ -392,23 +397,24 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
// Defines parameters for JSStoreNamedOwn operator.
class StoreNamedOwnParameters final {
public:
- StoreNamedOwnParameters(Handle<Name> name, FeedbackSource const& feedback)
+ StoreNamedOwnParameters(const NameRef& name, FeedbackSource const& feedback)
: name_(name), feedback_(feedback) {}
- Handle<Name> name() const { return name_; }
+ NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
FeedbackSource const& feedback() const { return feedback_; }
private:
- Handle<Name> const name_;
+ const NameTinyRef name_;
FeedbackSource const feedback_;
-};
-
-bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
-bool operator!=(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
-size_t hash_value(StoreNamedOwnParameters const&);
-
-std::ostream& operator<<(std::ostream&, StoreNamedOwnParameters const&);
+ friend bool operator==(StoreNamedOwnParameters const&,
+ StoreNamedOwnParameters const&);
+ friend bool operator!=(StoreNamedOwnParameters const&,
+ StoreNamedOwnParameters const&);
+ friend size_t hash_value(StoreNamedOwnParameters const&);
+ friend std::ostream& operator<<(std::ostream&,
+ StoreNamedOwnParameters const&);
+};
const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
@@ -439,26 +445,26 @@ const FeedbackParameter& FeedbackParameterOf(const Operator* op);
// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
class NamedAccess final {
public:
- NamedAccess(LanguageMode language_mode, Handle<Name> name,
+ NamedAccess(LanguageMode language_mode, const NameRef& name,
FeedbackSource const& feedback)
: name_(name), feedback_(feedback), language_mode_(language_mode) {}
- Handle<Name> name() const { return name_; }
+ NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
LanguageMode language_mode() const { return language_mode_; }
FeedbackSource const& feedback() const { return feedback_; }
private:
- Handle<Name> const name_;
+ const NameTinyRef name_;
FeedbackSource const feedback_;
LanguageMode const language_mode_;
-};
-bool operator==(NamedAccess const&, NamedAccess const&);
-bool operator!=(NamedAccess const&, NamedAccess const&);
+ friend bool operator==(NamedAccess const&, NamedAccess const&);
+ friend bool operator!=(NamedAccess const&, NamedAccess const&);
-size_t hash_value(NamedAccess const&);
+ friend size_t hash_value(NamedAccess const&);
-std::ostream& operator<<(std::ostream&, NamedAccess const&);
+ friend std::ostream& operator<<(std::ostream&, NamedAccess const&);
+};
const NamedAccess& NamedAccessOf(const Operator* op);
@@ -467,27 +473,29 @@ const NamedAccess& NamedAccessOf(const Operator* op);
// used as a parameter by JSLoadGlobal operator.
class LoadGlobalParameters final {
public:
- LoadGlobalParameters(const Handle<Name>& name, const FeedbackSource& feedback,
+ LoadGlobalParameters(const NameRef& name, const FeedbackSource& feedback,
TypeofMode typeof_mode)
: name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {}
- const Handle<Name>& name() const { return name_; }
+ NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
TypeofMode typeof_mode() const { return typeof_mode_; }
const FeedbackSource& feedback() const { return feedback_; }
private:
- const Handle<Name> name_;
+ const NameTinyRef name_;
const FeedbackSource feedback_;
const TypeofMode typeof_mode_;
-};
-bool operator==(LoadGlobalParameters const&, LoadGlobalParameters const&);
-bool operator!=(LoadGlobalParameters const&, LoadGlobalParameters const&);
+ friend bool operator==(LoadGlobalParameters const&,
+ LoadGlobalParameters const&);
+ friend bool operator!=(LoadGlobalParameters const&,
+ LoadGlobalParameters const&);
-size_t hash_value(LoadGlobalParameters const&);
+ friend size_t hash_value(LoadGlobalParameters const&);
-std::ostream& operator<<(std::ostream&, LoadGlobalParameters const&);
+ friend std::ostream& operator<<(std::ostream&, LoadGlobalParameters const&);
+};
const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op);
@@ -497,26 +505,27 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op);
class StoreGlobalParameters final {
public:
StoreGlobalParameters(LanguageMode language_mode,
- const FeedbackSource& feedback,
- const Handle<Name>& name)
+ const FeedbackSource& feedback, const NameRef& name)
: language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
FeedbackSource const& feedback() const { return feedback_; }
- Handle<Name> const& name() const { return name_; }
+ NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
private:
LanguageMode const language_mode_;
- Handle<Name> const name_;
+ const NameTinyRef name_;
FeedbackSource const feedback_;
-};
-bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
-bool operator!=(StoreGlobalParameters const&, StoreGlobalParameters const&);
+ friend bool operator==(StoreGlobalParameters const&,
+ StoreGlobalParameters const&);
+ friend bool operator!=(StoreGlobalParameters const&,
+ StoreGlobalParameters const&);
-size_t hash_value(StoreGlobalParameters const&);
+ friend size_t hash_value(StoreGlobalParameters const&);
-std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&);
+ friend std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&);
+};
const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
@@ -555,24 +564,26 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
// used as parameter by JSCreateArray operators.
class CreateArrayParameters final {
public:
- explicit CreateArrayParameters(size_t arity, MaybeHandle<AllocationSite> site)
+ CreateArrayParameters(size_t arity, base::Optional<AllocationSiteRef> site)
: arity_(arity), site_(site) {}
size_t arity() const { return arity_; }
- MaybeHandle<AllocationSite> site() const { return site_; }
+ base::Optional<AllocationSiteRef> site(JSHeapBroker* broker) const {
+ return AllocationSiteTinyRef::AsOptionalRef(broker, site_);
+ }
private:
size_t const arity_;
- MaybeHandle<AllocationSite> const site_;
+ base::Optional<AllocationSiteTinyRef> const site_;
+
+ friend bool operator==(CreateArrayParameters const&,
+ CreateArrayParameters const&);
+ friend bool operator!=(CreateArrayParameters const&,
+ CreateArrayParameters const&);
+ friend size_t hash_value(CreateArrayParameters const&);
+ friend std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
};
-bool operator==(CreateArrayParameters const&, CreateArrayParameters const&);
-bool operator!=(CreateArrayParameters const&, CreateArrayParameters const&);
-
-size_t hash_value(CreateArrayParameters const&);
-
-std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
-
const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
// Defines shared information for the array iterator that should be created.
@@ -635,25 +646,26 @@ const CreateCollectionIteratorParameters& CreateCollectionIteratorParametersOf(
// This is used as parameter by JSCreateBoundFunction operators.
class CreateBoundFunctionParameters final {
public:
- CreateBoundFunctionParameters(size_t arity, Handle<Map> map)
+ CreateBoundFunctionParameters(size_t arity, const MapRef& map)
: arity_(arity), map_(map) {}
size_t arity() const { return arity_; }
- Handle<Map> map() const { return map_; }
+ MapRef map(JSHeapBroker* broker) const { return map_.AsRef(broker); }
private:
size_t const arity_;
- Handle<Map> const map_;
-};
+ const MapTinyRef map_;
-bool operator==(CreateBoundFunctionParameters const&,
- CreateBoundFunctionParameters const&);
-bool operator!=(CreateBoundFunctionParameters const&,
- CreateBoundFunctionParameters const&);
+ friend bool operator==(CreateBoundFunctionParameters const&,
+ CreateBoundFunctionParameters const&);
+ friend bool operator!=(CreateBoundFunctionParameters const&,
+ CreateBoundFunctionParameters const&);
-size_t hash_value(CreateBoundFunctionParameters const&);
+ friend size_t hash_value(CreateBoundFunctionParameters const&);
-std::ostream& operator<<(std::ostream&, CreateBoundFunctionParameters const&);
+ friend std::ostream& operator<<(std::ostream&,
+ CreateBoundFunctionParameters const&);
+};
const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
const Operator* op);
@@ -662,54 +674,64 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
// used as a parameter by JSCreateClosure operators.
class CreateClosureParameters final {
public:
- CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
- Handle<CodeT> code, AllocationType allocation)
+ CreateClosureParameters(const SharedFunctionInfoRef& shared_info,
+ const CodeTRef& code, AllocationType allocation)
: shared_info_(shared_info), code_(code), allocation_(allocation) {}
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- Handle<CodeT> code() const { return code_; }
+ SharedFunctionInfoRef shared_info(JSHeapBroker* broker) const {
+ return shared_info_.AsRef(broker);
+ }
+ CodeTRef code(JSHeapBroker* broker) const { return code_.AsRef(broker); }
AllocationType allocation() const { return allocation_; }
private:
- Handle<SharedFunctionInfo> const shared_info_;
- Handle<CodeT> const code_;
+ const SharedFunctionInfoTinyRef shared_info_;
+ const CodeTTinyRef code_;
AllocationType const allocation_;
-};
-bool operator==(CreateClosureParameters const&, CreateClosureParameters const&);
-bool operator!=(CreateClosureParameters const&, CreateClosureParameters const&);
+ friend bool operator==(CreateClosureParameters const&,
+ CreateClosureParameters const&);
+ friend bool operator!=(CreateClosureParameters const&,
+ CreateClosureParameters const&);
-size_t hash_value(CreateClosureParameters const&);
+ friend size_t hash_value(CreateClosureParameters const&);
-std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
+ friend std::ostream& operator<<(std::ostream&,
+ CreateClosureParameters const&);
+};
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
class GetTemplateObjectParameters final {
public:
- GetTemplateObjectParameters(Handle<TemplateObjectDescription> description,
- Handle<SharedFunctionInfo> shared,
+ GetTemplateObjectParameters(const TemplateObjectDescriptionRef& description,
+ const SharedFunctionInfoRef& shared,
FeedbackSource const& feedback)
: description_(description), shared_(shared), feedback_(feedback) {}
- Handle<TemplateObjectDescription> description() const { return description_; }
- Handle<SharedFunctionInfo> shared() const { return shared_; }
+ TemplateObjectDescriptionRef description(JSHeapBroker* broker) const {
+ return description_.AsRef(broker);
+ }
+ SharedFunctionInfoRef shared(JSHeapBroker* broker) const {
+ return shared_.AsRef(broker);
+ }
FeedbackSource const& feedback() const { return feedback_; }
private:
- Handle<TemplateObjectDescription> const description_;
- Handle<SharedFunctionInfo> const shared_;
+ const TemplateObjectDescriptionTinyRef description_;
+ const SharedFunctionInfoTinyRef shared_;
FeedbackSource const feedback_;
-};
-bool operator==(GetTemplateObjectParameters const&,
- GetTemplateObjectParameters const&);
-bool operator!=(GetTemplateObjectParameters const&,
- GetTemplateObjectParameters const&);
+ friend bool operator==(GetTemplateObjectParameters const&,
+ GetTemplateObjectParameters const&);
+ friend bool operator!=(GetTemplateObjectParameters const&,
+ GetTemplateObjectParameters const&);
-size_t hash_value(GetTemplateObjectParameters const&);
+ friend size_t hash_value(GetTemplateObjectParameters const&);
-std::ostream& operator<<(std::ostream&, GetTemplateObjectParameters const&);
+ friend std::ostream& operator<<(std::ostream&,
+ GetTemplateObjectParameters const&);
+};
const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
const Operator* op);
@@ -719,31 +741,36 @@ const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
// JSCreateLiteralRegExp operators.
class CreateLiteralParameters final {
public:
- CreateLiteralParameters(Handle<HeapObject> constant,
+ CreateLiteralParameters(const HeapObjectRef& constant,
FeedbackSource const& feedback, int length, int flags)
: constant_(constant),
feedback_(feedback),
length_(length),
flags_(flags) {}
- Handle<HeapObject> constant() const { return constant_; }
+ HeapObjectRef constant(JSHeapBroker* broker) const {
+ return constant_.AsRef(broker);
+ }
FeedbackSource const& feedback() const { return feedback_; }
int length() const { return length_; }
int flags() const { return flags_; }
private:
- Handle<HeapObject> const constant_;
+ const HeapObjectTinyRef constant_;
FeedbackSource const feedback_;
int const length_;
int const flags_;
-};
-bool operator==(CreateLiteralParameters const&, CreateLiteralParameters const&);
-bool operator!=(CreateLiteralParameters const&, CreateLiteralParameters const&);
+ friend bool operator==(CreateLiteralParameters const&,
+ CreateLiteralParameters const&);
+ friend bool operator!=(CreateLiteralParameters const&,
+ CreateLiteralParameters const&);
-size_t hash_value(CreateLiteralParameters const&);
+ friend size_t hash_value(CreateLiteralParameters const&);
-std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
+ friend std::ostream& operator<<(std::ostream&,
+ CreateLiteralParameters const&);
+};
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
@@ -857,7 +884,16 @@ int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
int RestoreRegisterIndexOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-Handle<ScopeInfo> ScopeInfoOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+ScopeInfoRef ScopeInfoOf(JSHeapBroker* broker,
+ const Operator* op) V8_WARN_UNUSED_RESULT;
+
+bool operator==(ScopeInfoTinyRef const&, ScopeInfoTinyRef const&);
+bool operator!=(ScopeInfoTinyRef const&, ScopeInfoTinyRef const&);
+
+size_t hash_value(ScopeInfoTinyRef const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ ScopeInfoTinyRef const&);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
@@ -904,13 +940,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
- const Operator* CreateArray(size_t arity, MaybeHandle<AllocationSite> site);
+ const Operator* CreateArray(size_t arity,
+ base::Optional<AllocationSiteRef> site);
const Operator* CreateArrayIterator(IterationKind);
const Operator* CreateAsyncFunctionObject(int register_count);
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
- const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
+ const Operator* CreateBoundFunction(size_t arity, const MapRef& map);
const Operator* CreateClosure(
- Handle<SharedFunctionInfo> shared_info, Handle<CodeT> code,
+ const SharedFunctionInfoRef& shared_info, const CodeTRef& code,
AllocationType allocation = AllocationType::kYoung);
const Operator* CreateIterResultObject();
const Operator* CreateStringIterator();
@@ -919,25 +956,25 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreatePromise();
const Operator* CreateTypedArray();
const Operator* CreateLiteralArray(
- Handle<ArrayBoilerplateDescription> constant,
+ const ArrayBoilerplateDescriptionRef& constant,
FeedbackSource const& feedback, int literal_flags,
int number_of_elements);
const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback);
const Operator* CreateArrayFromIterable();
const Operator* CreateEmptyLiteralObject();
const Operator* CreateLiteralObject(
- Handle<ObjectBoilerplateDescription> constant,
+ const ObjectBoilerplateDescriptionRef& constant,
FeedbackSource const& feedback, int literal_flags,
int number_of_properties);
const Operator* CloneObject(FeedbackSource const& feedback,
int literal_flags);
- const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
+ const Operator* CreateLiteralRegExp(const StringRef& constant_pattern,
FeedbackSource const& feedback,
int literal_flags);
const Operator* GetTemplateObject(
- Handle<TemplateObjectDescription> description,
- Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback);
+ const TemplateObjectDescriptionRef& description,
+ const SharedFunctionInfoRef& shared, FeedbackSource const& feedback);
const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Call(
@@ -978,16 +1015,17 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
FeedbackSource const& feedback = FeedbackSource());
const Operator* LoadProperty(FeedbackSource const& feedback);
- const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback);
- const Operator* LoadNamedFromSuper(Handle<Name> name,
+ const Operator* LoadNamed(const NameRef& name,
+ FeedbackSource const& feedback);
+ const Operator* LoadNamedFromSuper(const NameRef& name,
FeedbackSource const& feedback);
const Operator* StoreProperty(LanguageMode language_mode,
FeedbackSource const& feedback);
- const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
+ const Operator* StoreNamed(LanguageMode language_mode, const NameRef& name,
FeedbackSource const& feedback);
- const Operator* StoreNamedOwn(Handle<Name> name,
+ const Operator* StoreNamedOwn(const NameRef& name,
FeedbackSource const& feedback);
const Operator* StoreDataPropertyInLiteral(const FeedbackSource& feedback);
const Operator* StoreInArrayLiteral(const FeedbackSource& feedback);
@@ -1000,11 +1038,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateGeneratorObject();
- const Operator* LoadGlobal(const Handle<Name>& name,
+ const Operator* LoadGlobal(const NameRef& name,
const FeedbackSource& feedback,
TypeofMode typeof_mode = TypeofMode::kNotInside);
- const Operator* StoreGlobal(LanguageMode language_mode,
- const Handle<Name>& name,
+ const Operator* StoreGlobal(LanguageMode language_mode, const NameRef& name,
const FeedbackSource& feedback);
const Operator* HasContextExtension(size_t depth);
@@ -1051,11 +1088,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* RejectPromise();
const Operator* ResolvePromise();
- const Operator* CreateFunctionContext(Handle<ScopeInfo> scope_info,
+ const Operator* CreateFunctionContext(const ScopeInfoRef& scope_info,
int slot_count, ScopeType scope_type);
- const Operator* CreateCatchContext(const Handle<ScopeInfo>& scope_info);
- const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
- const Operator* CreateBlockContext(const Handle<ScopeInfo>& scpope_info);
+ const Operator* CreateCatchContext(const ScopeInfoRef& scope_info);
+ const Operator* CreateWithContext(const ScopeInfoRef& scope_info);
+  const Operator* CreateBlockContext(const ScopeInfoRef& scope_info);
const Operator* ObjectIsArray();
const Operator* ParseInt();
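A recurring change in this header moves operator==, operator!=, hash_value and operator<< into the parameter classes as friends so they can read the tiny-ref members directly. A compact sketch of that "hidden friend" layout with a made-up ParamsLike class; the friends are defined inline here for brevity, whereas the V8 classes declare them and define them in the .cc file:

#include <cstddef>
#include <ostream>

class ParamsLike {
 public:
  explicit ParamsLike(int arity) : arity_(arity) {}

 private:
  int arity_;

  // Hidden friends: found by argument-dependent lookup, yet free to touch
  // private members without going through public accessors.
  friend bool operator==(ParamsLike const& lhs, ParamsLike const& rhs) {
    return lhs.arity_ == rhs.arity_;
  }
  friend bool operator!=(ParamsLike const& lhs, ParamsLike const& rhs) {
    return !(lhs == rhs);
  }
  friend std::size_t hash_value(ParamsLike const& p) {
    return static_cast<std::size_t>(p.arity_);
  }
  friend std::ostream& operator<<(std::ostream& os, ParamsLike const& p) {
    return os << p.arity_;
  }
};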
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 833afaf31d..e986ef1baf 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -10,6 +10,7 @@
#include "src/codegen/interface-descriptors-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
@@ -595,7 +596,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
PropertyCellRef string_length_protector =
MakeRef(broker(), factory()->string_length_protector());
- string_length_protector.SerializeAsProtector();
+ string_length_protector.CacheAsProtector();
if (string_length_protector.value().AsSmi() ==
Protectors::kProtectorValid) {
@@ -1172,7 +1173,7 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
Node* receiver = n.object();
Type receiver_type = NodeProperties::GetType(receiver);
- NameRef name = MakeRef(broker(), NamedAccessOf(node->op()).name());
+ NameRef name = NamedAccessOf(node->op()).name(broker());
NameRef length_str = MakeRef(broker(), factory()->length_string());
// Optimize "length" property of strings.
if (name.equals(length_str) && receiver_type.Is(Type::String())) {
@@ -1622,11 +1623,6 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
// Only optimize [[Construct]] here if {function} is a Constructor.
if (!function.map().is_constructor()) return NoChange();
- if (!function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << function);
- return NoChange();
- }
-
// Patch {node} to an indirect call via the {function}s construct stub.
bool use_builtin_construct_stub = function.shared().construct_as_builtin();
CodeRef code = MakeRef(
@@ -1704,22 +1700,14 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
if (target_type.IsHeapConstant() &&
target_type.AsHeapConstant()->Ref().IsJSFunction()) {
function = target_type.AsHeapConstant()->Ref().AsJSFunction();
-
- if (!function->serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << *function);
- return NoChange();
- }
shared = function->shared();
} else if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& ccp =
JSCreateClosureNode{target}.Parameters();
- shared = MakeRef(broker(), ccp.shared_info());
+ shared = ccp.shared_info(broker());
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
- if (feedback_vector.has_value()) {
- shared = feedback_vector->shared_function_info();
- }
+ shared = cell.shared_function_info();
}
if (shared.has_value()) {
@@ -2086,7 +2074,7 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode());
ExternalReference const ref =
- ExternalReference::address_of_pending_message_obj(isolate());
+ ExternalReference::address_of_pending_message(isolate());
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
NodeProperties::ChangeOp(node, simplified()->LoadMessage());
return Changed(node);
@@ -2095,7 +2083,7 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreMessage, node->opcode());
ExternalReference const ref =
- ExternalReference::address_of_pending_message_obj(isolate());
+ ExternalReference::address_of_pending_message(isolate());
Node* value = NodeProperties::GetValueInput(node, 0);
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
node->ReplaceInput(1, value);
@@ -2356,16 +2344,7 @@ Reduction JSTypedLowering::ReduceJSResolvePromise(Node* node) {
}
Reduction JSTypedLowering::Reduce(Node* node) {
- const IrOpcode::Value opcode = node->opcode();
- if (broker()->generate_full_feedback_collection() &&
- IrOpcode::IsFeedbackCollectingOpcode(opcode)) {
- // In NCI code, it is not valid to reduce feedback-collecting JS opcodes
- // into non-feedback-collecting lower-level opcodes; missed feedback would
- // result in soft deopts.
- return NoChange();
- }
-
- switch (opcode) {
+ switch (node->opcode()) {
case IrOpcode::kJSEqual:
return ReduceJSEqual(node);
case IrOpcode::kJSStrictEqual:
@@ -2469,18 +2448,18 @@ Reduction JSTypedLowering::Reduce(Node* node) {
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
-
Graph* JSTypedLowering::graph() const { return jsgraph()->graph(); }
+CompilationDependencies* JSTypedLowering::dependencies() const {
+ return broker()->dependencies();
+}
Isolate* JSTypedLowering::isolate() const { return jsgraph()->isolate(); }
-
JSOperatorBuilder* JSTypedLowering::javascript() const {
return jsgraph()->javascript();
}
-
CommonOperatorBuilder* JSTypedLowering::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 9fa6e01a93..4c0031df3d 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -20,6 +20,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+class CompilationDependencies;
class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
@@ -93,6 +94,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
+ CompilationDependencies* dependencies() const;
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index ee56a665db..e184534ed7 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -543,8 +543,9 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
return loop_tree;
}
+#if V8_ENABLE_WEBASSEMBLY
// static
-ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader(
+ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
Node* loop_header, Zone* zone, size_t max_size) {
auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
std::vector<Node*> queue;
@@ -580,6 +581,12 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader(
loop_header);
// All uses are outside the loop, do nothing.
break;
+ case IrOpcode::kCall:
+ case IrOpcode::kTailCall:
+ case IrOpcode::kJSWasmCall:
+ case IrOpcode::kJSCall:
+ // Call nodes are considered to have unbounded size, i.e. >max_size.
+ return nullptr;
default:
for (Node* use : node->uses()) {
if (visited->count(use) == 0) queue.push_back(use);
@@ -614,6 +621,7 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader(
return visited;
}
+#endif // V8_ENABLE_WEBASSEMBLY
bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
const LoopTree::Loop* loop) {
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 49db12fef3..e928e5a779 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -179,16 +179,19 @@ class V8_EXPORT_PRIVATE LoopFinder {
static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop);
- // Find all nodes of a loop given its header node. Will exit early once the
- // current loop size exceed {max_size}. This is a very restricted version of
- // BuildLoopTree.
- // Assumptions:
+#if V8_ENABLE_WEBASSEMBLY
+ // Find all nodes of the loop headed by {loop_header}. Returns {nullptr}
+ // if the loop size in Nodes exceeds {max_size}. In that context, function
+ // calls are considered to have unbounded size, so if the loop contains a
+ // function call, {nullptr} is always returned.
+ // This is a very restricted version of BuildLoopTree and makes the following
+ // assumptions:
// 1) All loop exits of the loop are marked with LoopExit, LoopExitEffect,
// and LoopExitValue nodes.
// 2) There are no nested loops within this loop.
- static ZoneUnorderedSet<Node*>* FindUnnestedLoopFromHeader(Node* loop_header,
- Zone* zone,
- size_t max_size);
+ static ZoneUnorderedSet<Node*>* FindSmallUnnestedLoopFromHeader(
+ Node* loop_header, Zone* zone, size_t max_size);
+#endif
};
// Copies a range of nodes any number of times.
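For readers of the loop-analysis change above: the renamed FindSmallUnnestedLoopFromHeader now treats any call as making the loop too large to unroll. A rough, self-contained sketch of that bail-out behaviour follows; Node, Op and the breadth-first walk are simplified stand-ins for illustration, not V8's actual types or traversal.

    #include <cstddef>
    #include <queue>
    #include <unordered_set>
    #include <vector>

    enum class Op { kLoop, kPhi, kAdd, kCall, kTailCall };
    struct Node { Op op; std::vector<Node*> uses; };

    // Returns nullptr if the region reachable from `header` contains a call or
    // grows past `max_size`; otherwise returns the set of visited nodes.
    std::unordered_set<Node*>* FindSmallLoop(Node* header, std::size_t max_size,
                                             std::unordered_set<Node*>* visited) {
      std::queue<Node*> queue;
      queue.push(header);
      while (!queue.empty()) {
        Node* node = queue.front();
        queue.pop();
        if (!visited->insert(node).second) continue;      // already seen
        if (visited->size() > max_size) return nullptr;   // loop too big
        if (node->op == Op::kCall || node->op == Op::kTailCall) {
          return nullptr;  // calls count as unbounded size
        }
        for (Node* use : node->uses) queue.push(use);
      }
      return visited;
    }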
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 3d61d70b02..411c6d4cb3 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -147,8 +147,8 @@ size_t hash_value(StoreLaneParameters params) {
}
std::ostream& operator<<(std::ostream& os, StoreLaneParameters params) {
- return os << "(" << params.kind << " " << params.rep << " " << params.laneidx
- << ")";
+ return os << "(" << params.kind << " " << params.rep << " "
+ << static_cast<unsigned int>(params.laneidx) << ")";
}
StoreLaneParameters const& StoreLaneParametersOf(Operator const* op) {
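The static_cast added to the StoreLaneParameters printer above matters because laneidx is a uint8_t, and streaming a uint8_t selects the character overload of operator<<. A tiny stand-alone demonstration (plain C++, nothing V8-specific):

    #include <cstdint>
    #include <iostream>

    int main() {
      uint8_t laneidx = 3;
      std::cout << laneidx << "\n";                             // prints a control character, not "3"
      std::cout << static_cast<unsigned int>(laneidx) << "\n";  // prints "3"
    }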
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index b6c96163c6..f6f87cd62e 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -9,15 +9,14 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects/map-inl.h"
-#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
namespace compiler {
-MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
- : broker_(broker), object_(object) {
- ZoneHandleSet<Map> maps;
+MapInference::MapInference(JSHeapBroker* broker, Node* object, Effect effect)
+ : broker_(broker), object_(object), maps_(broker->zone()) {
+ ZoneRefUnorderedSet<MapRef> maps(broker->zone());
auto result =
NodeProperties::InferMapsUnsafe(broker_, object_, effect, &maps);
maps_.insert(maps_.end(), maps.begin(), maps.end());
@@ -67,9 +66,8 @@ bool MapInference::AllOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
CHECK(HaveMaps());
- auto instance_type = [this, f](Handle<Map> map) {
- MapRef map_ref = MakeRef(broker_, map);
- return f(map_ref.instance_type());
+ auto instance_type = [f](const MapRef& map) {
+ return f(map.instance_type());
};
return std::all_of(maps_.begin(), maps_.end(), instance_type);
}
@@ -78,22 +76,21 @@ bool MapInference::AnyOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
CHECK(HaveMaps());
- auto instance_type = [this, f](Handle<Map> map) {
- MapRef map_ref = MakeRef(broker_, map);
- return f(map_ref.instance_type());
+ auto instance_type = [f](const MapRef& map) {
+ return f(map.instance_type());
};
return std::any_of(maps_.begin(), maps_.end(), instance_type);
}
-MapHandles const& MapInference::GetMaps() {
+ZoneVector<MapRef> const& MapInference::GetMaps() {
SetNeedGuardIfUnreliable();
return maps_;
}
-bool MapInference::Is(Handle<Map> expected_map) {
+bool MapInference::Is(const MapRef& expected_map) {
if (!HaveMaps()) return false;
- const MapHandles& maps = GetMaps();
+ const ZoneVector<MapRef>& maps = GetMaps();
if (maps.size() != 1) return false;
return maps[0].equals(expected_map);
}
@@ -104,7 +101,9 @@ void MapInference::InsertMapChecks(JSGraph* jsgraph, Effect* effect,
CHECK(HaveMaps());
CHECK(feedback.IsValid());
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : maps_) maps.insert(map, jsgraph->graph()->zone());
+ for (const MapRef& map : maps_) {
+ maps.insert(map.object(), jsgraph->graph()->zone());
+ }
*effect = jsgraph->graph()->NewNode(
jsgraph->simplified()->CheckMaps(CheckMapsFlag::kNone, maps, feedback),
object_, *effect, control);
@@ -133,14 +132,11 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
const FeedbackSource& feedback) {
if (Safe()) return true;
- auto is_stable = [this](Handle<Map> map) {
- MapRef map_ref = MakeRef(broker_, map);
- return map_ref.is_stable();
- };
+ auto is_stable = [](const MapRef& map) { return map.is_stable(); };
if (dependencies != nullptr &&
std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) {
- for (Handle<Map> map : maps_) {
- dependencies->DependOnStableMap(MakeRef(broker_, map));
+ for (const MapRef& map : maps_) {
+ dependencies->DependOnStableMap(map);
}
SetGuarded();
return true;
diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h
index a1e2efbc22..e1392b6805 100644
--- a/deps/v8/src/compiler/map-inference.h
+++ b/deps/v8/src/compiler/map-inference.h
@@ -34,7 +34,7 @@ class Node;
// reliable).
class MapInference {
public:
- MapInference(JSHeapBroker* broker, Node* object, Node* effect);
+ MapInference(JSHeapBroker* broker, Node* object, Effect effect);
// The destructor checks that the information has been made reliable (if
// necessary) and force-crashes if not.
@@ -52,10 +52,10 @@ class MapInference {
// These queries require a guard. (Even instance types are generally not
// reliable because of how the representation of a string can change.)
- V8_WARN_UNUSED_RESULT MapHandles const& GetMaps();
+ V8_WARN_UNUSED_RESULT ZoneVector<MapRef> const& GetMaps();
V8_WARN_UNUSED_RESULT bool AllOfInstanceTypes(
std::function<bool(InstanceType)> f);
- V8_WARN_UNUSED_RESULT bool Is(Handle<Map> expected_map);
+ V8_WARN_UNUSED_RESULT bool Is(const MapRef& expected_map);
// These methods provide a guard.
//
@@ -83,7 +83,7 @@ class MapInference {
JSHeapBroker* const broker_;
Node* const object_;
- MapHandles maps_;
+ ZoneVector<MapRef> maps_;
enum {
kReliableOrGuarded,
kUnreliableDontNeedGuard,
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index ac113ddd70..9673a51844 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -15,6 +15,10 @@
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
+#endif
namespace v8 {
namespace internal {
namespace compiler {
@@ -100,6 +104,32 @@ Reduction MemoryLowering::Reduce(Node* node) {
}
}
+void MemoryLowering::EnsureAllocateOperator() {
+ if (allocate_operator_.is_set()) return;
+
+ auto descriptor = AllocateDescriptor{};
+ StubCallMode mode = isolate_ != nullptr ? StubCallMode::kCallCodeObject
+ : StubCallMode::kCallBuiltinPointer;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph_zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow, mode);
+ allocate_operator_.set(common()->Call(call_descriptor));
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+Node* MemoryLowering::GetWasmInstanceNode() {
+ if (wasm_instance_node_.is_set()) return wasm_instance_node_.get();
+ for (Node* use : graph()->start()->uses()) {
+ if (use->opcode() == IrOpcode::kParameter &&
+ ParameterIndexOf(use->op()) == wasm::kWasmInstanceParameterIndex) {
+ wasm_instance_node_.set(use);
+ return use;
+ }
+ }
+ UNREACHABLE(); // The instance node must have been created before.
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
#define __ gasm()->
Reduction MemoryLowering::ReduceAllocateRaw(
@@ -123,29 +153,82 @@ Reduction MemoryLowering::ReduceAllocateRaw(
gasm()->InitializeEffectControl(effect, control);
Node* allocate_builtin;
- if (allocation_type == AllocationType::kYoung) {
- if (allow_large_objects == AllowLargeObjects::kTrue) {
- allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ if (isolate_ != nullptr) {
+ if (allocation_type == AllocationType::kYoung) {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ }
} else {
- allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ }
}
} else {
- if (allow_large_objects == AllowLargeObjects::kTrue) {
- allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ // This lowering is used by Wasm, where we compile isolate-independent
+ // code. Builtin calls simply encode the target builtin ID, which will
+ // be patched to the builtin's address later.
+#if V8_ENABLE_WEBASSEMBLY
+ Builtin builtin;
+ if (allocation_type == AllocationType::kYoung) {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ builtin = Builtin::kAllocateInYoungGeneration;
+ } else {
+ builtin = Builtin::kAllocateRegularInYoungGeneration;
+ }
} else {
- allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ builtin = Builtin::kAllocateInOldGeneration;
+ } else {
+ builtin = Builtin::kAllocateRegularInOldGeneration;
+ }
}
+ static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
+ allocate_builtin =
+ graph()->NewNode(common()->NumberConstant(static_cast<int>(builtin)));
+#else
+ UNREACHABLE();
+#endif
}
// Determine the top/limit addresses.
- Node* top_address = __ ExternalConstant(
- allocation_type == AllocationType::kYoung
- ? ExternalReference::new_space_allocation_top_address(isolate())
- : ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = __ ExternalConstant(
- allocation_type == AllocationType::kYoung
- ? ExternalReference::new_space_allocation_limit_address(isolate())
- : ExternalReference::old_space_allocation_limit_address(isolate()));
+ Node* top_address;
+ Node* limit_address;
+ if (isolate_ != nullptr) {
+ top_address = __ ExternalConstant(
+ allocation_type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ limit_address = __ ExternalConstant(
+ allocation_type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+ } else {
+ // Wasm mode: producing isolate-independent code; the allocation top/limit
+ // addresses are loaded from the WasmInstanceObject at runtime.
+#if V8_ENABLE_WEBASSEMBLY
+ Node* instance_node = GetWasmInstanceNode();
+ int top_address_offset =
+ allocation_type == AllocationType::kYoung
+ ? WasmInstanceObject::kNewAllocationTopAddressOffset
+ : WasmInstanceObject::kOldAllocationTopAddressOffset;
+ int limit_address_offset =
+ allocation_type == AllocationType::kYoung
+ ? WasmInstanceObject::kNewAllocationLimitAddressOffset
+ : WasmInstanceObject::kOldAllocationLimitAddressOffset;
+ top_address =
+ __ Load(MachineType::Pointer(), instance_node,
+ __ IntPtrConstant(top_address_offset - kHeapObjectTag));
+ limit_address =
+ __ Load(MachineType::Pointer(), instance_node,
+ __ IntPtrConstant(limit_address_offset - kHeapObjectTag));
+#else
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
@@ -216,13 +299,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
__ Bind(&call_runtime);
{
- if (!allocate_operator_.is_set()) {
- auto descriptor = AllocateDescriptor{};
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph_zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kCanUseRoots, Operator::kNoThrow);
- allocate_operator_.set(common()->Call(call_descriptor));
- }
+ EnsureAllocateOperator();
Node* vfalse = __ BitcastTaggedToWord(
__ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
@@ -277,13 +354,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
- if (!allocate_operator_.is_set()) {
- auto descriptor = AllocateDescriptor{};
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph_zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kCanUseRoots, Operator::kNoThrow);
- allocate_operator_.set(common()->Call(call_descriptor));
- }
+ EnsureAllocateOperator();
__ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
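The two duplicated if (!allocate_operator_.is_set()) blocks above are folded into EnsureAllocateOperator(), a build-once accessor. The same idiom, sketched here with std::optional in place of V8's SetOncePointer (the class and field names are illustrative assumptions, not the real API):

    #include <optional>

    struct AllocateCallDescriptor { int stack_params = 0; };

    class Lowering {
     public:
      const AllocateCallDescriptor& allocate_descriptor() {
        EnsureAllocateDescriptor();
        return *allocate_descriptor_;
      }

     private:
      void EnsureAllocateDescriptor() {
        if (allocate_descriptor_.has_value()) return;  // built already
        allocate_descriptor_.emplace();                // build exactly once
      }
      std::optional<AllocateCallDescriptor> allocate_descriptor_;
    };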
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 1ebbf40bc8..1fbe18abff 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -113,6 +113,8 @@ class MemoryLowering final : public Reducer {
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
+ void EnsureAllocateOperator();
+ Node* GetWasmInstanceNode();
Graph* graph() const { return graph_; }
Isolate* isolate() const { return isolate_; }
@@ -123,6 +125,7 @@ class MemoryLowering final : public Reducer {
JSGraphAssembler* gasm() const { return graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
+ SetOncePointer<Node> wasm_instance_node_;
Isolate* isolate_;
Zone* zone_;
Graph* graph_;
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index 9e577eb183..c29f4dfe98 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -16,21 +16,26 @@ namespace compiler {
class Node;
template <class T>
-T DefaultConstruct() {
+T DefaultConstruct(Zone* zone) {
return T();
}
-template <class T, T def() = DefaultConstruct<T>>
+template <class T>
+T ZoneConstruct(Zone* zone) {
+ return T(zone);
+}
+
+template <class T, T def(Zone*) = DefaultConstruct<T>>
class NodeAuxData {
public:
- explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
+ explicit NodeAuxData(Zone* zone) : zone_(zone), aux_data_(zone) {}
explicit NodeAuxData(size_t initial_size, Zone* zone)
- : aux_data_(initial_size, zone) {}
+ : zone_(zone), aux_data_(initial_size, def(zone), zone) {}
// Update entry. Returns true iff entry was changed.
bool Set(Node* node, T const& data) {
size_t const id = node->id();
- if (id >= aux_data_.size()) aux_data_.resize(id + 1, def());
+ if (id >= aux_data_.size()) aux_data_.resize(id + 1, def(zone_));
if (aux_data_[id] != data) {
aux_data_[id] = data;
return true;
@@ -40,7 +45,7 @@ class NodeAuxData {
T Get(Node* node) const {
size_t const id = node->id();
- return (id < aux_data_.size()) ? aux_data_[id] : def();
+ return (id < aux_data_.size()) ? aux_data_[id] : def(zone_);
}
class const_iterator;
@@ -50,10 +55,11 @@ class NodeAuxData {
const_iterator end() const;
private:
+ Zone* zone_;
ZoneVector<T> aux_data_;
};
-template <class T, T def()>
+template <class T, T def(Zone*)>
class NodeAuxData<T, def>::const_iterator {
public:
using iterator_category = std::forward_iterator_tag;
@@ -87,13 +93,13 @@ class NodeAuxData<T, def>::const_iterator {
size_t current_;
};
-template <class T, T def()>
+template <class T, T def(Zone*)>
typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::begin()
const {
return typename NodeAuxData<T, def>::const_iterator(&aux_data_, 0);
}
-template <class T, T def()>
+template <class T, T def(Zone*)>
typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::end() const {
return typename NodeAuxData<T, def>::const_iterator(&aux_data_,
aux_data_.size());
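The NodeAuxData change above turns the default-value provider into a function taking a Zone*, so per-node defaults that themselves need a zone (for example zone-allocated sets) can be produced. A condensed, stand-alone version of that template pattern, with a placeholder Zone and std::vector standing in for ZoneVector (both assumptions of this sketch):

    #include <cstddef>
    #include <vector>

    struct Zone {};  // placeholder for v8::internal::Zone

    template <class T>
    T DefaultConstruct(Zone*) { return T(); }        // ignores the zone
    template <class T>
    T ZoneConstruct(Zone* zone) { return T(zone); }  // zone-constructed default

    template <class T, T def(Zone*) = DefaultConstruct<T>>
    class AuxData {
     public:
      explicit AuxData(Zone* zone) : zone_(zone) {}
      void Set(std::size_t id, const T& value) {
        if (id >= data_.size()) data_.resize(id + 1, def(zone_));
        data_[id] = value;
      }
      T Get(std::size_t id) const {
        return id < data_.size() ? data_[id] : def(zone_);
      }

     private:
      Zone* zone_;
      std::vector<T> data_;
    };

    struct ZoneSet {
      explicit ZoneSet(Zone*) {}
    };

    // AuxData<int> ids(&zone);                               // zone-agnostic default
    // AuxData<ZoneSet, ZoneConstruct<ZoneSet>> sets(&zone);  // zone-aware default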
diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h
index c4b2ec3cf1..899c62d411 100644
--- a/deps/v8/src/compiler/node-origin-table.h
+++ b/deps/v8/src/compiler/node-origin-table.h
@@ -136,7 +136,10 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
NodeOrigin current_origin_;
const char* current_phase_name_;
- NodeAuxData<NodeOrigin, NodeOrigin::Unknown> table_;
+ static NodeOrigin UnknownNodeOrigin(Zone* zone) {
+ return NodeOrigin::Unknown();
+ }
+ NodeAuxData<NodeOrigin, UnknownNodeOrigin> table_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 26fbed0abb..8cc6bfee63 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -332,12 +332,9 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
mnewtarget.Ref(broker).IsJSFunction()) {
ObjectRef target = mtarget.Ref(broker);
JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction();
- if (newtarget.map().has_prototype_slot() && newtarget.has_initial_map()) {
- if (!newtarget.serialized()) {
- TRACE_BROKER_MISSING(broker, "initial map on " << newtarget);
- return base::nullopt;
- }
- MapRef initial_map = newtarget.initial_map();
+ if (newtarget.map().has_prototype_slot() &&
+ newtarget.has_initial_map(broker->dependencies())) {
+ MapRef initial_map = newtarget.initial_map(broker->dependencies());
if (initial_map.GetConstructor().equals(target)) {
DCHECK(target.AsJSFunction().map().is_constructor());
DCHECK(newtarget.map().is_constructor());
@@ -348,10 +345,32 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
return base::nullopt;
}
+namespace {
+
+// TODO(jgruber): Remove the intermediate ZoneHandleSet and then this function.
+ZoneRefUnorderedSet<MapRef> ToRefSet(JSHeapBroker* broker,
+ const ZoneHandleSet<Map>& handles) {
+ ZoneRefUnorderedSet<MapRef> refs =
+ ZoneRefUnorderedSet<MapRef>(broker->zone());
+ for (Handle<Map> handle : handles) {
+ refs.insert(MakeRefAssumeMemoryFence(broker, *handle));
+ }
+ return refs;
+}
+
+ZoneRefUnorderedSet<MapRef> RefSetOf(JSHeapBroker* broker, const MapRef& ref) {
+ ZoneRefUnorderedSet<MapRef> refs =
+ ZoneRefUnorderedSet<MapRef>(broker->zone());
+ refs.insert(ref);
+ return refs;
+}
+
+} // namespace
+
// static
NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
- JSHeapBroker* broker, Node* receiver, Node* effect,
- ZoneHandleSet<Map>* maps_return) {
+ JSHeapBroker* broker, Node* receiver, Effect effect,
+ ZoneRefUnorderedSet<MapRef>* maps_out) {
HeapObjectMatcher m(receiver);
if (m.HasResolvedValue()) {
HeapObjectRef receiver = m.Ref(broker);
@@ -367,7 +386,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
if (receiver.map().is_stable()) {
// The {receiver_map} is only reliable when we install a stability
// code dependency.
- *maps_return = ZoneHandleSet<Map>(receiver.map().object());
+ *maps_out = RefSetOf(broker, receiver.map());
return kUnreliableMaps;
}
}
@@ -378,7 +397,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
case IrOpcode::kMapGuard: {
Node* const object = GetValueInput(effect, 0);
if (IsSame(receiver, object)) {
- *maps_return = MapGuardMapsOf(effect->op());
+ *maps_out = ToRefSet(broker, MapGuardMapsOf(effect->op()));
return result;
}
break;
@@ -386,7 +405,8 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
case IrOpcode::kCheckMaps: {
Node* const object = GetValueInput(effect, 0);
if (IsSame(receiver, object)) {
- *maps_return = CheckMapsParametersOf(effect->op()).maps();
+ *maps_out =
+ ToRefSet(broker, CheckMapsParametersOf(effect->op()).maps());
return result;
}
break;
@@ -395,7 +415,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
if (IsSame(receiver, effect)) {
base::Optional<MapRef> initial_map = GetJSCreateMap(broker, receiver);
if (initial_map.has_value()) {
- *maps_return = ZoneHandleSet<Map>(initial_map->object());
+ *maps_out = RefSetOf(broker, initial_map.value());
return result;
}
// We reached the allocation of the {receiver}.
@@ -406,10 +426,10 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
}
case IrOpcode::kJSCreatePromise: {
if (IsSame(receiver, effect)) {
- *maps_return = ZoneHandleSet<Map>(broker->target_native_context()
- .promise_function()
- .initial_map()
- .object());
+ *maps_out = RefSetOf(
+ broker,
+ broker->target_native_context().promise_function().initial_map(
+ broker->dependencies()));
return result;
}
break;
@@ -424,7 +444,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
Node* const value = GetValueInput(effect, 1);
HeapObjectMatcher m(value);
if (m.HasResolvedValue()) {
- *maps_return = ZoneHandleSet<Map>(m.Ref(broker).AsMap().object());
+ *maps_out = RefSetOf(broker, m.Ref(broker).AsMap());
return result;
}
}
@@ -503,7 +523,7 @@ bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
// static
bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver,
- Node* effect) {
+ Effect effect) {
switch (receiver->opcode()) {
#define CASE(Opcode) case IrOpcode::k##Opcode:
JS_CONSTRUCT_OP_LIST(CASE)
@@ -528,7 +548,7 @@ bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver,
// static
bool NodeProperties::CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver,
- Node* effect) {
+ Effect effect) {
if (CanBePrimitive(broker, receiver, effect)) {
switch (receiver->opcode()) {
case IrOpcode::kCheckInternalizedString:
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 50f3a17136..2d4c16370b 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_NODE_PROPERTIES_H_
#include "src/common/globals.h"
+#include "src/compiler/heap-refs.h"
#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/types.h"
@@ -219,9 +220,9 @@ class V8_EXPORT_PRIVATE NodeProperties {
kUnreliableMaps // Maps might have changed (side-effect).
};
// DO NOT USE InferMapsUnsafe IN NEW CODE. Use MapInference instead.
- static InferMapsResult InferMapsUnsafe(JSHeapBroker* broker, Node* object,
- Node* effect,
- ZoneHandleSet<Map>* maps);
+ static InferMapsResult InferMapsUnsafe(JSHeapBroker* broker, Node* receiver,
+ Effect effect,
+ ZoneRefUnorderedSet<MapRef>* maps_out);
// Return the initial map of the new-target if the allocation can be inlined.
static base::Optional<MapRef> GetJSCreateMap(JSHeapBroker* broker,
@@ -236,12 +237,12 @@ class V8_EXPORT_PRIVATE NodeProperties {
// definitely a JavaScript object); might walk up the {effect} chain to
// find map checks on {receiver}.
static bool CanBePrimitive(JSHeapBroker* broker, Node* receiver,
- Node* effect);
+ Effect effect);
// Returns true if the {receiver} can be null or undefined. Might walk
// up the {effect} chain to find map checks for {receiver}.
static bool CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver,
- Node* effect);
+ Effect effect);
// ---------------------------------------------------------------------------
// Context.
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index 84e905b812..1373ff5f25 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -387,9 +387,11 @@ void PersistentMap<Key, Value, Hasher>::Set(Key key, Value value) {
if (old->more) {
*more = *old->more;
} else {
- (*more)[old->key_value.key()] = old->key_value.value();
+ more->erase(old->key_value.key());
+ more->emplace(old->key_value.key(), old->key_value.value());
}
- (*more)[key] = value;
+ more->erase(key);
+ more->emplace(key, value);
}
size_t size = sizeof(FocusedTree) +
std::max(0, length - 1) * sizeof(const FocusedTree*);
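The PersistentMap change above swaps (*more)[key] = value for erase-then-emplace. With standard unordered maps (which the zone map wraps), operator[] requires the mapped type to be default-constructible and assignable, while erase+emplace only needs a suitable constructor; presumably that is what the newer Ref-based value types need. A minimal stand-alone comparison:

    #include <string>
    #include <unordered_map>

    // A mapped type without a default constructor: operator[] cannot be used.
    struct Value {
      explicit Value(int v) : v(v) {}
      int v;
    };

    int main() {
      std::unordered_map<std::string, Value> m;
      // m["a"] = Value(1);       // does not compile: Value has no default ctor
      m.erase("a");               // drop any existing entry first...
      m.emplace("a", Value(1));   // ...then insert the new mapping
    }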
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 676f338cf4..e802cd7268 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -69,7 +69,6 @@
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
-#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
@@ -147,9 +146,6 @@ class PipelineData {
may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
- roots_relative_addressing_enabled_(
- !isolate->serializer_enabled() &&
- !isolate->IsGeneratingEmbeddedBuiltins()),
graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
graph_zone_(graph_zone_scope_.zone()),
instruction_zone_scope_(zone_stats_, kInstructionZoneName),
@@ -551,7 +547,7 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- info()->GetPoisoningMitigationLevel(), assembler_options_,
+ info()->GetPoisoningMitigationLevel(), assembler_options(),
info_->builtin(), max_unoptimized_frame_height(),
max_pushed_argument_count(),
FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
@@ -571,10 +567,6 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
- bool roots_relative_addressing_enabled() {
- return roots_relative_addressing_enabled_;
- }
-
const ProfileDataFromFile* profile_data() const { return profile_data_; }
void set_profile_data(const ProfileDataFromFile* profile_data) {
profile_data_ = profile_data;
@@ -615,7 +607,6 @@ class PipelineData {
CodeGenerator* code_generator_ = nullptr;
Typer* typer_ = nullptr;
Typer::Flags typer_flags_ = Typer::kNoFlags;
- bool roots_relative_addressing_enabled_ = false;
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
@@ -683,8 +674,8 @@ class PipelineImpl final {
template <typename Phase, typename... Args>
void Run(Args&&... args);
- // Step A.1. Serialize the data needed for the compilation front-end.
- void Serialize();
+ // Step A.1. Initialize the heap broker.
+ void InitializeHeapBroker();
// Step A.2. Run the graph creation and initial optimization passes.
bool CreateGraph();
@@ -1212,10 +1203,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
- // Serialize() and CreateGraph() may already use IsPendingAllocation.
+ // InitializeHeapBroker() and CreateGraph() may already use
+ // IsPendingAllocation.
isolate->heap()->PublishPendingAllocations();
- pipeline_.Serialize();
+ pipeline_.InitializeHeapBroker();
if (!data_.broker()->is_concurrent_inlining()) {
if (!pipeline_.CreateGraph()) {
@@ -1354,10 +1346,10 @@ struct GraphBuilderPhase {
CallFrequency frequency(1.0f);
BuildGraphFromBytecode(
data->broker(), temp_zone, closure.shared(),
- closure.raw_feedback_cell(), data->info()->osr_offset(),
- data->jsgraph(), frequency, data->source_positions(),
- SourcePosition::kNotInlined, data->info()->code_kind(), flags,
- &data->info()->tick_counter(),
+ closure.raw_feedback_cell(data->dependencies()),
+ data->info()->osr_offset(), data->jsgraph(), frequency,
+ data->source_positions(), SourcePosition::kNotInlined,
+ data->info()->code_kind(), flags, &data->info()->tick_counter(),
ObserveNodeInfo{data->observe_node_manager(),
data->info()->node_observer()});
}
@@ -1385,8 +1377,7 @@ struct InliningPhase {
call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
}
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
- temp_zone, call_reducer_flags,
- data->dependencies());
+ temp_zone, call_reducer_flags);
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(), data->broker(),
data->specialization_context(),
@@ -1548,42 +1539,6 @@ struct CopyMetadataForConcurrentCompilePhase {
}
};
-struct SerializationPhase {
- DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Serialization)
-
- void Run(PipelineData* data, Zone* temp_zone) {
- SerializerForBackgroundCompilationFlags flags;
- if (data->info()->bailout_on_uninitialized()) {
- flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
- }
- if (data->info()->source_positions()) {
- flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
- }
- if (data->info()->analyze_environment_liveness()) {
- flags |=
- SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
- }
- if (data->info()->inlining()) {
- flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining;
- }
- RunSerializerForBackgroundCompilation(
- data->zone_stats(), data->broker(), data->dependencies(),
- data->info()->closure(), flags, data->info()->osr_offset());
- if (data->specialization_context().IsJust()) {
- MakeRef(data->broker(),
- data->specialization_context().FromJust().context);
- }
- if (FLAG_turbo_concurrent_get_property_access_info) {
- data->broker()->ClearCachedPropertyAccessInfos();
- data->dependencies()->ClearForConcurrentGetPropertyAccessInfo();
- }
- if (FLAG_stress_concurrent_inlining) {
- // Force re-serialization from the background thread.
- data->broker()->ClearReconstructibleData();
- }
- }
-};
-
struct TypedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
@@ -1717,11 +1672,12 @@ struct WasmLoopUnrollingPhase {
std::vector<compiler::WasmLoopInfo>* loop_infos) {
for (WasmLoopInfo& loop_info : *loop_infos) {
if (loop_info.is_innermost) {
- ZoneUnorderedSet<Node*>* loop = LoopFinder::FindUnnestedLoopFromHeader(
- loop_info.header, temp_zone,
- // Only discover the loop until its size is the maximum unrolled
- // size for its depth.
- maximum_unrollable_size(loop_info.nesting_depth));
+ ZoneUnorderedSet<Node*>* loop =
+ LoopFinder::FindSmallUnnestedLoopFromHeader(
+ loop_info.header, temp_zone,
+ // Only discover the loop until its size is the maximum unrolled
+ // size for its depth.
+ maximum_unrollable_size(loop_info.nesting_depth));
UnrollLoop(loop_info.header, loop, loop_info.nesting_depth,
data->graph(), data->common(), temp_zone,
data->source_positions(), data->node_origins());
@@ -2246,7 +2202,7 @@ struct InstructionSelectionPhase {
FLAG_turbo_instruction_scheduling
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
- data->roots_relative_addressing_enabled()
+ data->assembler_options().enable_root_relative_access
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
@@ -2666,8 +2622,8 @@ void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
}
}
-void PipelineImpl::Serialize() {
- PipelineData* data = this->data_;
+void PipelineImpl::InitializeHeapBroker() {
+ PipelineData* data = data_;
data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
@@ -2691,7 +2647,6 @@ void PipelineImpl::Serialize() {
data->broker()->SetTargetNativeContextRef(data->native_context());
if (data->broker()->is_concurrent_inlining()) {
Run<HeapBrokerInitializationPhase>();
- Run<SerializationPhase>();
data->broker()->StopSerializing();
}
data->EndPhaseKind();
@@ -3362,7 +3317,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
CompilationHandleScope compilation_scope(isolate, info);
CanonicalHandleScope canonical(isolate, info);
info->ReopenHandlesInNewHandleScope(isolate);
- pipeline.Serialize();
+ pipeline.InitializeHeapBroker();
// Emulating the proper pipeline, we call CreateGraph on different places
// (i.e before or after creating a LocalIsolateScope) depending on
// is_concurrent_inlining.
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index 78163a23a7..832fc441da 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -153,7 +153,7 @@ class ElementAccessFeedback : public ProcessedFeedback {
// [e0, e1] [e0, e1]
//
ElementAccessFeedback const& Refine(
- ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const;
+ JSHeapBroker* broker, ZoneVector<MapRef> const& inferred_maps) const;
private:
KeyedAccessMode const keyed_mode_;
@@ -162,15 +162,15 @@ class ElementAccessFeedback : public ProcessedFeedback {
class NamedAccessFeedback : public ProcessedFeedback {
public:
- NamedAccessFeedback(NameRef const& name, ZoneVector<Handle<Map>> const& maps,
+ NamedAccessFeedback(NameRef const& name, ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind);
NameRef const& name() const { return name_; }
- ZoneVector<Handle<Map>> const& maps() const { return maps_; }
+ ZoneVector<MapRef> const& maps() const { return maps_; }
private:
NameRef const name_;
- ZoneVector<Handle<Map>> const maps_;
+ ZoneVector<MapRef> const maps_;
};
class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
@@ -178,19 +178,19 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind,
Handle<Object> handler,
- ZoneVector<Handle<Map>> const& maps,
+ ZoneVector<MapRef> const& maps,
bool has_migration_target_maps);
NameRef const& name() const { return name_; }
bool is_monomorphic() const { return maps_.size() == 1; }
Handle<Object> handler() const { return handler_; }
- ZoneVector<Handle<Map>> const& maps() const { return maps_; }
+ ZoneVector<MapRef> const& maps() const { return maps_; }
bool has_migration_target_maps() const { return has_migration_target_maps_; }
private:
NameRef const name_;
Handle<Object> const handler_;
- ZoneVector<Handle<Map>> const maps_;
+ ZoneVector<MapRef> const maps_;
bool const has_migration_target_maps_;
};
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index b1ad17a1c4..a64521d6f6 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -34,31 +34,28 @@ SimplifiedOperatorBuilder* PropertyAccessBuilder::simplified() const {
return jsgraph()->simplified();
}
-bool HasOnlyStringMaps(JSHeapBroker* broker,
- ZoneVector<Handle<Map>> const& maps) {
- for (auto map : maps) {
- MapRef map_ref = MakeRef(broker, map);
- if (!map_ref.IsStringMap()) return false;
+bool HasOnlyStringMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) {
+ for (MapRef map : maps) {
+ if (!map.IsStringMap()) return false;
}
return true;
}
namespace {
-bool HasOnlyNumberMaps(JSHeapBroker* broker,
- ZoneVector<Handle<Map>> const& maps) {
- for (auto map : maps) {
- MapRef map_ref = MakeRef(broker, map);
- if (map_ref.instance_type() != HEAP_NUMBER_TYPE) return false;
+bool HasOnlyNumberMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) {
+ for (MapRef map : maps) {
+ if (map.instance_type() != HEAP_NUMBER_TYPE) return false;
}
return true;
}
} // namespace
-bool PropertyAccessBuilder::TryBuildStringCheck(
- JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps, Node** receiver,
- Node** effect, Node* control) {
+bool PropertyAccessBuilder::TryBuildStringCheck(JSHeapBroker* broker,
+ ZoneVector<MapRef> const& maps,
+ Node** receiver, Effect* effect,
+ Control control) {
if (HasOnlyStringMaps(broker, maps)) {
// Monomorphic string access (ignoring the fact that there are multiple
// String maps).
@@ -70,9 +67,10 @@ bool PropertyAccessBuilder::TryBuildStringCheck(
return false;
}
-bool PropertyAccessBuilder::TryBuildNumberCheck(
- JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps, Node** receiver,
- Node** effect, Node* control) {
+bool PropertyAccessBuilder::TryBuildNumberCheck(JSHeapBroker* broker,
+ ZoneVector<MapRef> const& maps,
+ Node** receiver, Effect* effect,
+ Control control) {
if (HasOnlyNumberMaps(broker, maps)) {
// Monomorphic number access (we also deal with Smis here).
*receiver = *effect =
@@ -83,15 +81,15 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(
return false;
}
-void PropertyAccessBuilder::BuildCheckMaps(
- Node* object, Node** effect, Node* control,
- ZoneVector<Handle<Map>> const& maps) {
+void PropertyAccessBuilder::BuildCheckMaps(Node* object, Effect* effect,
+ Control control,
+ ZoneVector<MapRef> const& maps) {
HeapObjectMatcher m(object);
if (m.HasResolvedValue()) {
MapRef object_map = m.Ref(broker()).map();
if (object_map.is_stable()) {
- for (Handle<Map> map : maps) {
- if (MakeRef(broker(), map).equals(object_map)) {
+ for (MapRef map : maps) {
+ if (map.equals(object_map)) {
dependencies()->DependOnStableMap(object_map);
return;
}
@@ -100,10 +98,9 @@ void PropertyAccessBuilder::BuildCheckMaps(
}
ZoneHandleSet<Map> map_set;
CheckMapsFlags flags = CheckMapsFlag::kNone;
- for (Handle<Map> map : maps) {
- MapRef object_map = MakeRef(broker(), map);
- map_set.insert(object_map.object(), graph()->zone());
- if (object_map.is_migration_target()) {
+ for (MapRef map : maps) {
+ map_set.insert(map.object(), graph()->zone());
+ if (map.is_migration_target()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
}
@@ -127,9 +124,9 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Effect* effect,
Node* PropertyAccessBuilder::ResolveHolder(
PropertyAccessInfo const& access_info, Node* lookup_start_object) {
- Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder)) {
- return jsgraph()->Constant(MakeRef(broker(), holder));
+ base::Optional<JSObjectRef> holder = access_info.holder();
+ if (holder.has_value()) {
+ return jsgraph()->Constant(holder.value());
}
return lookup_start_object;
}
@@ -155,29 +152,27 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
DCHECK(access_info.IsDictionaryProtoDataConstant());
- JSObjectRef holder =
- MakeRef(broker(), access_info.holder().ToHandleChecked());
InternalIndex index = access_info.dictionary_index();
base::Optional<ObjectRef> value =
- holder.GetOwnDictionaryProperty(index, dependencies());
+ access_info.holder()->GetOwnDictionaryProperty(index, dependencies());
if (!value) return {};
- for (Handle<Map> map : access_info.lookup_start_object_maps()) {
+ for (MapRef map : access_info.lookup_start_object_maps()) {
+ Handle<Map> map_handle = map.object();
// Non-JSReceivers that passed AccessInfoFactory::ComputePropertyAccessInfo
// must have different lookup start map.
- if (!map->IsJSReceiverMap()) {
+ if (!map_handle->IsJSReceiverMap()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
JSFunction constructor =
Map::GetConstructorFunction(
- *map, *broker()->target_native_context().object())
+ *map_handle, *broker()->target_native_context().object())
.value();
- map = MakeRef(broker(), constructor.initial_map()).object();
- DCHECK(map->IsJSObjectMap());
+ map = MakeRef(broker(), constructor.initial_map());
+ DCHECK(map.object()->IsJSObjectMap());
}
dependencies()->DependOnConstantInDictionaryPrototypeChain(
- MakeRef(broker(), map), MakeRef(broker(), access_info.name()),
- value.value(), PropertyKind::kData);
+ map, access_info.name(), value.value(), PropertyKind::kData);
}
return jsgraph()->Constant(value.value());
@@ -189,9 +184,10 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
if (!access_info.IsFastDataConstant()) return nullptr;
// First, determine if we have a constant holder to load from.
- Handle<JSObject> holder;
+ base::Optional<JSObjectRef> holder = access_info.holder();
+
// If {access_info} has a holder, just use it.
- if (!access_info.holder().ToHandle(&holder)) {
+ if (!holder.has_value()) {
// Otherwise, try to match the {lookup_start_object} as a constant.
HeapObjectMatcher m(lookup_start_object);
if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
@@ -199,26 +195,22 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
// Let us make sure the actual map of the constant lookup_start_object is
// among the maps in {access_info}.
MapRef lookup_start_object_map = m.Ref(broker()).map();
- if (std::find_if(
- access_info.lookup_start_object_maps().begin(),
- access_info.lookup_start_object_maps().end(), [&](Handle<Map> map) {
- return MakeRef(broker(), map).equals(lookup_start_object_map);
- }) == access_info.lookup_start_object_maps().end()) {
+ if (std::find_if(access_info.lookup_start_object_maps().begin(),
+ access_info.lookup_start_object_maps().end(),
+ [&](MapRef map) {
+ return map.equals(lookup_start_object_map);
+ }) == access_info.lookup_start_object_maps().end()) {
// The map of the lookup_start_object is not in the feedback, let us bail
// out.
return nullptr;
}
- holder = m.Ref(broker()).AsJSObject().object();
+ holder = m.Ref(broker()).AsJSObject();
}
- JSObjectRef holder_ref = MakeRef(broker(), holder);
- base::Optional<ObjectRef> value = holder_ref.GetOwnFastDataProperty(
- access_info.field_representation(), access_info.field_index(),
- dependencies());
- if (!value.has_value()) {
- return nullptr;
- }
- return jsgraph()->Constant(*value);
+ base::Optional<ObjectRef> value =
+ holder->GetOwnFastDataProperty(access_info.field_representation(),
+ access_info.field_index(), dependencies());
+ return value.has_value() ? jsgraph()->Constant(*value) : nullptr;
}
Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
@@ -333,12 +325,11 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
field_representation == MachineRepresentation::kCompressedPointer) {
// Remember the map of the field value, if its map is stable. This is
// used by the LoadElimination to eliminate map checks on the result.
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
- MapRef field_map_ref = MakeRef(broker(), field_map);
- if (field_map_ref.is_stable()) {
- dependencies()->DependOnStableMap(field_map_ref);
- field_access.map = field_map;
+ base::Optional<MapRef> field_map = access_info.field_map();
+ if (field_map.has_value()) {
+ if (field_map->is_stable()) {
+ dependencies()->DependOnStableMap(field_map.value());
+ field_access.map = field_map->object();
}
}
}
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 69518d9a52..d86037a578 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -36,25 +36,15 @@ class PropertyAccessBuilder {
// Builds the appropriate string check if the maps are only string
// maps.
- bool TryBuildStringCheck(JSHeapBroker* broker,
- ZoneVector<Handle<Map>> const& maps, Node** receiver,
- Node** effect, Node* control);
+ bool TryBuildStringCheck(JSHeapBroker* broker, ZoneVector<MapRef> const& maps,
+ Node** receiver, Effect* effect, Control control);
// Builds a number check if all maps are number maps.
- bool TryBuildNumberCheck(JSHeapBroker* broker,
- ZoneVector<Handle<Map>> const& maps, Node** receiver,
- Node** effect, Node* control);
-
- // TODO(jgruber): Remove the untyped version once all uses are
- // updated.
- void BuildCheckMaps(Node* object, Node** effect, Node* control,
- ZoneVector<Handle<Map>> const& maps);
+ bool TryBuildNumberCheck(JSHeapBroker* broker, ZoneVector<MapRef> const& maps,
+ Node** receiver, Effect* effect, Control control);
+
void BuildCheckMaps(Node* object, Effect* effect, Control control,
- ZoneVector<Handle<Map>> const& maps) {
- Node* e = *effect;
- Node* c = control;
- BuildCheckMaps(object, &e, c, maps);
- *effect = e;
- }
+ ZoneVector<MapRef> const& maps);
+
Node* BuildCheckValue(Node* receiver, Effect* effect, Control control,
Handle<HeapObject> value);
@@ -106,8 +96,7 @@ class PropertyAccessBuilder {
CompilationDependencies* dependencies_;
};
-bool HasOnlyStringMaps(JSHeapBroker* broker,
- ZoneVector<Handle<Map>> const& maps);
+bool HasOnlyStringMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
deleted file mode 100644
index 6978c6de6e..0000000000
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ /dev/null
@@ -1,3605 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/serializer-for-background-compilation.h"
-
-#include <sstream>
-
-#include "src/base/optional.h"
-#include "src/compiler/access-info.h"
-#include "src/compiler/bytecode-analysis.h"
-#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/js-heap-broker.h"
-#include "src/compiler/serializer-hints.h"
-#include "src/compiler/zone-stats.h"
-#include "src/handles/handles-inl.h"
-#include "src/ic/call-optimization.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects/code.h"
-#include "src/objects/js-array-inl.h"
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/literal-objects-inl.h"
-#include "src/objects/shared-function-info-inl.h"
-#include "src/objects/template-objects-inl.h"
-#include "src/zone/zone-containers.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define KILL_ENVIRONMENT_LIST(V) \
- V(Abort) \
- V(ReThrow) \
- V(Throw)
-
-#define CLEAR_ACCUMULATOR_LIST(V) \
- V(CallRuntime) \
- V(CloneObject) \
- V(CreateArrayFromIterable) \
- V(CreateEmptyObjectLiteral) \
- V(CreateMappedArguments) \
- V(CreateRestParameter) \
- V(CreateUnmappedArguments) \
- V(DeletePropertySloppy) \
- V(DeletePropertyStrict) \
- V(ForInContinue) \
- V(ForInEnumerate) \
- V(ForInStep) \
- V(LogicalNot) \
- V(SetPendingMessage) \
- V(TestNull) \
- V(TestReferenceEqual) \
- V(TestTypeOf) \
- V(TestUndefined) \
- V(TestUndetectable) \
- V(ToBooleanLogicalNot) \
- V(ToName) \
- V(ToString) \
- V(TypeOf)
-
-#define UNCONDITIONAL_JUMPS_LIST(V) \
- V(Jump) \
- V(JumpConstant) \
- V(JumpLoop)
-
-#define CONDITIONAL_JUMPS_LIST(V) \
- V(JumpIfFalse) \
- V(JumpIfFalseConstant) \
- V(JumpIfJSReceiver) \
- V(JumpIfJSReceiverConstant) \
- V(JumpIfNotNull) \
- V(JumpIfNotNullConstant) \
- V(JumpIfNotUndefined) \
- V(JumpIfNotUndefinedConstant) \
- V(JumpIfNull) \
- V(JumpIfNullConstant) \
- V(JumpIfToBooleanFalse) \
- V(JumpIfToBooleanFalseConstant) \
- V(JumpIfToBooleanTrue) \
- V(JumpIfToBooleanTrueConstant) \
- V(JumpIfTrue) \
- V(JumpIfTrueConstant) \
- V(JumpIfUndefined) \
- V(JumpIfUndefinedConstant) \
- V(JumpIfUndefinedOrNull) \
- V(JumpIfUndefinedOrNullConstant)
-
-#define IGNORED_BYTECODE_LIST(V) \
- V(CallRuntimeForPair) \
- V(CollectTypeProfile) \
- V(DebugBreak0) \
- V(DebugBreak1) \
- V(DebugBreak2) \
- V(DebugBreak3) \
- V(DebugBreak4) \
- V(DebugBreak5) \
- V(DebugBreak6) \
- V(DebugBreakExtraWide) \
- V(DebugBreakWide) \
- V(Debugger) \
- V(IncBlockCounter) \
- V(ResumeGenerator) \
- V(SuspendGenerator) \
- V(ThrowIfNotSuperConstructor) \
- V(ThrowSuperAlreadyCalledIfNotHole) \
- V(ThrowSuperNotCalledIfHole) \
- V(ToObject)
-
-#define UNREACHABLE_BYTECODE_LIST(V) \
- V(ExtraWide) \
- V(Illegal) \
- V(Wide)
-
-#define BINARY_OP_LIST(V) \
- V(Add) \
- V(AddSmi) \
- V(BitwiseAnd) \
- V(BitwiseAndSmi) \
- V(BitwiseOr) \
- V(BitwiseOrSmi) \
- V(BitwiseXor) \
- V(BitwiseXorSmi) \
- V(Div) \
- V(DivSmi) \
- V(Exp) \
- V(ExpSmi) \
- V(Mod) \
- V(ModSmi) \
- V(Mul) \
- V(MulSmi) \
- V(ShiftLeft) \
- V(ShiftLeftSmi) \
- V(ShiftRight) \
- V(ShiftRightSmi) \
- V(ShiftRightLogical) \
- V(ShiftRightLogicalSmi) \
- V(Sub) \
- V(SubSmi)
-
-#define UNARY_OP_LIST(V) \
- V(BitwiseNot) \
- V(Dec) \
- V(Inc) \
- V(Negate)
-
-#define COMPARE_OP_LIST(V) \
- V(TestEqual) \
- V(TestEqualStrict) \
- V(TestGreaterThan) \
- V(TestGreaterThanOrEqual) \
- V(TestLessThan) \
- V(TestLessThanOrEqual)
-
-#define SUPPORTED_BYTECODE_LIST(V) \
- V(CallAnyReceiver) \
- V(CallJSRuntime) \
- V(CallProperty) \
- V(CallProperty0) \
- V(CallProperty1) \
- V(CallProperty2) \
- V(CallUndefinedReceiver) \
- V(CallUndefinedReceiver0) \
- V(CallUndefinedReceiver1) \
- V(CallUndefinedReceiver2) \
- V(CallWithSpread) \
- V(Construct) \
- V(ConstructWithSpread) \
- V(CreateArrayLiteral) \
- V(CreateBlockContext) \
- V(CreateCatchContext) \
- V(CreateClosure) \
- V(CreateEmptyArrayLiteral) \
- V(CreateEvalContext) \
- V(CreateFunctionContext) \
- V(CreateObjectLiteral) \
- V(CreateRegExpLiteral) \
- V(CreateWithContext) \
- V(ForInNext) \
- V(ForInPrepare) \
- V(GetIterator) \
- V(GetSuperConstructor) \
- V(GetTemplateObject) \
- V(InvokeIntrinsic) \
- V(LdaConstant) \
- V(LdaContextSlot) \
- V(LdaCurrentContextSlot) \
- V(LdaImmutableContextSlot) \
- V(LdaImmutableCurrentContextSlot) \
- V(LdaModuleVariable) \
- V(LdaFalse) \
- V(LdaGlobal) \
- V(LdaGlobalInsideTypeof) \
- V(LdaKeyedProperty) \
- V(LdaLookupContextSlot) \
- V(LdaLookupContextSlotInsideTypeof) \
- V(LdaLookupGlobalSlot) \
- V(LdaLookupGlobalSlotInsideTypeof) \
- V(LdaLookupSlot) \
- V(LdaLookupSlotInsideTypeof) \
- V(LdaNamedProperty) \
- V(LdaNamedPropertyFromSuper) \
- V(LdaNull) \
- V(Ldar) \
- V(LdaSmi) \
- V(LdaTheHole) \
- V(LdaTrue) \
- V(LdaUndefined) \
- V(LdaZero) \
- V(Mov) \
- V(PopContext) \
- V(PushContext) \
- V(Return) \
- V(StaContextSlot) \
- V(StaCurrentContextSlot) \
- V(StaDataPropertyInLiteral) \
- V(StaGlobal) \
- V(StaInArrayLiteral) \
- V(StaKeyedProperty) \
- V(StaLookupSlot) \
- V(StaModuleVariable) \
- V(StaNamedOwnProperty) \
- V(StaNamedProperty) \
- V(Star) \
- V(SwitchOnGeneratorState) \
- V(SwitchOnSmiNoFeedback) \
- V(TestIn) \
- V(TestInstanceOf) \
- V(ThrowReferenceErrorIfHole) \
- V(ToNumber) \
- V(ToNumeric) \
- BINARY_OP_LIST(V) \
- COMPARE_OP_LIST(V) \
- CLEAR_ACCUMULATOR_LIST(V) \
- CONDITIONAL_JUMPS_LIST(V) \
- IGNORED_BYTECODE_LIST(V) \
- KILL_ENVIRONMENT_LIST(V) \
- UNARY_OP_LIST(V) \
- UNCONDITIONAL_JUMPS_LIST(V) \
- UNREACHABLE_BYTECODE_LIST(V)
-
-struct HintsImpl : public ZoneObject {
- explicit HintsImpl(Zone* zone) : zone_(zone) {}
-
- ConstantsSet constants_;
- MapsSet maps_;
- VirtualClosuresSet virtual_closures_;
- VirtualContextsSet virtual_contexts_;
- VirtualBoundFunctionsSet virtual_bound_functions_;
-
- Zone* const zone_;
-};
-
-void Hints::EnsureAllocated(Zone* zone, bool check_zone_equality) {
- if (IsAllocated()) {
- if (check_zone_equality) CHECK_EQ(zone, impl_->zone_);
- // ... else {zone} lives no longer than {impl_->zone_} but we have no way of
- // checking that.
- } else {
- impl_ = zone->New<HintsImpl>(zone);
- }
- DCHECK(IsAllocated());
-}
-
-struct VirtualBoundFunction {
- Hints const bound_target;
- HintsVector const bound_arguments;
-
- VirtualBoundFunction(Hints const& target, const HintsVector& arguments)
- : bound_target(target), bound_arguments(arguments) {}
-
- bool operator==(const VirtualBoundFunction& other) const {
- if (bound_arguments.size() != other.bound_arguments.size()) return false;
- if (bound_target != other.bound_target) return false;
-
- for (size_t i = 0; i < bound_arguments.size(); ++i) {
- if (bound_arguments[i] != other.bound_arguments[i]) return false;
- }
- return true;
- }
-};
-
-// A VirtualClosure is a SharedFunctionInfo and a FeedbackVector, plus
-// Hints about the context in which a closure will be created from them.
-class VirtualClosure {
- public:
- VirtualClosure(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
-
- VirtualClosure(Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- Hints const& context_hints);
-
- Handle<SharedFunctionInfo> shared() const { return shared_; }
- Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
- Hints const& context_hints() const { return context_hints_; }
-
- bool operator==(const VirtualClosure& other) const {
- // A feedback vector is never used for more than one SFI. There might,
- // however, be two virtual closures with the same SFI and vector, but
- // different context hints. crbug.com/1024282 has a link to a document
- // describing why the context_hints_ might be different in that case.
- DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
- shared_.equals(other.shared_));
- return feedback_vector_.equals(other.feedback_vector_) &&
- context_hints_ == other.context_hints_;
- }
-
- private:
- Handle<SharedFunctionInfo> const shared_;
- Handle<FeedbackVector> const feedback_vector_;
- Hints const context_hints_;
-};
-
-// A CompilationSubject is a VirtualClosure, optionally with a matching
-// concrete closure.
-class CompilationSubject {
- public:
- explicit CompilationSubject(VirtualClosure virtual_closure)
- : virtual_closure_(virtual_closure), closure_() {}
-
- // The zone parameter is to correctly initialize the virtual closure,
- // which contains zone-allocated context information.
- CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone);
-
- const VirtualClosure& virtual_closure() const { return virtual_closure_; }
- MaybeHandle<JSFunction> closure() const { return closure_; }
-
- private:
- VirtualClosure const virtual_closure_;
- MaybeHandle<JSFunction> const closure_;
-};
-
-// A Callee is either a JSFunction (which may not have a feedback vector), or a
-// VirtualClosure. Note that this is different from CompilationSubject, which
-// always has a VirtualClosure.
-class Callee {
- public:
- explicit Callee(Handle<JSFunction> jsfunction)
- : jsfunction_(jsfunction), virtual_closure_() {}
- explicit Callee(VirtualClosure const& virtual_closure)
- : jsfunction_(), virtual_closure_(virtual_closure) {}
-
- Handle<SharedFunctionInfo> shared(Isolate* isolate) const {
- return virtual_closure_.has_value()
- ? virtual_closure_->shared()
- : handle(jsfunction_.ToHandleChecked()->shared(), isolate);
- }
-
- bool HasFeedbackVector() const {
- return virtual_closure_.has_value() ||
- jsfunction_.ToHandleChecked()->has_feedback_vector();
- }
-
- CompilationSubject ToCompilationSubject(Isolate* isolate, Zone* zone) const {
- CHECK(HasFeedbackVector());
- return virtual_closure_.has_value()
- ? CompilationSubject(*virtual_closure_)
- : CompilationSubject(jsfunction_.ToHandleChecked(), isolate,
- zone);
- }
-
- private:
- MaybeHandle<JSFunction> const jsfunction_;
- base::Optional<VirtualClosure> const virtual_closure_;
-};
-
-// If a list of arguments (hints) is shorter than the function's parameter
-// count, this enum expresses what we know about the missing arguments.
-enum MissingArgumentsPolicy {
- kMissingArgumentsAreUndefined, // ... as in the JS undefined value
- kMissingArgumentsAreUnknown,
-};
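As an illustrative aside (not part of the diffed file), here is a minimal C++ sketch of what the two policies mean for the parameter hints built by the Environment constructor further down; the string vectors are simplified stand-ins for the real Hints/HintsVector types:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  const size_t parameter_count = 4;                  // receiver + 3 declared parameters
  std::vector<std::string> params(parameter_count);  // "" stands for an empty Hints
  const std::vector<std::string> args = {"receiver hint", "arg0 hint"};

  // Copy the hints for the actually passed arguments, at most parameter_count.
  for (size_t i = 0; i < std::min(args.size(), params.size()); ++i) {
    params[i] = args[i];
  }

  const bool missing_are_undefined = true;  // kMissingArgumentsAreUndefined
  if (missing_are_undefined) {
    for (size_t i = args.size(); i < params.size(); ++i) {
      params[i] = "constant <undefined>";
    }
  }  // Under kMissingArgumentsAreUnknown the tail would simply stay empty.

  assert(params[3] == "constant <undefined>");
  return 0;
}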
-
-// The SerializerForBackgroundCompilation makes sure that the relevant function
-// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
-// optimizations in the compiler, is copied to the heap broker.
-class SerializerForBackgroundCompilation {
- public:
- SerializerForBackgroundCompilation(
- ZoneStats* zone_stats, JSHeapBroker* broker,
- CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
- Hints Run(); // NOTE: Returns empty for an
- // already-serialized function.
-
- class Environment;
-
- private:
- SerializerForBackgroundCompilation(
- ZoneStats* zone_stats, JSHeapBroker* broker,
- CompilationDependencies* dependencies, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments,
- MissingArgumentsPolicy padding,
- SerializerForBackgroundCompilationFlags flags, int nesting_level);
-
- bool BailoutOnUninitialized(ProcessedFeedback const& feedback);
-
- void TraverseBytecode();
-
-#define DECLARE_VISIT_BYTECODE(name, ...) \
- void Visit##name(interpreter::BytecodeArrayIterator* iterator);
- SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
-#undef DECLARE_VISIT_BYTECODE
-
- void VisitShortStar(interpreter::Register reg);
-
- Hints& register_hints(interpreter::Register reg);
-
- // Return a vector containing the hints for the given register range (in
- // order). Also prepare these hints for feedback backpropagation by allocating
- // any that aren't yet allocated.
- HintsVector PrepareArgumentsHints(interpreter::Register first, size_t count);
-
- // Like above except that the hints have to be given directly.
- template <typename... MoreHints>
- HintsVector PrepareArgumentsHints(Hints* hints, MoreHints... more);
-
- void ProcessCalleeForCallOrConstruct(Callee const& callee,
- base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding,
- Hints* result_hints);
- void ProcessCalleeForCallOrConstruct(Handle<Object> callee,
- base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding,
- Hints* result_hints);
- void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
- HintsVector* arguments, FeedbackSlot slot,
- MissingArgumentsPolicy padding);
- void ProcessCallOrConstructRecursive(Hints const& callee,
- base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding,
- Hints* result_hints);
- void ProcessNewTargetForConstruct(Hints const& new_target,
- Hints* result_hints);
- void ProcessCallVarArgs(
- ConvertReceiverMode receiver_mode, Hints const& callee,
- interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
- MissingArgumentsPolicy padding = kMissingArgumentsAreUndefined);
- void ProcessApiCall(Handle<SharedFunctionInfo> target,
- const HintsVector& arguments);
- void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target,
- Handle<Map> receiver);
- void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
- base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding, Hints* result_hints);
-
- void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
-
- void ProcessKeyedPropertyAccess(Hints* receiver, Hints const& key,
- FeedbackSlot slot, AccessMode access_mode,
- bool honor_bailout_on_uninitialized);
- void ProcessNamedPropertyAccess(Hints* receiver, NameRef const& name,
- FeedbackSlot slot, AccessMode access_mode);
- void ProcessNamedSuperPropertyAccess(Hints* receiver, NameRef const& name,
- FeedbackSlot slot,
- AccessMode access_mode);
- void ProcessNamedAccess(Hints* receiver, NamedAccessFeedback const& feedback,
- AccessMode access_mode, Hints* result_hints);
- void ProcessNamedSuperAccess(Hints* receiver,
- NamedAccessFeedback const& feedback,
- AccessMode access_mode, Hints* result_hints);
- void ProcessElementAccess(Hints const& receiver, Hints const& key,
- ElementAccessFeedback const& feedback,
- AccessMode access_mode);
- void ProcessMinimorphicPropertyAccess(
- MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source);
-
- void ProcessModuleVariableAccess(
- interpreter::BytecodeArrayIterator* iterator);
-
- void ProcessHintsForObjectCreate(Hints const& prototype);
- void ProcessMapHintsForPromises(Hints const& receiver_hints);
- void ProcessHintsForPromiseResolve(Hints const& resolution_hints);
- void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints);
- void ProcessHintsForRegExpTest(Hints const& regexp_hints);
- PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
- void ProcessHintsForFunctionBind(Hints const& receiver_hints);
- void ProcessHintsForObjectGetPrototype(Hints const& object_hints);
- void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& constructor,
- bool* walk_prototypes);
- void ProcessConstantForInstanceOf(ObjectRef const& constant,
- bool* walk_prototypes);
- void ProcessHintsForOrdinaryHasInstance(Hints const& constructor_hints,
- Hints const& instance_hints);
-
- void ProcessGlobalAccess(FeedbackSlot slot, bool is_load);
-
- void ProcessCompareOperation(FeedbackSlot slot);
- void ProcessForIn(FeedbackSlot slot);
- void ProcessUnaryOrBinaryOperation(FeedbackSlot slot,
- bool honor_bailout_on_uninitialized);
-
- void ProcessMapForNamedPropertyAccess(
- Hints* receiver, base::Optional<MapRef> receiver_map,
- MapRef lookup_start_object_map, NameRef const& name,
- AccessMode access_mode, base::Optional<JSObjectRef> concrete_receiver,
- Hints* result_hints);
-
- void ProcessCreateContext(interpreter::BytecodeArrayIterator* iterator,
- int scopeinfo_operand_index);
-
- enum ContextProcessingMode {
- kIgnoreSlot,
- kSerializeSlot,
- };
-
- void ProcessContextAccess(Hints const& context_hints, int slot, int depth,
- ContextProcessingMode mode,
- Hints* result_hints = nullptr);
- void ProcessImmutableLoad(ContextRef const& context, int slot,
- ContextProcessingMode mode,
- Hints* new_accumulator_hints);
- void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
- void ProcessLdaLookupContextSlot(
- interpreter::BytecodeArrayIterator* iterator);
-
- // Performs extension lookups for [0, depth) like
- // BytecodeGraphBuilder::CheckContextExtensions().
- void ProcessCheckContextExtensions(int depth);
-
- Hints RunChildSerializer(CompilationSubject function,
- base::Optional<Hints> new_target,
- const HintsVector& arguments,
- MissingArgumentsPolicy padding);
-
- // When (forward-)branching bytecodes are encountered, e.g. a conditional
- // jump, we call ContributeToJumpTargetEnvironment to "remember" the current
- // environment, associated with the jump target offset. When serialization
- // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to
- // merge that environment back into whatever is the current environment then.
- // Note: Since there may be multiple jumps to the same target,
- // ContributeToJumpTargetEnvironment may actually do a merge as well.
- void ContributeToJumpTargetEnvironment(int target_offset);
- void IncorporateJumpTargetEnvironment(int target_offset);
-
- VirtualClosure function() const { return function_; }
-
- Hints& return_value_hints() { return return_value_hints_; }
-
- Handle<FeedbackVector> feedback_vector() const;
- Handle<BytecodeArray> bytecode_array() const;
-
- JSHeapBroker* broker() const { return broker_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
- Zone* zone() { return zone_scope_.zone(); }
- Environment* environment() const { return environment_; }
- SerializerForBackgroundCompilationFlags flags() const { return flags_; }
- BytecodeOffset osr_offset() const { return osr_offset_; }
- const BytecodeAnalysis& bytecode_analysis() { return *bytecode_analysis_; }
-
- JSHeapBroker* const broker_;
- CompilationDependencies* const dependencies_;
- ZoneStats::Scope zone_scope_;
- SerializerForBackgroundCompilationFlags const flags_;
- // Instead of storing the virtual_closure here, we could extract it from the
- // {closure_hints_} but that would be cumbersome.
- VirtualClosure const function_;
- BytecodeOffset const osr_offset_;
- base::Optional<BytecodeAnalysis> bytecode_analysis_;
- ZoneUnorderedMap<int, Environment*> jump_target_environments_;
- Environment* const environment_;
- HintsVector const arguments_;
- Hints return_value_hints_;
- Hints closure_hints_;
-
- int nesting_level_ = 0;
-};
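The definitions of ContributeToJumpTargetEnvironment and IncorporateJumpTargetEnvironment are not part of this hunk, so only the comment in the class above describes their contract. As a hedged, self-contained sketch of that flow (all names and types below are stand-ins; the real code merges zone-allocated Environment objects keyed by jump_target_environments_):

#include <iostream>
#include <map>
#include <string>

std::map<int, std::string> jump_target_environments;
std::string current_environment = "E0";

// On a (forward-)branching bytecode targeting {target_offset}: remember the
// current environment, merging if another jump already contributed one.
void ContributeToJumpTargetEnvironment(int target_offset) {
  auto it = jump_target_environments.find(target_offset);
  if (it == jump_target_environments.end()) {
    jump_target_environments[target_offset] = current_environment;
  } else {
    it->second += " | " + current_environment;
  }
}

// When traversal reaches {target_offset}: fold the remembered environment back
// into the current one and drop the bookkeeping entry.
void IncorporateJumpTargetEnvironment(int target_offset) {
  auto it = jump_target_environments.find(target_offset);
  if (it != jump_target_environments.end()) {
    current_environment += " | " + it->second;
    jump_target_environments.erase(it);
  }
}

int main() {
  ContributeToJumpTargetEnvironment(42);     // e.g. a conditional jump to 42
  current_environment = "E1";                // the fall-through path continues
  IncorporateJumpTargetEnvironment(42);      // traversal arrives at offset 42
  std::cout << current_environment << "\n";  // prints "E1 | E0"
  return 0;
}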
-
-void RunSerializerForBackgroundCompilation(
- ZoneStats* zone_stats, JSHeapBroker* broker,
- CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset) {
- SerializerForBackgroundCompilation serializer(
- zone_stats, broker, dependencies, closure, flags, osr_offset);
- serializer.Run();
-}
-
-using BytecodeArrayIterator = interpreter::BytecodeArrayIterator;
-
-VirtualClosure::VirtualClosure(Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- Hints const& context_hints)
- : shared_(shared),
- feedback_vector_(feedback_vector),
- context_hints_(context_hints) {
- // The checked invariant rules out recursion and thus avoids complexity.
- CHECK(context_hints_.virtual_closures().IsEmpty());
-}
-
-VirtualClosure::VirtualClosure(Handle<JSFunction> function, Isolate* isolate,
- Zone* zone)
- : shared_(handle(function->shared(), isolate)),
- feedback_vector_(function->feedback_vector(), isolate),
- context_hints_(
- Hints::SingleConstant(handle(function->context(), isolate), zone)) {
- // The checked invariant rules out recursion and thus avoids complexity.
- CHECK(context_hints_.virtual_closures().IsEmpty());
-}
-
-CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
- Isolate* isolate, Zone* zone)
- : virtual_closure_(closure, isolate, zone), closure_(closure) {
- CHECK(closure->has_feedback_vector());
-}
-
-Hints Hints::Copy(Zone* zone) const {
- if (!IsAllocated()) return *this;
- Hints result;
- result.EnsureAllocated(zone);
- result.impl_->constants_ = impl_->constants_;
- result.impl_->maps_ = impl_->maps_;
- result.impl_->virtual_contexts_ = impl_->virtual_contexts_;
- result.impl_->virtual_closures_ = impl_->virtual_closures_;
- result.impl_->virtual_bound_functions_ = impl_->virtual_bound_functions_;
- return result;
-}
-
-bool Hints::operator==(Hints const& other) const {
- if (impl_ == other.impl_) return true;
- if (IsEmpty() && other.IsEmpty()) return true;
- return IsAllocated() && other.IsAllocated() &&
- constants() == other.constants() &&
- virtual_closures() == other.virtual_closures() &&
- maps() == other.maps() &&
- virtual_contexts() == other.virtual_contexts() &&
- virtual_bound_functions() == other.virtual_bound_functions();
-}
-
-bool Hints::operator!=(Hints const& other) const { return !(*this == other); }
-
-#ifdef ENABLE_SLOW_DCHECKS
-bool Hints::Includes(Hints const& other) const {
- if (impl_ == other.impl_ || other.IsEmpty()) return true;
- return IsAllocated() && constants().Includes(other.constants()) &&
- virtual_closures().Includes(other.virtual_closures()) &&
- maps().Includes(other.maps());
-}
-#endif
-
-Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) {
- Hints result;
- result.AddConstant(constant, zone, nullptr);
- return result;
-}
-
-Hints Hints::SingleMap(Handle<Map> map, Zone* zone) {
- Hints result;
- result.AddMap(map, zone, nullptr);
- return result;
-}
-
-ConstantsSet Hints::constants() const {
- return IsAllocated() ? impl_->constants_ : ConstantsSet();
-}
-
-MapsSet Hints::maps() const { return IsAllocated() ? impl_->maps_ : MapsSet(); }
-
-VirtualClosuresSet Hints::virtual_closures() const {
- return IsAllocated() ? impl_->virtual_closures_ : VirtualClosuresSet();
-}
-
-VirtualContextsSet Hints::virtual_contexts() const {
- return IsAllocated() ? impl_->virtual_contexts_ : VirtualContextsSet();
-}
-
-VirtualBoundFunctionsSet Hints::virtual_bound_functions() const {
- return IsAllocated() ? impl_->virtual_bound_functions_
- : VirtualBoundFunctionsSet();
-}
-
-void Hints::AddVirtualContext(VirtualContext const& virtual_context, Zone* zone,
- JSHeapBroker* broker) {
- EnsureAllocated(zone);
- if (impl_->virtual_contexts_.Size() >= kMaxHintsSize) {
- TRACE_BROKER_MISSING(broker,
- "opportunity - limit for virtual contexts reached.");
- return;
- }
- impl_->virtual_contexts_.Add(virtual_context, impl_->zone_);
-}
-
-void Hints::AddConstant(Handle<Object> constant, Zone* zone,
- JSHeapBroker* broker) {
- EnsureAllocated(zone);
- if (impl_->constants_.Size() >= kMaxHintsSize) {
- TRACE_BROKER_MISSING(broker, "opportunity - limit for constants reached.");
- return;
- }
- impl_->constants_.Add(constant, impl_->zone_);
-}
-
-void Hints::AddMap(Handle<Map> map, Zone* zone, JSHeapBroker* broker,
- bool check_zone_equality) {
- EnsureAllocated(zone, check_zone_equality);
- if (impl_->maps_.Size() >= kMaxHintsSize) {
- TRACE_BROKER_MISSING(broker, "opportunity - limit for maps reached.");
- return;
- }
- impl_->maps_.Add(map, impl_->zone_);
-}
-
-void Hints::AddVirtualClosure(VirtualClosure const& virtual_closure, Zone* zone,
- JSHeapBroker* broker) {
- EnsureAllocated(zone);
- if (impl_->virtual_closures_.Size() >= kMaxHintsSize) {
- TRACE_BROKER_MISSING(broker,
- "opportunity - limit for virtual closures reached.");
- return;
- }
- impl_->virtual_closures_.Add(virtual_closure, impl_->zone_);
-}
-
-void Hints::AddVirtualBoundFunction(VirtualBoundFunction const& bound_function,
- Zone* zone, JSHeapBroker* broker) {
- EnsureAllocated(zone);
- if (impl_->virtual_bound_functions_.Size() >= kMaxHintsSize) {
- TRACE_BROKER_MISSING(
- broker, "opportunity - limit for virtual bound functions reached.");
- return;
- }
- // TODO(mslekova): Consider filtering the hints in the added bound function,
- // for example: a) Remove any non-JS(Bound)Function constants, b) Truncate the
- // argument vector to the formal parameter count.
- impl_->virtual_bound_functions_.Add(bound_function, impl_->zone_);
-}
-
-void Hints::Add(Hints const& other, Zone* zone, JSHeapBroker* broker) {
- if (impl_ == other.impl_ || other.IsEmpty()) return;
- EnsureAllocated(zone);
- if (!Union(other)) {
- TRACE_BROKER_MISSING(broker, "opportunity - hints limit reached.");
- }
-}
-
-Hints Hints::CopyToParentZone(Zone* zone, JSHeapBroker* broker) const {
- if (!IsAllocated()) return *this;
-
- Hints result;
-
- for (auto const& x : constants()) result.AddConstant(x, zone, broker);
- for (auto const& x : maps()) result.AddMap(x, zone, broker);
- for (auto const& x : virtual_contexts())
- result.AddVirtualContext(x, zone, broker);
-
- // Adding hints from a child serializer run means copying data out of a zone
- // that's being destroyed. VirtualClosure and VirtualBoundFunction contain
- // zone-allocated data, so we have to make a deep copy to eliminate traces of
- // the dying zone.
- for (auto const& x : virtual_closures()) {
- VirtualClosure new_virtual_closure(
- x.shared(), x.feedback_vector(),
- x.context_hints().CopyToParentZone(zone, broker));
- result.AddVirtualClosure(new_virtual_closure, zone, broker);
- }
- for (auto const& x : virtual_bound_functions()) {
- HintsVector new_arguments_hints(zone);
- for (auto hint : x.bound_arguments) {
- new_arguments_hints.push_back(hint.CopyToParentZone(zone, broker));
- }
- VirtualBoundFunction new_bound_function(
- x.bound_target.CopyToParentZone(zone, broker), new_arguments_hints);
- result.AddVirtualBoundFunction(new_bound_function, zone, broker);
- }
-
- return result;
-}
-
-bool Hints::IsEmpty() const {
- if (!IsAllocated()) return true;
- return constants().IsEmpty() && maps().IsEmpty() &&
- virtual_closures().IsEmpty() && virtual_contexts().IsEmpty() &&
- virtual_bound_functions().IsEmpty();
-}
-
-std::ostream& operator<<(std::ostream& out,
- const VirtualContext& virtual_context) {
- out << "Distance " << virtual_context.distance << " from "
- << Brief(*virtual_context.context) << std::endl;
- return out;
-}
-
-std::ostream& operator<<(std::ostream& out, const Hints& hints);
-
-std::ostream& operator<<(std::ostream& out,
- const VirtualClosure& virtual_closure) {
- out << Brief(*virtual_closure.shared()) << std::endl;
- out << Brief(*virtual_closure.feedback_vector()) << std::endl;
- if (!virtual_closure.context_hints().IsEmpty()) {
- out << virtual_closure.context_hints() << std::endl;
- }
- return out;
-}
-
-std::ostream& operator<<(std::ostream& out,
- const VirtualBoundFunction& virtual_bound_function) {
- out << std::endl << " Target: " << virtual_bound_function.bound_target;
- out << " Arguments:" << std::endl;
- for (auto hint : virtual_bound_function.bound_arguments) {
- out << " " << hint;
- }
- return out;
-}
-
-std::ostream& operator<<(std::ostream& out, const Hints& hints) {
- out << "(impl_ = " << hints.impl_ << ")\n";
- for (Handle<Object> constant : hints.constants()) {
- out << " constant " << Brief(*constant) << std::endl;
- }
- for (Handle<Map> map : hints.maps()) {
- out << " map " << Brief(*map) << std::endl;
- }
- for (VirtualClosure const& virtual_closure : hints.virtual_closures()) {
- out << " virtual closure " << virtual_closure << std::endl;
- }
- for (VirtualContext const& virtual_context : hints.virtual_contexts()) {
- out << " virtual context " << virtual_context << std::endl;
- }
- for (VirtualBoundFunction const& virtual_bound_function :
- hints.virtual_bound_functions()) {
- out << " virtual bound function " << virtual_bound_function << std::endl;
- }
- return out;
-}
-
-void Hints::Reset(Hints* other, Zone* zone) {
- other->EnsureShareable(zone);
- *this = *other;
- DCHECK(IsAllocated());
-}
-
-class SerializerForBackgroundCompilation::Environment : public ZoneObject {
- public:
- Environment(Zone* zone, Isolate* isolate, CompilationSubject function);
- Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments,
- MissingArgumentsPolicy padding);
-
- bool IsDead() const { return !alive_; }
-
- void Kill() {
- DCHECK(!IsDead());
- alive_ = false;
- DCHECK(IsDead());
- }
-
- void Resurrect() {
- DCHECK(IsDead());
- alive_ = true;
- DCHECK(!IsDead());
- }
-
- // Merge {other} into {this} environment (leaving {other} unmodified).
- void Merge(Environment* other, Zone* zone, JSHeapBroker* broker);
-
- Hints const& current_context_hints() const { return current_context_hints_; }
- Hints const& accumulator_hints() const { return accumulator_hints_; }
-
- Hints& current_context_hints() { return current_context_hints_; }
- Hints& accumulator_hints() { return accumulator_hints_; }
- Hints& register_hints(interpreter::Register reg);
-
- private:
- friend std::ostream& operator<<(std::ostream& out, const Environment& env);
-
- Hints current_context_hints_;
- Hints accumulator_hints_;
-
- HintsVector parameters_hints_; // First parameter is the receiver.
- HintsVector locals_hints_;
-
- bool alive_ = true;
-};
-
-SerializerForBackgroundCompilation::Environment::Environment(
- Zone* zone, Isolate* isolate, CompilationSubject function)
- : parameters_hints_(function.virtual_closure()
- .shared()
- ->GetBytecodeArray(isolate)
- .parameter_count(),
- Hints(), zone),
- locals_hints_(function.virtual_closure()
- .shared()
- ->GetBytecodeArray(isolate)
- .register_count(),
- Hints(), zone) {
- // Consume the virtual_closure's context hint information.
- current_context_hints_ = function.virtual_closure().context_hints();
-}
-
-SerializerForBackgroundCompilation::Environment::Environment(
- Zone* zone, Isolate* isolate, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments,
- MissingArgumentsPolicy padding)
- : Environment(zone, isolate, function) {
- // Set the hints for the actually passed arguments, at most up to
- // the parameter_count.
- for (size_t i = 0; i < std::min(arguments.size(), parameters_hints_.size());
- ++i) {
- parameters_hints_[i] = arguments[i];
- }
-
- if (padding == kMissingArgumentsAreUndefined) {
- Hints const undefined_hint =
- Hints::SingleConstant(isolate->factory()->undefined_value(), zone);
- for (size_t i = arguments.size(); i < parameters_hints_.size(); ++i) {
- parameters_hints_[i] = undefined_hint;
- }
- } else {
- DCHECK_EQ(padding, kMissingArgumentsAreUnknown);
- }
-
- // Set hints for new_target.
- interpreter::Register new_target_reg =
- function.virtual_closure()
- .shared()
- ->GetBytecodeArray(isolate)
- .incoming_new_target_or_generator_register();
- if (new_target_reg.is_valid()) {
- Hints& hints = register_hints(new_target_reg);
- CHECK(hints.IsEmpty());
- if (new_target.has_value()) hints = *new_target;
- }
-}
-
-Hints& SerializerForBackgroundCompilation::register_hints(
- interpreter::Register reg) {
- if (reg.is_function_closure()) return closure_hints_;
- return environment()->register_hints(reg);
-}
-
-Hints& SerializerForBackgroundCompilation::Environment::register_hints(
- interpreter::Register reg) {
- if (reg.is_current_context()) return current_context_hints_;
- if (reg.is_parameter()) {
- return parameters_hints_[reg.ToParameterIndex(
- static_cast<int>(parameters_hints_.size()))];
- }
- DCHECK(!reg.is_function_closure());
- CHECK_LT(reg.index(), locals_hints_.size());
- return locals_hints_[reg.index()];
-}
-
-void SerializerForBackgroundCompilation::Environment::Merge(
- Environment* other, Zone* zone, JSHeapBroker* broker) {
- // {other} is guaranteed to have the same layout because it comes from an
- // earlier bytecode in the same function.
- DCHECK_EQ(parameters_hints_.size(), other->parameters_hints_.size());
- DCHECK_EQ(locals_hints_.size(), other->locals_hints_.size());
-
- if (IsDead()) {
- parameters_hints_ = other->parameters_hints_;
- locals_hints_ = other->locals_hints_;
- current_context_hints_ = other->current_context_hints_;
- accumulator_hints_ = other->accumulator_hints_;
- Resurrect();
- } else {
- for (size_t i = 0; i < parameters_hints_.size(); ++i) {
- parameters_hints_[i].Merge(other->parameters_hints_[i], zone, broker);
- }
- for (size_t i = 0; i < locals_hints_.size(); ++i) {
- locals_hints_[i].Merge(other->locals_hints_[i], zone, broker);
- }
- current_context_hints_.Merge(other->current_context_hints_, zone, broker);
- accumulator_hints_.Merge(other->accumulator_hints_, zone, broker);
- }
-
- CHECK(!IsDead());
-}
-
-bool Hints::Union(Hints const& other) {
- CHECK(IsAllocated());
- if (impl_->constants_.Size() + other.constants().Size() > kMaxHintsSize ||
- impl_->maps_.Size() + other.maps().Size() > kMaxHintsSize ||
- impl_->virtual_closures_.Size() + other.virtual_closures().Size() >
- kMaxHintsSize ||
- impl_->virtual_contexts_.Size() + other.virtual_contexts().Size() >
- kMaxHintsSize ||
- impl_->virtual_bound_functions_.Size() +
- other.virtual_bound_functions().Size() >
- kMaxHintsSize) {
- return false;
- }
- Zone* zone = impl_->zone_;
- impl_->constants_.Union(other.constants(), zone);
- impl_->maps_.Union(other.maps(), zone);
- impl_->virtual_closures_.Union(other.virtual_closures(), zone);
- impl_->virtual_contexts_.Union(other.virtual_contexts(), zone);
- impl_->virtual_bound_functions_.Union(other.virtual_bound_functions(), zone);
- return true;
-}
-
-void Hints::Merge(Hints const& other, Zone* zone, JSHeapBroker* broker) {
- if (impl_ == other.impl_) {
- return;
- }
- if (!IsAllocated()) {
- *this = other.Copy(zone);
- DCHECK(IsAllocated());
- return;
- }
- *this = this->Copy(zone);
- if (!Union(other)) {
- TRACE_BROKER_MISSING(broker, "opportunity - hints limit reached.");
- }
- DCHECK(IsAllocated());
-}
-
-std::ostream& operator<<(
- std::ostream& out,
- const SerializerForBackgroundCompilation::Environment& env) {
- std::ostringstream output_stream;
-
- if (env.IsDead()) {
- output_stream << "dead\n";
- } else {
- output_stream << "alive\n";
- for (size_t i = 0; i < env.parameters_hints_.size(); ++i) {
- Hints const& hints = env.parameters_hints_[i];
- if (!hints.IsEmpty()) {
- if (i == 0) {
- output_stream << "Hints for <this>: ";
- } else {
- output_stream << "Hints for a" << i - 1 << ": ";
- }
- output_stream << hints;
- }
- }
- for (size_t i = 0; i < env.locals_hints_.size(); ++i) {
- Hints const& hints = env.locals_hints_[i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for r" << i << ": " << hints;
- }
- }
- }
-
- if (!env.current_context_hints().IsEmpty()) {
- output_stream << "Hints for <context>: " << env.current_context_hints();
- }
- if (!env.accumulator_hints().IsEmpty()) {
- output_stream << "Hints for <accumulator>: " << env.accumulator_hints();
- }
-
- out << output_stream.str();
- return out;
-}
-
-SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- ZoneStats* zone_stats, JSHeapBroker* broker,
- CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset)
- : broker_(broker),
- dependencies_(dependencies),
- zone_scope_(zone_stats, ZONE_NAME),
- flags_(flags),
- function_(closure, broker->isolate(), zone()),
- osr_offset_(osr_offset),
- jump_target_environments_(zone()),
- environment_(zone()->New<Environment>(
- zone(), broker_->isolate(),
- CompilationSubject(closure, broker_->isolate(), zone()))),
- arguments_(zone()) {
- closure_hints_.AddConstant(closure, zone(), broker_);
- JSFunctionRef closure_ref = MakeRef(broker, closure);
- closure_ref.Serialize();
- closure_ref.SerializeCodeAndFeedback();
-
- TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_);
- TRACE_BROKER(broker_, "Initial environment:\n" << *environment_);
-}
-
-SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- ZoneStats* zone_stats, JSHeapBroker* broker,
- CompilationDependencies* dependencies, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments,
- MissingArgumentsPolicy padding,
- SerializerForBackgroundCompilationFlags flags, int nesting_level)
- : broker_(broker),
- dependencies_(dependencies),
- zone_scope_(zone_stats, ZONE_NAME),
- flags_(flags),
- function_(function.virtual_closure()),
- osr_offset_(BytecodeOffset::None()),
- jump_target_environments_(zone()),
- environment_(zone()->New<Environment>(zone(), broker_->isolate(),
- function, new_target, arguments,
- padding)),
- arguments_(arguments),
- nesting_level_(nesting_level) {
- Handle<JSFunction> closure;
- if (function.closure().ToHandle(&closure)) {
- closure_hints_.AddConstant(closure, zone(), broker);
- JSFunctionRef closure_ref = MakeRef(broker, closure);
- closure_ref.Serialize();
- closure_ref.SerializeCodeAndFeedback();
- } else {
- closure_hints_.AddVirtualClosure(function.virtual_closure(), zone(),
- broker);
- }
-
- TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_);
- TRACE_BROKER(broker_, "Initial environment:\n" << *environment_);
-}
-
-bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
- ProcessedFeedback const& feedback) {
- DCHECK(!environment()->IsDead());
- if (!(flags() &
- SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) {
- return false;
- }
- if (!osr_offset().IsNone()) {
- // Exclude OSR from this optimization because we might end up skipping the
- // OSR entry point. TODO(neis): Support OSR?
- return false;
- }
- if (feedback.IsInsufficient()) {
- environment()->Kill();
- return true;
- }
- return false;
-}
-
-Hints SerializerForBackgroundCompilation::Run() {
- TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
- if (nesting_level_ >= FLAG_max_serializer_nesting) {
- TRACE_BROKER_MISSING(
- broker(),
- "opportunity - Reached max nesting level for "
- "SerializerForBackgroundCompilation::Run, bailing out.\n");
- return Hints();
- }
-
- TRACE_BROKER_MEMORY(broker(), "[serializer start] Broker zone usage: "
- << broker()->zone()->allocation_size());
- SharedFunctionInfoRef shared = MakeRef(broker(), function().shared());
- FeedbackVectorRef feedback_vector_ref = MakeRef(broker(), feedback_vector());
- if (!broker()->ShouldBeSerializedForCompilation(shared, feedback_vector_ref,
- arguments_)) {
- TRACE_BROKER(broker(),
- "opportunity - Already ran serializer for SharedFunctionInfo "
- << Brief(*shared.object()) << ", bailing out.\n");
- return Hints();
- }
-
- {
- HintsVector arguments_copy_in_broker_zone(broker()->zone());
- for (auto const& hints : arguments_) {
- arguments_copy_in_broker_zone.push_back(
- hints.CopyToParentZone(broker()->zone(), broker()));
- }
- broker()->SetSerializedForCompilation(shared, feedback_vector_ref,
- arguments_copy_in_broker_zone);
- }
-
- // We eagerly call {EnsureSourcePositionsAvailable} for all serialized
- // SFIs while still on the main thread. Source positions will later be used
- // by JSInliner::ReduceJSCall.
- if (flags() &
- SerializerForBackgroundCompilationFlag::kCollectSourcePositions) {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(broker()->isolate(),
- shared.object());
- }
-
- feedback_vector_ref.Serialize();
- TraverseBytecode();
-
- if (return_value_hints().IsEmpty()) {
- TRACE_BROKER(broker(), "Return value hints: none");
- } else {
- TRACE_BROKER(broker(), "Return value hints: " << return_value_hints());
- }
- TRACE_BROKER_MEMORY(broker(), "[serializer end] Broker zone usage: "
- << broker()->zone()->allocation_size());
- return return_value_hints();
-}
-
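-// Matches the current bytecode offset against the try ranges of the bytecode
-// array's handler table and reports the corresponding exception handler
-// offsets, so that the serializer can propagate its environment to handler
-// entry points (see TraverseBytecode below).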
-class HandlerRangeMatcher {
- public:
- HandlerRangeMatcher(BytecodeArrayIterator const& bytecode_iterator,
- Handle<BytecodeArray> bytecode_array)
- : bytecode_iterator_(bytecode_iterator) {
- HandlerTable table(*bytecode_array);
- for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) {
- ranges_.insert({table.GetRangeStart(i), table.GetRangeEnd(i),
- table.GetRangeHandler(i)});
- }
- ranges_iterator_ = ranges_.cbegin();
- }
-
- using OffsetReporter = std::function<void(int handler_offset)>;
-
- void HandlerOffsetForCurrentPosition(const OffsetReporter& offset_reporter) {
- CHECK(!bytecode_iterator_.done());
- const int current_offset = bytecode_iterator_.current_offset();
-
- // Remove outdated try ranges from the stack.
- while (!stack_.empty()) {
- const int end = stack_.top().end;
- if (end < current_offset) {
- stack_.pop();
- } else {
- break;
- }
- }
-
- // Advance the iterator and maintain the stack.
- while (ranges_iterator_ != ranges_.cend() &&
- ranges_iterator_->start <= current_offset) {
- if (ranges_iterator_->end >= current_offset) {
- stack_.push(*ranges_iterator_);
- if (ranges_iterator_->start == current_offset) {
- offset_reporter(ranges_iterator_->handler);
- }
- }
- ranges_iterator_++;
- }
-
- if (!stack_.empty() && stack_.top().start < current_offset) {
- offset_reporter(stack_.top().handler);
- }
- }
-
- private:
- BytecodeArrayIterator const& bytecode_iterator_;
-
- struct Range {
- int start;
- int end;
- int handler;
- friend bool operator<(const Range& a, const Range& b) {
- if (a.start < b.start) return true;
- if (a.start == b.start) {
- if (a.end < b.end) return true;
- CHECK_GT(a.end, b.end);
- }
- return false;
- }
- };
- std::set<Range> ranges_;
- std::set<Range>::const_iterator ranges_iterator_;
- std::stack<Range> stack_;
-};
-
-Handle<FeedbackVector> SerializerForBackgroundCompilation::feedback_vector()
- const {
- return function().feedback_vector();
-}
-
-Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
- const {
- return handle(function().shared()->GetBytecodeArray(broker()->isolate()),
- broker()->isolate());
-}
-
-void SerializerForBackgroundCompilation::TraverseBytecode() {
- bytecode_analysis_.emplace(bytecode_array(), zone(), osr_offset(), false);
-
- BytecodeArrayIterator iterator(bytecode_array());
- HandlerRangeMatcher try_start_matcher(iterator, bytecode_array());
-
- for (; !iterator.done(); iterator.Advance()) {
- int const current_offset = iterator.current_offset();
-
- // TODO(mvstanton): we might want to ignore the current environment if we
- // are at the start of a catch handler.
- IncorporateJumpTargetEnvironment(current_offset);
-
- TRACE_BROKER(broker(),
- "Handling bytecode: " << current_offset << " "
- << iterator.current_bytecode());
- TRACE_BROKER(broker(), "Current environment: " << *environment());
-
- if (environment()->IsDead()) {
- continue; // Skip this bytecode since TF won't generate code for it.
- }
-
- auto save_handler_environments = [&](int handler_offset) {
- auto it = jump_target_environments_.find(handler_offset);
- if (it == jump_target_environments_.end()) {
- ContributeToJumpTargetEnvironment(handler_offset);
- TRACE_BROKER(broker(),
- "Handler offset for current pos: " << handler_offset);
- }
- };
- try_start_matcher.HandlerOffsetForCurrentPosition(
- save_handler_environments);
-
- if (bytecode_analysis().IsLoopHeader(current_offset)) {
- // Graph builder might insert jumps to resume targets in the loop body.
- LoopInfo const& loop_info =
- bytecode_analysis().GetLoopInfoFor(current_offset);
- for (const auto& target : loop_info.resume_jump_targets()) {
- ContributeToJumpTargetEnvironment(target.target_offset());
- }
- }
-
- interpreter::Bytecode current_bytecode = iterator.current_bytecode();
- switch (current_bytecode) {
-#define DEFINE_BYTECODE_CASE(name) \
- case interpreter::Bytecode::k##name: \
- Visit##name(&iterator); \
- break;
- SUPPORTED_BYTECODE_LIST(DEFINE_BYTECODE_CASE)
-#undef DEFINE_BYTECODE_CASE
-
-#define DEFINE_SHORT_STAR_CASE(Name, ...) case interpreter::Bytecode::k##Name:
- SHORT_STAR_BYTECODE_LIST(DEFINE_SHORT_STAR_CASE)
-#undef DEFINE_SHORT_STAR_CASE
- VisitShortStar(interpreter::Register::FromShortStar(current_bytecode));
- break;
- }
- }
-}
-
-void SerializerForBackgroundCompilation::VisitGetIterator(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- FeedbackSlot load_slot = iterator->GetSlotOperand(1);
- FeedbackSlot call_slot = iterator->GetSlotOperand(2);
-
- Handle<Name> name = broker()->isolate()->factory()->iterator_symbol();
- ProcessNamedPropertyAccess(receiver, MakeRef(broker(), name), load_slot,
- AccessMode::kLoad);
- if (environment()->IsDead()) return;
-
- Hints callee;
- HintsVector args = PrepareArgumentsHints(receiver);
-
- ProcessCallOrConstruct(callee, base::nullopt, &args, call_slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
- BytecodeArrayIterator* iterator) {
- interpreter::Register dst = iterator->GetRegisterOperand(0);
- Hints result_hints;
- for (auto constant : environment()->accumulator_hints().constants()) {
- // For JSNativeContextSpecialization::ReduceJSGetSuperConstructor.
- if (!constant->IsJSFunction()) continue;
- MapRef map = MakeRef(broker(), handle(HeapObject::cast(*constant).map(),
- broker()->isolate()));
- map.SerializePrototype();
- ObjectRef proto = map.prototype().value();
- if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) {
- result_hints.AddConstant(proto.object(), zone(), broker());
- }
- }
- register_hints(dst) = result_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitGetTemplateObject(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- Handle<TemplateObjectDescription>::cast(
- iterator->GetConstantForIndexOperand(0, broker()->isolate())));
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- FeedbackSource source(feedback_vector(), slot);
-
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForTemplateObject(source);
- if (feedback.IsInsufficient()) {
- environment()->accumulator_hints() = Hints();
- } else {
- JSArrayRef template_object = feedback.AsTemplateObject().value();
- environment()->accumulator_hints() =
- Hints::SingleConstant(template_object.object(), zone());
- }
-}
-
-void SerializerForBackgroundCompilation::VisitLdaTrue(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints() = Hints::SingleConstant(
- broker()->isolate()->factory()->true_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaFalse(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints() = Hints::SingleConstant(
- broker()->isolate()->factory()->false_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaTheHole(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints() = Hints::SingleConstant(
- broker()->isolate()->factory()->the_hole_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaUndefined(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints() = Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaNull(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints() = Hints::SingleConstant(
- broker()->isolate()->factory()->null_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaZero(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints() = Hints::SingleConstant(
- handle(Smi::FromInt(0), broker()->isolate()), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaSmi(
- BytecodeArrayIterator* iterator) {
- Handle<Smi> smi(Smi::FromInt(iterator->GetImmediateOperand(0)),
- broker()->isolate());
- environment()->accumulator_hints() = Hints::SingleConstant(smi, zone());
-}
-
-void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
- BytecodeArrayIterator* iterator) {
- Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0);
- // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and
- // JSNativeContextSpecialization::ReduceJSResolvePromise.
- switch (functionId) {
- case Runtime::kInlineAsyncFunctionResolve: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncFunctionResolve));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- size_t reg_count = iterator->GetRegisterCountOperand(2);
- CHECK_EQ(reg_count, 3);
- HintsVector args = PrepareArgumentsHints(first_reg, reg_count);
- Hints const& resolution_hints = args[1]; // The resolution object.
- ProcessHintsForPromiseResolve(resolution_hints);
- return;
- }
- case Runtime::kInlineAsyncGeneratorReject:
- case Runtime::kAsyncGeneratorReject: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncGeneratorReject));
- break;
- }
- case Runtime::kInlineAsyncGeneratorResolve:
- case Runtime::kAsyncGeneratorResolve: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncGeneratorResolve));
- break;
- }
- case Runtime::kInlineAsyncGeneratorYield:
- case Runtime::kAsyncGeneratorYield: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncGeneratorYield));
- break;
- }
- case Runtime::kInlineAsyncGeneratorAwaitUncaught:
- case Runtime::kAsyncGeneratorAwaitUncaught: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncGeneratorAwaitUncaught));
- break;
- }
- case Runtime::kInlineAsyncGeneratorAwaitCaught:
- case Runtime::kAsyncGeneratorAwaitCaught: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncGeneratorAwaitCaught));
- break;
- }
- case Runtime::kInlineAsyncFunctionAwaitUncaught:
- case Runtime::kAsyncFunctionAwaitUncaught: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncFunctionAwaitUncaught));
- break;
- }
- case Runtime::kInlineAsyncFunctionAwaitCaught:
- case Runtime::kAsyncFunctionAwaitCaught: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncFunctionAwaitCaught));
- break;
- }
- case Runtime::kInlineAsyncFunctionReject:
- case Runtime::kAsyncFunctionReject: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncFunctionReject));
- break;
- }
- case Runtime::kAsyncFunctionResolve: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kAsyncFunctionResolve));
- break;
- }
- case Runtime::kInlineCopyDataProperties:
- case Runtime::kCopyDataProperties: {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kCopyDataProperties));
- break;
- }
- default: {
- break;
- }
- }
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitLdaConstant(
- BytecodeArrayIterator* iterator) {
- Handle<Object> constant =
- iterator->GetConstantForIndexOperand(0, broker()->isolate());
- // TODO(v8:7790): FixedArrays still need to be serialized until they are
- // moved to kNeverSerialized.
- if (!broker()->is_concurrent_inlining() || constant->IsFixedArray()) {
- MakeRef(broker(), constant);
- }
- environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
-}
-
-void SerializerForBackgroundCompilation::VisitPushContext(
- BytecodeArrayIterator* iterator) {
- register_hints(iterator->GetRegisterOperand(0))
- .Reset(&environment()->current_context_hints(), zone());
- environment()->current_context_hints().Reset(
- &environment()->accumulator_hints(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitPopContext(
- BytecodeArrayIterator* iterator) {
- environment()->current_context_hints().Reset(
- &register_hints(iterator->GetRegisterOperand(0)), zone());
-}
-
-void SerializerForBackgroundCompilation::ProcessImmutableLoad(
- ContextRef const& context_ref, int slot, ContextProcessingMode mode,
- Hints* result_hints) {
- DCHECK_EQ(mode, kSerializeSlot);
- base::Optional<ObjectRef> slot_value = context_ref.get(slot);
-
- // If requested, record the object as a hint for the result value.
- if (result_hints != nullptr && slot_value.has_value()) {
- result_hints->AddConstant(slot_value.value().object(), zone(), broker());
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessContextAccess(
- Hints const& context_hints, int slot, int depth, ContextProcessingMode mode,
- Hints* result_hints) {
- // This function is for JSContextSpecialization::ReduceJSLoadContext and
- // ReduceJSStoreContext. Those reductions attempt to eliminate as many
- // loads as possible by making use of constant Context objects. In the
- // case of an immutable load, ReduceJSLoadContext even attempts to load
- // the value at {slot}, replacing the load with a constant.
- for (auto x : context_hints.constants()) {
- if (x->IsContext()) {
- // Walk this context to the given depth and serialize the slot found.
- ContextRef context_ref = MakeRef(broker(), Handle<Context>::cast(x));
- size_t remaining_depth = depth;
- context_ref = context_ref.previous(&remaining_depth);
- if (remaining_depth == 0 && mode != kIgnoreSlot) {
- ProcessImmutableLoad(context_ref, slot, mode, result_hints);
- }
- }
- }
- for (auto x : context_hints.virtual_contexts()) {
- if (x.distance <= static_cast<unsigned int>(depth)) {
- ContextRef context_ref =
- MakeRef(broker(), Handle<Context>::cast(x.context));
- size_t remaining_depth = depth - x.distance;
- context_ref = context_ref.previous(&remaining_depth);
- if (remaining_depth == 0 && mode != kIgnoreSlot) {
- ProcessImmutableLoad(context_ref, slot, mode, result_hints);
- }
- }
- }
-}
-
-void SerializerForBackgroundCompilation::VisitLdaContextSlot(
- BytecodeArrayIterator* iterator) {
- Hints const& context_hints = register_hints(iterator->GetRegisterOperand(0));
- const int slot = iterator->GetIndexOperand(1);
- const int depth = iterator->GetUnsignedImmediateOperand(2);
- Hints new_accumulator_hints;
- ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
- &new_accumulator_hints);
- environment()->accumulator_hints() = new_accumulator_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
- BytecodeArrayIterator* iterator) {
- const int slot = iterator->GetIndexOperand(0);
- const int depth = 0;
- Hints const& context_hints = environment()->current_context_hints();
- Hints new_accumulator_hints;
- ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
- &new_accumulator_hints);
- environment()->accumulator_hints() = new_accumulator_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
- BytecodeArrayIterator* iterator) {
- const int slot = iterator->GetIndexOperand(1);
- const int depth = iterator->GetUnsignedImmediateOperand(2);
- Hints const& context_hints = register_hints(iterator->GetRegisterOperand(0));
- Hints new_accumulator_hints;
- ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
- &new_accumulator_hints);
- environment()->accumulator_hints() = new_accumulator_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
- BytecodeArrayIterator* iterator) {
- const int slot = iterator->GetIndexOperand(0);
- const int depth = 0;
- Hints const& context_hints = environment()->current_context_hints();
- Hints new_accumulator_hints;
- ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
- &new_accumulator_hints);
- environment()->accumulator_hints() = new_accumulator_hints;
-}
-
-void SerializerForBackgroundCompilation::ProcessModuleVariableAccess(
- BytecodeArrayIterator* iterator) {
- const int slot = Context::EXTENSION_INDEX;
- const int depth = iterator->GetUnsignedImmediateOperand(1);
- Hints const& context_hints = environment()->current_context_hints();
-
- Hints result_hints;
- ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
- &result_hints);
- for (Handle<Object> constant : result_hints.constants()) {
- MakeRef(broker(), constant);
- }
-}
-
-void SerializerForBackgroundCompilation::VisitLdaModuleVariable(
- BytecodeArrayIterator* iterator) {
- ProcessModuleVariableAccess(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitStaModuleVariable(
- BytecodeArrayIterator* iterator) {
- ProcessModuleVariableAccess(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitStaLookupSlot(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitStaContextSlot(
- BytecodeArrayIterator* iterator) {
- const int slot = iterator->GetIndexOperand(1);
- const int depth = iterator->GetUnsignedImmediateOperand(2);
- Hints const& hints = register_hints(iterator->GetRegisterOperand(0));
- ProcessContextAccess(hints, slot, depth, kIgnoreSlot);
-}
-
-void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot(
- BytecodeArrayIterator* iterator) {
- const int slot = iterator->GetIndexOperand(0);
- const int depth = 0;
- Hints const& context_hints = environment()->current_context_hints();
- ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
-}
-
-void SerializerForBackgroundCompilation::VisitLdar(
- BytecodeArrayIterator* iterator) {
- environment()->accumulator_hints().Reset(
- &register_hints(iterator->GetRegisterOperand(0)), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitStar(
- BytecodeArrayIterator* iterator) {
- interpreter::Register reg = iterator->GetRegisterOperand(0);
- register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitShortStar(
- interpreter::Register reg) {
- register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitMov(
- BytecodeArrayIterator* iterator) {
- interpreter::Register src = iterator->GetRegisterOperand(0);
- interpreter::Register dst = iterator->GetRegisterOperand(1);
- register_hints(dst).Reset(&register_hints(src), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
- BytecodeArrayIterator* iterator) {
- Handle<String> constant_pattern = Handle<String>::cast(
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- MakeRef(broker(), constant_pattern);
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- FeedbackSource source(feedback_vector(), slot);
- broker()->ProcessFeedbackForRegExpLiteral(source);
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateArrayLiteral(
- BytecodeArrayIterator* iterator) {
- Handle<ArrayBoilerplateDescription> array_boilerplate_description =
- Handle<ArrayBoilerplateDescription>::cast(
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- MakeRef(broker(), array_boilerplate_description);
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- FeedbackSource source(feedback_vector(), slot);
- broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateEmptyArrayLiteral(
- BytecodeArrayIterator* iterator) {
- FeedbackSlot slot = iterator->GetSlotOperand(0);
- FeedbackSource source(feedback_vector(), slot);
- broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateObjectLiteral(
- BytecodeArrayIterator* iterator) {
- Handle<ObjectBoilerplateDescription> constant_properties =
- Handle<ObjectBoilerplateDescription>::cast(
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- MakeRef(broker(), constant_properties);
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- FeedbackSource source(feedback_vector(), slot);
- broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateFunctionContext(
- BytecodeArrayIterator* iterator) {
- ProcessCreateContext(iterator, 0);
-}
-
-void SerializerForBackgroundCompilation::VisitCreateBlockContext(
- BytecodeArrayIterator* iterator) {
- ProcessCreateContext(iterator, 0);
-}
-
-void SerializerForBackgroundCompilation::VisitCreateEvalContext(
- BytecodeArrayIterator* iterator) {
- ProcessCreateContext(iterator, 0);
-}
-
-void SerializerForBackgroundCompilation::VisitCreateWithContext(
- BytecodeArrayIterator* iterator) {
- ProcessCreateContext(iterator, 1);
-}
-
-void SerializerForBackgroundCompilation::VisitCreateCatchContext(
- BytecodeArrayIterator* iterator) {
- ProcessCreateContext(iterator, 1);
-}
-
-void SerializerForBackgroundCompilation::VisitForInNext(
- BytecodeArrayIterator* iterator) {
- FeedbackSlot slot = iterator->GetSlotOperand(3);
- ProcessForIn(slot);
-}
-
-void SerializerForBackgroundCompilation::VisitForInPrepare(
- BytecodeArrayIterator* iterator) {
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessForIn(slot);
-}
-
-void SerializerForBackgroundCompilation::ProcessCreateContext(
- interpreter::BytecodeArrayIterator* iterator, int scopeinfo_operand_index) {
- Hints const& current_context_hints = environment()->current_context_hints();
- Hints result_hints;
-
- // For each constant context, we must create a virtual context from
- // it of distance one.
- for (auto x : current_context_hints.constants()) {
- if (x->IsContext()) {
- Handle<Context> as_context(Handle<Context>::cast(x));
- result_hints.AddVirtualContext(VirtualContext(1, as_context), zone(),
- broker());
- }
- }
-
- // For each virtual context, we must create a virtual context from
- // it of distance {existing distance} + 1.
- for (auto x : current_context_hints.virtual_contexts()) {
- result_hints.AddVirtualContext(VirtualContext(x.distance + 1, x.context),
- zone(), broker());
- }
-
- environment()->accumulator_hints() = result_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitCreateClosure(
- BytecodeArrayIterator* iterator) {
- Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- Handle<FeedbackCell> feedback_cell =
- feedback_vector()->GetClosureFeedbackCell(iterator->GetIndexOperand(1));
- MakeRef(broker(), feedback_cell);
- Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
- MakeRef(broker(), cell_value);
-
- Hints result_hints;
- if (cell_value->IsFeedbackVector()) {
- VirtualClosure virtual_closure(shared,
- Handle<FeedbackVector>::cast(cell_value),
- environment()->current_context_hints());
- result_hints.AddVirtualClosure(virtual_closure, zone(), broker());
- }
- environment()->accumulator_hints() = result_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
- ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg,
- reg_count, slot);
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- FeedbackSlot slot = iterator->GetSlotOperand(1);
-
- Hints const receiver = Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone());
- HintsVector parameters({receiver}, zone());
-
- ProcessCallOrConstruct(callee, base::nullopt, &parameters, slot,
- kMissingArgumentsAreUndefined);
-}
-
-namespace {
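-// Base case of the variadic recursion below: no more hints to append.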
-void PrepareArgumentsHintsInternal(Zone* zone, HintsVector* args) {}
-
-template <typename... MoreHints>
-void PrepareArgumentsHintsInternal(Zone* zone, HintsVector* args, Hints* hints,
- MoreHints... more) {
- hints->EnsureShareable(zone);
- args->push_back(*hints);
- PrepareArgumentsHintsInternal(zone, args, more...);
-}
-} // namespace
-
-template <typename... MoreHints>
-HintsVector SerializerForBackgroundCompilation::PrepareArgumentsHints(
- Hints* hints, MoreHints... more) {
- HintsVector args(zone());
- PrepareArgumentsHintsInternal(zone(), &args, hints, more...);
- return args;
-}
-
-HintsVector SerializerForBackgroundCompilation::PrepareArgumentsHints(
- interpreter::Register first, size_t count) {
- HintsVector result(zone());
- const int reg_base = first.index();
- for (int i = 0; i < static_cast<int>(count); ++i) {
- Hints& hints = register_hints(interpreter::Register(reg_base + i));
- hints.EnsureShareable(zone());
- result.push_back(hints);
- }
- return result;
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- Hints* arg0 = &register_hints(iterator->GetRegisterOperand(1));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
-
- Hints receiver = Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone());
- HintsVector args = PrepareArgumentsHints(&receiver, arg0);
-
- ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- Hints* arg0 = &register_hints(iterator->GetRegisterOperand(1));
- Hints* arg1 = &register_hints(iterator->GetRegisterOperand(2));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
-
- Hints receiver = Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone());
- HintsVector args = PrepareArgumentsHints(&receiver, arg0, arg1);
-
- ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
- ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
- slot);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
- ProcessCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined, callee,
- first_reg, reg_count, slot);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty0(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(1));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
-
- HintsVector args = PrepareArgumentsHints(receiver);
-
- ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty1(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(1));
- Hints* arg0 = &register_hints(iterator->GetRegisterOperand(2));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
-
- HintsVector args = PrepareArgumentsHints(receiver, arg0);
-
- ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty2(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(1));
- Hints* arg0 = &register_hints(iterator->GetRegisterOperand(2));
- Hints* arg1 = &register_hints(iterator->GetRegisterOperand(3));
- FeedbackSlot slot = iterator->GetSlotOperand(4);
-
- HintsVector args = PrepareArgumentsHints(receiver, arg0, arg1);
-
- ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallWithSpread(
- BytecodeArrayIterator* iterator) {
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
- ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
- slot, kMissingArgumentsAreUnknown);
-}
-
-void SerializerForBackgroundCompilation::VisitCallJSRuntime(
- BytecodeArrayIterator* iterator) {
- const int runtime_index = iterator->GetNativeContextIndexOperand(0);
- ObjectRef constant =
- broker()->target_native_context().get(runtime_index).value();
- Hints const callee = Hints::SingleConstant(constant.object(), zone());
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
- ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg,
- reg_count, FeedbackSlot::Invalid());
-}
-
-Hints SerializerForBackgroundCompilation::RunChildSerializer(
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments, MissingArgumentsPolicy padding) {
- SerializerForBackgroundCompilation child_serializer(
- zone_scope_.zone_stats(), broker(), dependencies(), function, new_target,
- arguments, padding, flags(), nesting_level_ + 1);
- Hints result = child_serializer.Run();
- // The Hints returned by the call to Run are allocated in the zone
- // created by the child serializer. Adding those hints to a hints
- // object created in our zone will preserve the information.
- return result.CopyToParentZone(zone(), broker());
-}
-
-void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
- Callee const& callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding, Hints* result_hints) {
- Handle<SharedFunctionInfo> shared = callee.shared(broker()->isolate());
- if (shared->IsApiFunction()) {
- ProcessApiCall(shared, arguments);
- DCHECK_NE(
- shared->GetInlineability(broker()->isolate(), broker()->is_turboprop()),
- SharedFunctionInfo::kIsInlineable);
- } else if (shared->HasBuiltinId()) {
- ProcessBuiltinCall(shared, new_target, arguments, speculation_mode, padding,
- result_hints);
- DCHECK_NE(
- shared->GetInlineability(broker()->isolate(), broker()->is_turboprop()),
- SharedFunctionInfo::kIsInlineable);
- } else if ((flags() &
- SerializerForBackgroundCompilationFlag::kEnableTurboInlining) &&
- shared->GetInlineability(broker()->isolate(),
- broker()->is_turboprop()) ==
- SharedFunctionInfo::kIsInlineable &&
- callee.HasFeedbackVector()) {
- CompilationSubject subject =
- callee.ToCompilationSubject(broker()->isolate(), zone());
- result_hints->Add(
- RunChildSerializer(subject, new_target, arguments, padding), zone(),
- broker());
- }
-}
-
-namespace {
-// Returns the innermost bound target and inserts all bound arguments and
-// {original_arguments} into {expanded_arguments} in the appropriate order.
-JSReceiverRef UnrollBoundFunction(JSBoundFunctionRef const& bound_function,
- JSHeapBroker* broker,
- const HintsVector& original_arguments,
- HintsVector* expanded_arguments, Zone* zone) {
- DCHECK(expanded_arguments->empty());
-
- JSReceiverRef target = bound_function.AsJSReceiver();
- HintsVector reversed_bound_arguments(zone);
- for (; target.IsJSBoundFunction();
- target = target.AsJSBoundFunction().bound_target_function().value()) {
- for (int i = target.AsJSBoundFunction().bound_arguments().length() - 1;
- i >= 0; --i) {
- Hints const arg = Hints::SingleConstant(
- target.AsJSBoundFunction().bound_arguments().get(i).object(), zone);
- reversed_bound_arguments.push_back(arg);
- }
- Hints const arg = Hints::SingleConstant(
- target.AsJSBoundFunction().bound_this().value().object(), zone);
- reversed_bound_arguments.push_back(arg);
- }
-
- expanded_arguments->insert(expanded_arguments->end(),
- reversed_bound_arguments.rbegin(),
- reversed_bound_arguments.rend());
- expanded_arguments->insert(expanded_arguments->end(),
- original_arguments.begin(),
- original_arguments.end());
-
- return target;
-}
-} // namespace
-
-void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
- Handle<Object> callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding, Hints* result_hints) {
- const HintsVector* actual_arguments = &arguments;
- HintsVector expanded_arguments(zone());
- if (callee->IsJSBoundFunction()) {
- JSBoundFunctionRef bound_function =
- MakeRef(broker(), Handle<JSBoundFunction>::cast(callee));
- if (!bound_function.Serialize()) return;
- callee = UnrollBoundFunction(bound_function, broker(), arguments,
- &expanded_arguments, zone())
- .object();
- actual_arguments = &expanded_arguments;
- }
- if (!callee->IsJSFunction()) return;
-
- JSFunctionRef function = MakeRef(broker(), Handle<JSFunction>::cast(callee));
- function.Serialize();
- Callee new_callee(function.object());
- ProcessCalleeForCallOrConstruct(new_callee, new_target, *actual_arguments,
- speculation_mode, padding, result_hints);
-}
-
-void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
- Hints callee, base::Optional<Hints> new_target, HintsVector* arguments,
- FeedbackSlot slot, MissingArgumentsPolicy padding) {
- SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation;
-
- if (!slot.IsInvalid()) {
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForCall(source);
- if (BailoutOnUninitialized(feedback)) return;
-
- if (!feedback.IsInsufficient()) {
- // Incorporate feedback into hints copy to simplify processing.
- // TODO(neis): Modify the original hints instead?
- speculation_mode = feedback.AsCall().speculation_mode();
- // Incorporate target feedback into hints copy to simplify processing.
- base::Optional<HeapObjectRef> target = feedback.AsCall().target();
- if (target.has_value() &&
- (target->map().is_callable() || target->IsFeedbackCell())) {
- callee = callee.Copy(zone());
- // TODO(mvstanton): if the map isn't callable then we have an allocation
- // site, and it may make sense to add the Array JSFunction constant.
- if (new_target.has_value()) {
- // Construct; feedback is new_target, which often is also the callee.
- new_target = new_target->Copy(zone());
- new_target->AddConstant(target->object(), zone(), broker());
- callee.AddConstant(target->object(), zone(), broker());
- } else {
- // Call; target is feedback cell or callee.
- if (target->IsFeedbackCell() && target->AsFeedbackCell().value()) {
- FeedbackVectorRef vector = *target->AsFeedbackCell().value();
- vector.Serialize();
- VirtualClosure virtual_closure(
- vector.shared_function_info().object(), vector.object(),
- Hints());
- callee.AddVirtualClosure(virtual_closure, zone(), broker());
- } else {
- callee.AddConstant(target->object(), zone(), broker());
- }
- }
- }
- }
- }
-
- Hints result_hints_from_new_target;
- if (new_target.has_value()) {
- ProcessNewTargetForConstruct(*new_target, &result_hints_from_new_target);
- // These hints are a good guess at the resulting object, so they are useful
- // for both the accumulator and the constructor call's receiver. The latter
- // is still missing completely in {arguments} so add it now.
- arguments->insert(arguments->begin(), result_hints_from_new_target);
- }
-
- // For JSNativeContextSpecialization::InferRootMap
- Hints new_accumulator_hints = result_hints_from_new_target.Copy(zone());
-
- ProcessCallOrConstructRecursive(callee, new_target, *arguments,
- speculation_mode, padding,
- &new_accumulator_hints);
- environment()->accumulator_hints() = new_accumulator_hints;
-}
-
-void SerializerForBackgroundCompilation::ProcessCallOrConstructRecursive(
- Hints const& callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding, Hints* result_hints) {
- // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
- for (auto constant : callee.constants()) {
- ProcessCalleeForCallOrConstruct(constant, new_target, arguments,
- speculation_mode, padding, result_hints);
- }
-
- // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
- for (auto hint : callee.virtual_closures()) {
- ProcessCalleeForCallOrConstruct(Callee(hint), new_target, arguments,
- speculation_mode, padding, result_hints);
- }
-
- for (auto hint : callee.virtual_bound_functions()) {
- HintsVector new_arguments = hint.bound_arguments;
- new_arguments.insert(new_arguments.end(), arguments.begin(),
- arguments.end());
- ProcessCallOrConstructRecursive(hint.bound_target, new_target,
- new_arguments, speculation_mode, padding,
- result_hints);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessNewTargetForConstruct(
- Hints const& new_target_hints, Hints* result_hints) {
- for (Handle<Object> target : new_target_hints.constants()) {
- if (target->IsJSBoundFunction()) {
- // Unroll the bound function.
- while (target->IsJSBoundFunction()) {
- target = handle(
- Handle<JSBoundFunction>::cast(target)->bound_target_function(),
- broker()->isolate());
- }
- }
- if (target->IsJSFunction()) {
- Handle<JSFunction> new_target(Handle<JSFunction>::cast(target));
- if (new_target->has_prototype_slot(broker()->isolate()) &&
- new_target->has_initial_map()) {
- result_hints->AddMap(
- handle(new_target->initial_map(), broker()->isolate()), zone(),
- broker());
- }
- }
- }
-
- for (auto const& virtual_bound_function :
- new_target_hints.virtual_bound_functions()) {
- ProcessNewTargetForConstruct(virtual_bound_function.bound_target,
- result_hints);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessCallVarArgs(
- ConvertReceiverMode receiver_mode, Hints const& callee,
- interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
- MissingArgumentsPolicy padding) {
- HintsVector args = PrepareArgumentsHints(first_reg, reg_count);
- // The receiver is either given in the first register or it is implicitly
- // the {undefined} value.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- args.insert(args.begin(),
- Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone()));
- }
- ProcessCallOrConstruct(callee, base::nullopt, &args, slot, padding);
-}
-
-void SerializerForBackgroundCompilation::ProcessApiCall(
- Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
- for (const auto b :
- {Builtin::kCallFunctionTemplate_CheckAccess,
- Builtin::kCallFunctionTemplate_CheckCompatibleReceiver,
- Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver}) {
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(b));
- }
- FunctionTemplateInfoRef target_template_info =
- MakeRef(broker(),
- FunctionTemplateInfo::cast(target->function_data(kAcquireLoad)));
- if (!target_template_info.has_call_code()) return;
- target_template_info.SerializeCallCode();
-
- if (target_template_info.accept_any_receiver() &&
- target_template_info.is_signature_undefined()) {
- return;
- }
-
- if (arguments.empty()) return;
- Hints const& receiver_hints = arguments[0];
- for (auto hint : receiver_hints.constants()) {
- if (hint->IsUndefined()) {
- // The receiver is the global proxy.
- Handle<JSGlobalProxy> global_proxy =
- broker()->target_native_context().global_proxy_object().object();
- ProcessReceiverMapForApiCall(
- target_template_info,
- handle(global_proxy->map(), broker()->isolate()));
- continue;
- }
-
- if (!hint->IsJSReceiver()) continue;
- Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
-
- ProcessReceiverMapForApiCall(target_template_info,
- handle(receiver->map(), broker()->isolate()));
- }
-
- for (auto receiver_map : receiver_hints.maps()) {
- ProcessReceiverMapForApiCall(target_template_info, receiver_map);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
- FunctionTemplateInfoRef target, Handle<Map> receiver) {
- if (!receiver->is_access_check_needed()) {
- MapRef receiver_map = MakeRef(broker(), receiver);
- TRACE_BROKER(broker(), "Serializing holder for target: " << target);
- target.LookupHolderOfExpectedType(receiver_map,
- SerializationPolicy::kSerializeIfNeeded);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate(
- Hints const& prototype) {
- for (Handle<Object> constant_handle : prototype.constants()) {
- ObjectRef constant = MakeRef(broker(), constant_handle);
- if (constant.IsJSObject()) constant.AsJSObject().SerializeObjectCreateMap();
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessBuiltinCall(
- Handle<SharedFunctionInfo> target, base::Optional<Hints> new_target,
- const HintsVector& arguments, SpeculationMode speculation_mode,
- MissingArgumentsPolicy padding, Hints* result_hints) {
- DCHECK(target->HasBuiltinId());
- const Builtin builtin = target->builtin_id();
- const char* name = Builtins::name(builtin);
- TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
- switch (builtin) {
- case Builtin::kObjectCreate: {
- if (arguments.size() >= 2) {
- ProcessHintsForObjectCreate(arguments[1]);
- } else {
- ProcessHintsForObjectCreate(Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone()));
- }
- break;
- }
- case Builtin::kPromisePrototypeCatch: {
- // For JSCallReducer::ReducePromisePrototypeCatch.
- if (speculation_mode != SpeculationMode::kDisallowSpeculation) {
- if (arguments.size() >= 1) {
- ProcessMapHintsForPromises(arguments[0]);
- }
- }
- break;
- }
- case Builtin::kPromisePrototypeFinally: {
- // For JSCallReducer::ReducePromisePrototypeFinally.
- if (speculation_mode != SpeculationMode::kDisallowSpeculation) {
- if (arguments.size() >= 1) {
- ProcessMapHintsForPromises(arguments[0]);
- }
- MakeRef(
- broker(),
- broker()->isolate()->factory()->promise_catch_finally_shared_fun());
- MakeRef(
- broker(),
- broker()->isolate()->factory()->promise_then_finally_shared_fun());
- }
- break;
- }
- case Builtin::kPromisePrototypeThen: {
- // For JSCallReducer::ReducePromisePrototypeThen.
- if (speculation_mode != SpeculationMode::kDisallowSpeculation) {
- if (arguments.size() >= 1) {
- ProcessMapHintsForPromises(arguments[0]);
- }
- }
- break;
- }
- case Builtin::kPromiseResolveTrampoline:
- // For JSCallReducer::ReducePromiseInternalResolve and
- // JSNativeContextSpecialization::ReduceJSResolvePromise.
- if (arguments.size() >= 1) {
- Hints const resolution_hints =
- arguments.size() >= 2
- ? arguments[1]
- : Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(),
- zone());
- ProcessHintsForPromiseResolve(resolution_hints);
- }
- break;
- case Builtin::kRegExpPrototypeTest:
- case Builtin::kRegExpPrototypeTestFast:
- // For JSCallReducer::ReduceRegExpPrototypeTest.
- if (arguments.size() >= 1 &&
- speculation_mode != SpeculationMode::kDisallowSpeculation) {
- Hints const& regexp_hints = arguments[0];
- ProcessHintsForRegExpTest(regexp_hints);
- }
- break;
- case Builtin::kArrayEvery:
- case Builtin::kArrayFilter:
- case Builtin::kArrayForEach:
- case Builtin::kArrayPrototypeFind:
- case Builtin::kArrayPrototypeFindIndex:
- case Builtin::kArrayMap:
- case Builtin::kArraySome:
- if (arguments.size() >= 2 &&
- speculation_mode != SpeculationMode::kDisallowSpeculation) {
- Hints const& callback = arguments[1];
- // "Call(callbackfn, T, « kValue, k, O »)"
- HintsVector new_arguments(zone());
- new_arguments.push_back(
- arguments.size() < 3
- ? Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone())
- : arguments[2]); // T
- new_arguments.push_back(Hints()); // kValue
- new_arguments.push_back(Hints()); // k
- new_arguments.push_back(arguments[0]); // O
- for (auto constant : callback.constants()) {
- ProcessCalleeForCallOrConstruct(
- constant, base::nullopt, new_arguments, speculation_mode,
- kMissingArgumentsAreUndefined, result_hints);
- }
- for (auto virtual_closure : callback.virtual_closures()) {
- ProcessCalleeForCallOrConstruct(
- Callee(virtual_closure), base::nullopt, new_arguments,
- speculation_mode, kMissingArgumentsAreUndefined, result_hints);
- }
- }
- break;
- case Builtin::kArrayReduce:
- case Builtin::kArrayReduceRight:
- if (arguments.size() >= 2 &&
- speculation_mode != SpeculationMode::kDisallowSpeculation) {
- Hints const& callback = arguments[1];
- // "Call(callbackfn, undefined, « accumulator, kValue, k, O »)"
- HintsVector new_arguments(zone());
- new_arguments.push_back(Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone()));
- new_arguments.push_back(Hints()); // accumulator
- new_arguments.push_back(Hints()); // kValue
- new_arguments.push_back(Hints()); // k
- new_arguments.push_back(arguments[0]); // O
- for (auto constant : callback.constants()) {
- ProcessCalleeForCallOrConstruct(
- constant, base::nullopt, new_arguments, speculation_mode,
- kMissingArgumentsAreUndefined, result_hints);
- }
- for (auto virtual_closure : callback.virtual_closures()) {
- ProcessCalleeForCallOrConstruct(
- Callee(virtual_closure), base::nullopt, new_arguments,
- speculation_mode, kMissingArgumentsAreUndefined, result_hints);
- }
- }
- break;
- case Builtin::kFunctionPrototypeApply:
- if (arguments.size() >= 1) {
- // Drop hints for all arguments except the user-given receiver.
- Hints const new_receiver =
- arguments.size() >= 2
- ? arguments[1]
- : Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(),
- zone());
- HintsVector new_arguments({new_receiver}, zone());
- for (auto constant : arguments[0].constants()) {
- ProcessCalleeForCallOrConstruct(
- constant, base::nullopt, new_arguments, speculation_mode,
- kMissingArgumentsAreUnknown, result_hints);
- }
- for (auto const& virtual_closure : arguments[0].virtual_closures()) {
- ProcessCalleeForCallOrConstruct(
- Callee(virtual_closure), base::nullopt, new_arguments,
- speculation_mode, kMissingArgumentsAreUnknown, result_hints);
- }
- }
- break;
- case Builtin::kPromiseConstructor:
- if (arguments.size() >= 1) {
- // "Call(executor, undefined, « resolvingFunctions.[[Resolve]],
- // resolvingFunctions.[[Reject]] »)"
- HintsVector new_arguments(
- {Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone())},
- zone());
- for (auto constant : arguments[0].constants()) {
- ProcessCalleeForCallOrConstruct(
- constant, base::nullopt, new_arguments,
- SpeculationMode::kDisallowSpeculation,
- kMissingArgumentsAreUnknown, result_hints);
- }
- for (auto const& virtual_closure : arguments[0].virtual_closures()) {
- ProcessCalleeForCallOrConstruct(
- Callee(virtual_closure), base::nullopt, new_arguments,
- SpeculationMode::kDisallowSpeculation,
- kMissingArgumentsAreUnknown, result_hints);
- }
- }
- MakeRef(broker(), broker()
- ->isolate()
- ->factory()
- ->promise_capability_default_reject_shared_fun());
- MakeRef(broker(), broker()
- ->isolate()
- ->factory()
- ->promise_capability_default_resolve_shared_fun());
-
- break;
- case Builtin::kFunctionPrototypeCall:
- if (arguments.size() >= 1) {
- HintsVector new_arguments(arguments.begin() + 1, arguments.end(),
- zone());
- for (auto constant : arguments[0].constants()) {
- ProcessCalleeForCallOrConstruct(constant, base::nullopt,
- new_arguments, speculation_mode,
- padding, result_hints);
- }
- for (auto const& virtual_closure : arguments[0].virtual_closures()) {
- ProcessCalleeForCallOrConstruct(
- Callee(virtual_closure), base::nullopt, new_arguments,
- speculation_mode, padding, result_hints);
- }
- }
- break;
- case Builtin::kReflectApply:
- if (arguments.size() >= 2) {
- // Drop hints for all arguments except the user-given receiver.
- Hints const new_receiver =
- arguments.size() >= 3
- ? arguments[2]
- : Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(),
- zone());
- HintsVector new_arguments({new_receiver}, zone());
- for (auto constant : arguments[1].constants()) {
- ProcessCalleeForCallOrConstruct(
- constant, base::nullopt, new_arguments, speculation_mode,
- kMissingArgumentsAreUnknown, result_hints);
- }
- for (auto const& virtual_closure : arguments[1].virtual_closures()) {
- ProcessCalleeForCallOrConstruct(
- Callee(virtual_closure), base::nullopt, new_arguments,
- speculation_mode, kMissingArgumentsAreUnknown, result_hints);
- }
- }
- break;
-
- case Builtin::kReflectConstruct:
- if (arguments.size() >= 2) {
- for (auto constant : arguments[1].constants()) {
- if (constant->IsJSFunction()) {
- MakeRef(broker(), Handle<JSFunction>::cast(constant)).Serialize();
- }
- }
- }
- break;
- case Builtin::kObjectPrototypeIsPrototypeOf:
- if (arguments.size() >= 2) {
- ProcessHintsForHasInPrototypeChain(arguments[1]);
- }
- break;
- case Builtin::kFunctionPrototypeHasInstance:
- // For JSCallReducer::ReduceFunctionPrototypeHasInstance.
- if (arguments.size() >= 2) {
- ProcessHintsForOrdinaryHasInstance(arguments[0], arguments[1]);
- }
- break;
- case Builtin::kFastFunctionPrototypeBind:
- if (arguments.size() >= 1 &&
- speculation_mode != SpeculationMode::kDisallowSpeculation) {
- Hints const& bound_target = arguments[0];
- ProcessHintsForFunctionBind(bound_target);
- HintsVector new_arguments(arguments.begin() + 1, arguments.end(),
- zone());
- result_hints->AddVirtualBoundFunction(
- VirtualBoundFunction(bound_target, new_arguments), zone(),
- broker());
-
- broker()
- ->target_native_context()
- .bound_function_with_constructor_map()
- .SerializePrototype();
- broker()
- ->target_native_context()
- .bound_function_without_constructor_map()
- .SerializePrototype();
- }
- break;
- case Builtin::kObjectGetPrototypeOf:
- case Builtin::kReflectGetPrototypeOf:
- if (arguments.size() >= 2) {
- ProcessHintsForObjectGetPrototype(arguments[1]);
- } else {
- Hints const undefined_hint = Hints::SingleConstant(
- broker()->isolate()->factory()->undefined_value(), zone());
- ProcessHintsForObjectGetPrototype(undefined_hint);
- }
- break;
- case Builtin::kObjectPrototypeGetProto:
- if (arguments.size() >= 1) {
- ProcessHintsForObjectGetPrototype(arguments[0]);
- }
- break;
- case Builtin::kMapIteratorPrototypeNext:
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kOrderedHashTableHealIndex));
- MakeRef<FixedArray>(
- broker(), broker()->isolate()->factory()->empty_ordered_hash_map());
- break;
- case Builtin::kSetIteratorPrototypeNext:
- MakeRef(broker(), broker()->isolate()->builtins()->code_handle(
- Builtin::kOrderedHashTableHealIndex));
- MakeRef<FixedArray>(
- broker(), broker()->isolate()->factory()->empty_ordered_hash_set());
- break;
- default:
- break;
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessHintsForOrdinaryHasInstance(
- Hints const& constructor_hints, Hints const& instance_hints) {
- bool walk_prototypes = false;
- for (Handle<Object> constructor : constructor_hints.constants()) {
- // For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance.
- if (constructor->IsHeapObject()) {
- ProcessConstantForOrdinaryHasInstance(
- MakeRef(broker(), Handle<HeapObject>::cast(constructor)),
- &walk_prototypes);
- }
- }
- // For JSNativeContextSpecialization::ReduceJSHasInPrototypeChain.
- if (walk_prototypes) ProcessHintsForHasInPrototypeChain(instance_hints);
-}
-
-void SerializerForBackgroundCompilation::ProcessHintsForHasInPrototypeChain(
- Hints const& instance_hints) {
- auto processMap = [&](Handle<Map> map_handle) {
- MapRef map = MakeRef(broker(), map_handle);
- while (map.IsJSObjectMap()) {
- map.SerializePrototype();
- map = map.prototype().value().map();
- }
- };
-
- for (auto hint : instance_hints.constants()) {
- if (!hint->IsHeapObject()) continue;
- Handle<HeapObject> object(Handle<HeapObject>::cast(hint));
- processMap(handle(object->map(), broker()->isolate()));
- }
- for (auto map_hint : instance_hints.maps()) {
- processMap(map_hint);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
- Hints const& resolution_hints) {
- auto processMap = [&](Handle<Map> map) {
- broker()->GetPropertyAccessInfo(
- MakeRef(broker(), map),
- MakeRef(broker(), broker()->isolate()->factory()->then_string()),
- AccessMode::kLoad, dependencies(),
- SerializationPolicy::kSerializeIfNeeded);
- };
-
- for (auto hint : resolution_hints.constants()) {
- if (!hint->IsHeapObject()) continue;
- Handle<HeapObject> resolution(Handle<HeapObject>::cast(hint));
- processMap(handle(resolution->map(), broker()->isolate()));
- }
- for (auto map_hint : resolution_hints.maps()) {
- processMap(map_hint);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
- Hints const& receiver_hints) {
- // We need to serialize the prototypes on each receiver map.
- for (auto constant : receiver_hints.constants()) {
- if (!constant->IsJSPromise()) continue;
- Handle<Map> map(Handle<HeapObject>::cast(constant)->map(),
- broker()->isolate());
- MakeRef(broker(), map).SerializePrototype();
- }
- for (auto map : receiver_hints.maps()) {
- if (!map->IsJSPromiseMap()) continue;
- MakeRef(broker(), map).SerializePrototype();
- }
-}
-
-PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
- MapRef map) {
- PropertyAccessInfo ai_exec = broker()->GetPropertyAccessInfo(
- map, MakeRef(broker(), broker()->isolate()->factory()->exec_string()),
- AccessMode::kLoad, dependencies(),
- SerializationPolicy::kSerializeIfNeeded);
-
- Handle<JSObject> holder;
- if (ai_exec.IsFastDataConstant() && ai_exec.holder().ToHandle(&holder)) {
- // The property is on the prototype chain.
- JSObjectRef holder_ref = MakeRef(broker(), holder);
- holder_ref.GetOwnFastDataProperty(ai_exec.field_representation(),
- ai_exec.field_index(), nullptr,
- SerializationPolicy::kSerializeIfNeeded);
- }
- return ai_exec;
-}
-
-void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
- Hints const& regexp_hints) {
- for (auto hint : regexp_hints.constants()) {
- if (!hint->IsJSRegExp()) continue;
- Handle<JSObject> regexp(Handle<JSObject>::cast(hint));
- Handle<Map> regexp_map(regexp->map(), broker()->isolate());
- PropertyAccessInfo ai_exec =
- ProcessMapForRegExpTest(MakeRef(broker(), regexp_map));
- Handle<JSObject> holder;
- if (ai_exec.IsFastDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
- // The property is on the object itself.
- JSObjectRef holder_ref = MakeRef(broker(), regexp);
- holder_ref.GetOwnFastDataProperty(
- ai_exec.field_representation(), ai_exec.field_index(), nullptr,
- SerializationPolicy::kSerializeIfNeeded);
- }
- }
-
- for (auto map : regexp_hints.maps()) {
- if (!map->IsJSRegExpMap()) continue;
- ProcessMapForRegExpTest(MakeRef(broker(), map));
- }
-}
-
-namespace {
-void ProcessMapForFunctionBind(MapRef map) {
- map.SerializePrototype();
- int min_nof_descriptors = std::max({JSFunction::kLengthDescriptorIndex,
- JSFunction::kNameDescriptorIndex}) +
- 1;
- if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) {
- map.SerializeOwnDescriptor(
- InternalIndex(JSFunctionOrBoundFunction::kLengthDescriptorIndex));
- map.SerializeOwnDescriptor(
- InternalIndex(JSFunctionOrBoundFunction::kNameDescriptorIndex));
- }
-}
-} // namespace
-
-void SerializerForBackgroundCompilation::ProcessHintsForFunctionBind(
- Hints const& receiver_hints) {
- for (auto constant : receiver_hints.constants()) {
- if (constant->IsJSFunction()) {
- JSFunctionRef function =
- MakeRef(broker(), Handle<JSFunction>::cast(constant));
- function.Serialize();
- ProcessMapForFunctionBind(function.map());
- } else if (constant->IsJSBoundFunction()) {
- JSBoundFunctionRef function =
- MakeRef(broker(), Handle<JSBoundFunction>::cast(constant));
- function.Serialize();
- ProcessMapForFunctionBind(function.map());
- }
- }
-
- for (auto map : receiver_hints.maps()) {
- if (!map->IsJSFunctionMap() && !map->IsJSBoundFunctionMap()) continue;
- ProcessMapForFunctionBind(MakeRef(broker(), map));
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessHintsForObjectGetPrototype(
- Hints const& object_hints) {
- for (auto constant : object_hints.constants()) {
- if (!constant->IsHeapObject()) continue;
- HeapObjectRef object =
- MakeRef(broker(), Handle<HeapObject>::cast(constant));
- object.map().SerializePrototype();
- }
-
- for (auto map : object_hints.maps()) {
- MakeRef(broker(), map).SerializePrototype();
- }
-}
-
-void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment(
- int target_offset) {
- auto it = jump_target_environments_.find(target_offset);
- if (it == jump_target_environments_.end()) {
- jump_target_environments_[target_offset] =
- zone()->New<Environment>(*environment());
- } else {
- it->second->Merge(environment(), zone(), broker());
- }
-}
-
-void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment(
- int target_offset) {
- auto it = jump_target_environments_.find(target_offset);
- if (it != jump_target_environments_.end()) {
- environment()->Merge(it->second, zone(), broker());
- jump_target_environments_.erase(it);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessJump(
- interpreter::BytecodeArrayIterator* iterator) {
- int jump_target = iterator->GetJumpTargetOffset();
- if (iterator->current_offset() < jump_target) {
- ContributeToJumpTargetEnvironment(jump_target);
- }
-}
-
-void SerializerForBackgroundCompilation::VisitReturn(
- BytecodeArrayIterator* iterator) {
- return_value_hints().Add(environment()->accumulator_hints(), zone(),
- broker());
- environment()->Kill();
-}
-
-void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback(
- interpreter::BytecodeArrayIterator* iterator) {
- interpreter::JumpTableTargetOffsets targets =
- iterator->GetJumpTableTargetOffsets();
- for (interpreter::JumpTableTargetOffset target : targets) {
- ContributeToJumpTargetEnvironment(target.target_offset);
- }
-}
-
-void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
- interpreter::BytecodeArrayIterator* iterator) {
- for (const auto& target : bytecode_analysis().resume_jump_targets()) {
- ContributeToJumpTargetEnvironment(target.target_offset());
- }
-}
-
-void SerializerForBackgroundCompilation::VisitConstruct(
- BytecodeArrayIterator* iterator) {
- Hints& new_target = environment()->accumulator_hints();
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- size_t reg_count = iterator->GetRegisterCountOperand(2);
- FeedbackSlot slot = iterator->GetSlotOperand(3);
-
- HintsVector args = PrepareArgumentsHints(first_reg, reg_count);
-
- ProcessCallOrConstruct(callee, new_target, &args, slot,
- kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitConstructWithSpread(
- BytecodeArrayIterator* iterator) {
- Hints const& new_target = environment()->accumulator_hints();
- Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
- interpreter::Register first_reg = iterator->GetRegisterOperand(1);
- size_t reg_count = iterator->GetRegisterCountOperand(2);
- FeedbackSlot slot = iterator->GetSlotOperand(3);
-
- DCHECK_GT(reg_count, 0);
- reg_count--; // Pop the spread element.
- HintsVector args = PrepareArgumentsHints(first_reg, reg_count);
-
- ProcessCallOrConstruct(callee, new_target, &args, slot,
- kMissingArgumentsAreUnknown);
-}
-
-void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
- bool is_load) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForGlobalAccess(source);
-
- if (is_load) {
- Hints result_hints;
- if (feedback.kind() == ProcessedFeedback::kGlobalAccess) {
- // We may be able to contribute to accumulator constant hints.
- base::Optional<ObjectRef> value =
- feedback.AsGlobalAccess().GetConstantHint();
- if (value.has_value()) {
- result_hints.AddConstant(value->object(), zone(), broker());
- }
- } else {
- DCHECK(feedback.IsInsufficient());
- }
- environment()->accumulator_hints() = result_hints;
- }
-}
-
-void SerializerForBackgroundCompilation::VisitLdaGlobal(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessGlobalAccess(slot, true);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
- BytecodeArrayIterator* iterator) {
- VisitLdaGlobal(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaLookupSlot(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitLdaLookupSlotInsideTypeof(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
- int depth) {
- // For BytecodeGraphBuilder::CheckContextExtensions.
- Hints const& context_hints = environment()->current_context_hints();
- for (int i = 0; i < depth; i++) {
- ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
- kSerializeSlot);
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot(
- BytecodeArrayIterator* iterator) {
- ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2));
- // TODO(neis): BytecodeGraphBuilder may insert a JSLoadGlobal.
- VisitLdaGlobal(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
- BytecodeArrayIterator* iterator) {
- ProcessLdaLookupGlobalSlot(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
- BytecodeArrayIterator* iterator) {
- ProcessLdaLookupGlobalSlot(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitStaGlobal(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessGlobalAccess(slot, false);
-}
-
-void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
- BytecodeArrayIterator* iterator) {
- const int slot_index = iterator->GetIndexOperand(1);
- const int depth = iterator->GetUnsignedImmediateOperand(2);
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- ProcessCheckContextExtensions(depth);
- environment()->accumulator_hints() = Hints();
- ProcessContextAccess(environment()->current_context_hints(), slot_index,
- depth, kIgnoreSlot);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
- BytecodeArrayIterator* iterator) {
- ProcessLdaLookupContextSlot(iterator);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
- BytecodeArrayIterator* iterator) {
- ProcessLdaLookupContextSlot(iterator);
-}
-
-void SerializerForBackgroundCompilation::ProcessCompareOperation(
- FeedbackSlot slot) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(function().feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForCompareOperation(source);
- if (BailoutOnUninitialized(feedback)) return;
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::ProcessForIn(FeedbackSlot slot) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback = broker()->ProcessFeedbackForForIn(source);
- if (BailoutOnUninitialized(feedback)) return;
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::ProcessUnaryOrBinaryOperation(
- FeedbackSlot slot, bool honor_bailout_on_uninitialized) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- // Internally, V8 also uses binary-op feedback for unary ops.
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForBinaryOperation(source);
- if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) {
- return;
- }
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
- Hints* receiver, base::Optional<MapRef> receiver_map,
- MapRef lookup_start_object_map, NameRef const& name, AccessMode access_mode,
- base::Optional<JSObjectRef> concrete_receiver, Hints* result_hints) {
- DCHECK_IMPLIES(concrete_receiver.has_value(), receiver_map.has_value());
-
- {
- Handle<Map> map;
- if (!Map::TryUpdate(broker()->isolate(), lookup_start_object_map.object())
- .ToHandle(&map) ||
- map->is_abandoned_prototype_map()) {
- return;
- }
- lookup_start_object_map = MakeRef(broker(), map);
- }
- CHECK(!lookup_start_object_map.is_deprecated());
-
- // For JSNativeContextSpecialization::InferRootMap
- lookup_start_object_map.SerializeRootMap();
-
- // For JSNativeContextSpecialization::ReduceNamedAccess.
- JSGlobalProxyRef global_proxy =
- broker()->target_native_context().global_proxy_object();
- JSGlobalObjectRef global_object =
- broker()->target_native_context().global_object();
- if (lookup_start_object_map.equals(global_proxy.map())) {
- base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
- name, SerializationPolicy::kSerializeIfNeeded);
- if (cell.has_value()) {
- CHECK(cell->Serialize());
- if (access_mode == AccessMode::kLoad) {
- result_hints->AddConstant(
- handle(cell->object()->value(), broker()->isolate()), zone(),
- broker());
- }
- }
- }
-
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- lookup_start_object_map, name, access_mode, dependencies(),
- SerializationPolicy::kSerializeIfNeeded);
-
- // For JSNativeContextSpecialization::InlinePropertySetterCall
- // and InlinePropertyGetterCall.
- if ((access_info.IsFastAccessorConstant() ||
- access_info.IsDictionaryProtoAccessorConstant()) &&
- !access_info.constant().is_null()) {
- if (access_info.constant()->IsJSFunction()) {
- JSFunctionRef function =
- MakeRef(broker(), Handle<JSFunction>::cast(access_info.constant()));
-
- if (receiver_map.has_value()) {
- // For JSCallReducer and JSInlining(Heuristic).
- HintsVector arguments(
- {Hints::SingleMap(receiver_map->object(), zone())}, zone());
- // In the case of a setter any added result hints won't make sense, but
- // they will be ignored anyway by Process*PropertyAccess due to the
- // access mode not being kLoad.
- ProcessCalleeForCallOrConstruct(
- function.object(), base::nullopt, arguments,
- SpeculationMode::kDisallowSpeculation,
- kMissingArgumentsAreUndefined, result_hints);
-
- // For JSCallReducer::ReduceCallApiFunction.
- Handle<SharedFunctionInfo> sfi = function.shared().object();
- if (sfi->IsApiFunction()) {
- FunctionTemplateInfoRef fti_ref =
- MakeRef(broker(), sfi->get_api_func_data());
- if (fti_ref.has_call_code()) {
- fti_ref.SerializeCallCode();
- ProcessReceiverMapForApiCall(fti_ref, receiver_map->object());
- }
- }
- }
- } else if (access_info.constant()->IsJSBoundFunction()) {
- // For JSCallReducer::ReduceJSCall.
- JSBoundFunctionRef function = MakeRef(
- broker(), Handle<JSBoundFunction>::cast(access_info.constant()));
- function.Serialize();
- } else {
- FunctionTemplateInfoRef fti = MakeRef(
- broker(), FunctionTemplateInfo::cast(*access_info.constant()));
- if (fti.has_call_code()) fti.SerializeCallCode();
- }
- } else if (access_info.IsModuleExport()) {
- // For JSNativeContextSpecialization::BuildPropertyLoad
- DCHECK(!access_info.constant().is_null());
- MakeRef(broker(), Handle<Cell>::cast(access_info.constant()));
- }
-
- switch (access_mode) {
- case AccessMode::kLoad:
- // For PropertyAccessBuilder::TryBuildLoadConstantDataField and
- // PropertyAccessBuilder::BuildLoadDictPrototypeConstant
- if (access_info.IsFastDataConstant() ||
- access_info.IsDictionaryProtoDataConstant()) {
- base::Optional<JSObjectRef> holder;
- Handle<JSObject> prototype;
- if (access_info.holder().ToHandle(&prototype)) {
- holder = MakeRef(broker(), prototype);
- } else {
- CHECK_IMPLIES(concrete_receiver.has_value(),
- concrete_receiver->map().equals(*receiver_map));
- holder = concrete_receiver;
- }
-
- if (holder.has_value()) {
- SerializationPolicy policy = SerializationPolicy::kSerializeIfNeeded;
- base::Optional<ObjectRef> constant =
- access_info.IsFastDataConstant()
- ? holder->GetOwnFastDataProperty(
- access_info.field_representation(),
- access_info.field_index(), nullptr, policy)
- : holder->GetOwnDictionaryProperty(
- access_info.dictionary_index(), nullptr, policy);
- if (constant.has_value()) {
- result_hints->AddConstant(constant->object(), zone(), broker());
- }
- }
- }
- break;
- case AccessMode::kStore:
- case AccessMode::kStoreInLiteral:
- // For MapInference (StoreField case).
- if (access_info.IsDataField() || access_info.IsFastDataConstant()) {
- Handle<Map> transition_map;
- if (access_info.transition_map().ToHandle(&transition_map)) {
- MapRef map_ref = MakeRef(broker(), transition_map);
- TRACE_BROKER(broker(), "Propagating transition map "
- << map_ref << " to receiver hints.");
- receiver->AddMap(transition_map, zone(), broker_, false);
- }
- }
- break;
- case AccessMode::kHas:
- break;
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessMinimorphicPropertyAccess(
- MinimorphicLoadPropertyAccessFeedback const& feedback,
- FeedbackSource const& source) {
- broker()->GetPropertyAccessInfo(feedback, source,
- SerializationPolicy::kSerializeIfNeeded);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaKeyedProperty(
- BytecodeArrayIterator* iterator) {
- Hints const& key = environment()->accumulator_hints();
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kLoad, true);
-}
-
-void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
- Hints* receiver, Hints const& key, FeedbackSlot slot,
- AccessMode access_mode, bool honor_bailout_on_uninitialized) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForPropertyAccess(source, access_mode,
- base::nullopt);
- if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) {
- return;
- }
-
- Hints new_accumulator_hints;
- switch (feedback.kind()) {
- case ProcessedFeedback::kElementAccess:
- ProcessElementAccess(*receiver, key, feedback.AsElementAccess(),
- access_mode);
- break;
- case ProcessedFeedback::kNamedAccess:
- ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode,
- &new_accumulator_hints);
- break;
- case ProcessedFeedback::kInsufficient:
- break;
- default:
- UNREACHABLE();
- }
-
- if (access_mode == AccessMode::kLoad) {
- environment()->accumulator_hints() = new_accumulator_hints;
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
- Hints* receiver, NameRef const& name, FeedbackSlot slot,
- AccessMode access_mode) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
- if (BailoutOnUninitialized(feedback)) return;
-
- Hints new_accumulator_hints;
- switch (feedback.kind()) {
- case ProcessedFeedback::kNamedAccess:
- DCHECK(name.equals(feedback.AsNamedAccess().name()));
- ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode,
- &new_accumulator_hints);
- break;
- case ProcessedFeedback::kMinimorphicPropertyAccess:
- DCHECK(name.equals(feedback.AsMinimorphicPropertyAccess().name()));
- ProcessMinimorphicPropertyAccess(feedback.AsMinimorphicPropertyAccess(),
- source);
- break;
- case ProcessedFeedback::kInsufficient:
- break;
- default:
- UNREACHABLE();
- }
-
- if (access_mode == AccessMode::kLoad) {
- environment()->accumulator_hints() = new_accumulator_hints;
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessNamedSuperPropertyAccess(
- Hints* receiver, NameRef const& name, FeedbackSlot slot,
- AccessMode access_mode) {
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
- if (BailoutOnUninitialized(feedback)) return;
-
- Hints new_accumulator_hints;
- switch (feedback.kind()) {
- case ProcessedFeedback::kNamedAccess:
- DCHECK(name.equals(feedback.AsNamedAccess().name()));
- ProcessNamedSuperAccess(receiver, feedback.AsNamedAccess(), access_mode,
- &new_accumulator_hints);
- break;
- case ProcessedFeedback::kMinimorphicPropertyAccess:
- DCHECK(name.equals(feedback.AsMinimorphicPropertyAccess().name()));
- ProcessMinimorphicPropertyAccess(feedback.AsMinimorphicPropertyAccess(),
- source);
- break;
- case ProcessedFeedback::kInsufficient:
- break;
- default:
- UNREACHABLE();
- }
-
- if (access_mode == AccessMode::kLoad) {
- environment()->accumulator_hints() = new_accumulator_hints;
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessNamedAccess(
- Hints* receiver, NamedAccessFeedback const& feedback,
- AccessMode access_mode, Hints* result_hints) {
- for (Handle<Map> map : feedback.maps()) {
- MapRef map_ref = MakeRef(broker(), map);
- TRACE_BROKER(broker(), "Propagating feedback map "
- << map_ref << " to receiver hints.");
- receiver->AddMap(map, zone(), broker_, false);
- }
-
- for (Handle<Map> map : receiver->maps()) {
- MapRef map_ref = MakeRef(broker(), map);
- ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
- feedback.name(), access_mode,
- base::nullopt, result_hints);
- }
-
- for (Handle<Object> hint : receiver->constants()) {
- ObjectRef object = MakeRef(broker(), hint);
- if (access_mode == AccessMode::kLoad && object.IsJSObject()) {
- MapRef map_ref = object.AsJSObject().map();
- ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
- feedback.name(), access_mode,
- object.AsJSObject(), result_hints);
- }
- // For JSNativeContextSpecialization::ReduceJSLoadNamed.
- if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
- feedback.name().equals(MakeRef(
- broker(), broker()->isolate()->factory()->prototype_string()))) {
- JSFunctionRef function = object.AsJSFunction();
- function.Serialize();
- if (result_hints != nullptr && function.has_prototype()) {
- result_hints->AddConstant(function.prototype().object(), zone(),
- broker());
- }
- }
- // TODO(neis): Also record accumulator hint for string.length and maybe
- // more?
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessNamedSuperAccess(
- Hints* receiver, NamedAccessFeedback const& feedback,
- AccessMode access_mode, Hints* result_hints) {
- MapsSet receiver_maps = receiver->maps();
- for (Handle<Map> receiver_map : receiver_maps) {
- MapRef receiver_map_ref = MakeRef(broker(), receiver_map);
- for (Handle<Map> feedback_map : feedback.maps()) {
- MapRef feedback_map_ref = MakeRef(broker(), feedback_map);
- ProcessMapForNamedPropertyAccess(
- receiver, receiver_map_ref, feedback_map_ref, feedback.name(),
- access_mode, base::nullopt, result_hints);
- }
- }
- if (receiver_maps.IsEmpty()) {
- for (Handle<Map> feedback_map : feedback.maps()) {
- MapRef feedback_map_ref = MakeRef(broker(), feedback_map);
- ProcessMapForNamedPropertyAccess(
- receiver, base::nullopt, feedback_map_ref, feedback.name(),
- access_mode, base::nullopt, result_hints);
- }
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessElementAccess(
- Hints const& receiver, Hints const& key,
- ElementAccessFeedback const& feedback, AccessMode access_mode) {
- for (auto const& group : feedback.transition_groups()) {
- for (Handle<Map> map_handle : group) {
- MapRef map = MakeRef(broker(), map_handle);
- switch (access_mode) {
- case AccessMode::kHas:
- case AccessMode::kLoad:
- map.SerializePrototype();
- break;
- case AccessMode::kStore:
- map.SerializeForElementStore();
- break;
- case AccessMode::kStoreInLiteral:
- // This operation is fairly local and simple, nothing to serialize.
- break;
- }
- }
- }
-
- for (Handle<Object> hint : receiver.constants()) {
- ObjectRef receiver_ref = MakeRef(broker(), hint);
-
- // For JSNativeContextSpecialization::InferRootMap
- if (receiver_ref.IsHeapObject()) {
- receiver_ref.AsHeapObject().map().SerializeRootMap();
- }
-
- // For JSNativeContextSpecialization::ReduceElementAccess.
- if (receiver_ref.IsJSTypedArray()) {
- receiver_ref.AsJSTypedArray().Serialize();
- }
-
- // For JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant.
- if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
- for (Handle<Object> hint : key.constants()) {
- ObjectRef key_ref = MakeRef(broker(), hint);
- // TODO(neis): Do this for integer-HeapNumbers too?
- if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) {
- base::Optional<ObjectRef> element;
- if (receiver_ref.IsJSObject()) {
- JSObjectRef jsobject_ref = receiver_ref.AsJSObject();
- jsobject_ref.SerializeElements();
- element = receiver_ref.AsJSObject().GetOwnConstantElement(
- jsobject_ref.elements(kRelaxedLoad).value(), key_ref.AsSmi(),
- nullptr, SerializationPolicy::kSerializeIfNeeded);
- if (!element.has_value() && receiver_ref.IsJSArray()) {
- // We didn't find a constant element, but if the receiver is a
- // cow-array we can exploit the fact that any future write to the
- // element will replace the whole elements storage.
- JSArrayRef array_ref = receiver_ref.AsJSArray();
- array_ref.GetOwnCowElement(
- array_ref.elements(kRelaxedLoad).value(), key_ref.AsSmi(),
- SerializationPolicy::kSerializeIfNeeded);
- }
- } else if (receiver_ref.IsString()) {
- element = receiver_ref.AsString().GetCharAsStringOrUndefined(
- key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
- }
- }
- }
- }
- }
-
- // For JSNativeContextSpecialization::InferRootMap
- for (Handle<Map> map : receiver.maps()) {
- MapRef map_ref = MakeRef(broker(), map);
- map_ref.SerializeRootMap();
- }
-}
-
-void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name =
- MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
- 1, broker()->isolate())));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
- ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kLoad);
-}
-
-void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name =
- MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
- 1, broker()->isolate())));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
- ProcessNamedSuperPropertyAccess(receiver, name, slot, AccessMode::kLoad);
-}
-
-void SerializerForBackgroundCompilation::VisitStaNamedProperty(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name =
- MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
- 1, broker()->isolate())));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
- ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStore);
-}
-
-void SerializerForBackgroundCompilation::VisitStaNamedOwnProperty(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name =
- MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
- 1, broker()->isolate())));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
- ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStoreInLiteral);
-}
-
-void SerializerForBackgroundCompilation::VisitTestIn(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &environment()->accumulator_hints();
- Hints const& key = register_hints(iterator->GetRegisterOperand(0));
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kHas, false);
-}
-
-// For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance.
-void SerializerForBackgroundCompilation::ProcessConstantForOrdinaryHasInstance(
- HeapObjectRef const& constructor, bool* walk_prototypes) {
- if (constructor.IsJSBoundFunction()) {
- constructor.AsJSBoundFunction().Serialize();
- ProcessConstantForInstanceOf(
- constructor.AsJSBoundFunction().bound_target_function().value(),
- walk_prototypes);
- } else if (constructor.IsJSFunction()) {
- constructor.AsJSFunction().Serialize();
- *walk_prototypes =
- *walk_prototypes ||
- (constructor.map().has_prototype_slot() &&
- constructor.AsJSFunction().has_prototype() &&
- !constructor.AsJSFunction().PrototypeRequiresRuntimeLookup());
- }
-}
-
-void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf(
- ObjectRef const& constructor, bool* walk_prototypes) {
- if (!constructor.IsHeapObject()) return;
- HeapObjectRef constructor_heap_object = constructor.AsHeapObject();
-
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- constructor_heap_object.map(),
- MakeRef(broker(), broker()->isolate()->factory()->has_instance_symbol()),
- AccessMode::kLoad, dependencies(),
- SerializationPolicy::kSerializeIfNeeded);
-
- if (access_info.IsNotFound()) {
- ProcessConstantForOrdinaryHasInstance(constructor_heap_object,
- walk_prototypes);
- } else if (access_info.IsFastDataConstant()) {
- Handle<JSObject> holder;
- bool found_on_proto = access_info.holder().ToHandle(&holder);
- JSObjectRef holder_ref =
- found_on_proto ? MakeRef(broker(), holder) : constructor.AsJSObject();
- base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
- access_info.field_representation(), access_info.field_index(), nullptr,
- SerializationPolicy::kSerializeIfNeeded);
- CHECK(constant.has_value());
- if (constant->IsJSFunction()) {
- JSFunctionRef function = constant->AsJSFunction();
- function.Serialize();
- if (function.shared().HasBuiltinId() &&
- function.shared().builtin_id() ==
- Builtin::kFunctionPrototypeHasInstance) {
- // For JSCallReducer::ReduceFunctionPrototypeHasInstance.
- ProcessConstantForOrdinaryHasInstance(constructor_heap_object,
- walk_prototypes);
- }
- }
- }
-}
-
-void SerializerForBackgroundCompilation::VisitTestInstanceOf(
- BytecodeArrayIterator* iterator) {
- Hints const& lhs = register_hints(iterator->GetRegisterOperand(0));
- Hints rhs = environment()->accumulator_hints();
- FeedbackSlot slot = iterator->GetSlotOperand(1);
-
- if (slot.IsInvalid() || feedback_vector().is_null()) return;
- FeedbackSource source(feedback_vector(), slot);
- ProcessedFeedback const& feedback =
- broker()->ProcessFeedbackForInstanceOf(source);
-
- // Incorporate feedback (about rhs) into hints copy to simplify processing.
- // TODO(neis): Propagate into original hints?
- if (!feedback.IsInsufficient()) {
- InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf();
- if (rhs_feedback.value().has_value()) {
- rhs = rhs.Copy(zone());
- Handle<JSObject> constructor = rhs_feedback.value()->object();
- rhs.AddConstant(constructor, zone(), broker());
- }
- }
-
- bool walk_prototypes = false;
- for (Handle<Object> constant : rhs.constants()) {
- ProcessConstantForInstanceOf(MakeRef(broker(), constant), &walk_prototypes);
- }
- if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs);
-
- environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitToNumeric(
- BytecodeArrayIterator* iterator) {
- FeedbackSlot slot = iterator->GetSlotOperand(0);
- ProcessUnaryOrBinaryOperation(slot, false);
-}
-
-void SerializerForBackgroundCompilation::VisitToNumber(
- BytecodeArrayIterator* iterator) {
- FeedbackSlot slot = iterator->GetSlotOperand(0);
- ProcessUnaryOrBinaryOperation(slot, false);
-}
-
-void SerializerForBackgroundCompilation::VisitThrowReferenceErrorIfHole(
- BytecodeArrayIterator* iterator) {
- MakeRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
-}
-
-void SerializerForBackgroundCompilation::VisitStaKeyedProperty(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- Hints const& key = register_hints(iterator->GetRegisterOperand(1));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
- ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStore, true);
-}
-
-void SerializerForBackgroundCompilation::VisitStaInArrayLiteral(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- Hints const& key = register_hints(iterator->GetRegisterOperand(1));
- FeedbackSlot slot = iterator->GetSlotOperand(2);
- ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral,
- true);
-}
-
-void SerializerForBackgroundCompilation::VisitStaDataPropertyInLiteral(
- BytecodeArrayIterator* iterator) {
- Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- Hints const& key = register_hints(iterator->GetRegisterOperand(1));
- FeedbackSlot slot = iterator->GetSlotOperand(3);
- ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral,
- false);
-}
-
-#define DEFINE_CLEAR_ACCUMULATOR(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- environment()->accumulator_hints() = Hints(); \
- }
-CLEAR_ACCUMULATOR_LIST(DEFINE_CLEAR_ACCUMULATOR)
-#undef DEFINE_CLEAR_ACCUMULATOR
-
-#define DEFINE_CONDITIONAL_JUMP(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- ProcessJump(iterator); \
- }
-CONDITIONAL_JUMPS_LIST(DEFINE_CONDITIONAL_JUMP)
-#undef DEFINE_CONDITIONAL_JUMP
-
-#define DEFINE_UNCONDITIONAL_JUMP(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- ProcessJump(iterator); \
- environment()->Kill(); \
- }
-UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP)
-#undef DEFINE_UNCONDITIONAL_JUMP
-
-#define DEFINE_IGNORE(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) {}
-IGNORED_BYTECODE_LIST(DEFINE_IGNORE)
-#undef DEFINE_IGNORE
-
-#define DEFINE_UNREACHABLE(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- UNREACHABLE(); \
- }
-UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE)
-#undef DEFINE_UNREACHABLE
-
-#define DEFINE_KILL(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- environment()->Kill(); \
- }
-KILL_ENVIRONMENT_LIST(DEFINE_KILL)
-#undef DEFINE_KILL
-
-#define DEFINE_BINARY_OP(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- FeedbackSlot slot = iterator->GetSlotOperand(1); \
- ProcessUnaryOrBinaryOperation(slot, true); \
- }
-BINARY_OP_LIST(DEFINE_BINARY_OP)
-#undef DEFINE_BINARY_OP
-
-#define DEFINE_COMPARE_OP(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- FeedbackSlot slot = iterator->GetSlotOperand(1); \
- ProcessCompareOperation(slot); \
- }
-COMPARE_OP_LIST(DEFINE_COMPARE_OP)
-#undef DEFINE_COMPARE_OP
-
-#define DEFINE_UNARY_OP(name, ...) \
- void SerializerForBackgroundCompilation::Visit##name( \
- BytecodeArrayIterator* iterator) { \
- FeedbackSlot slot = iterator->GetSlotOperand(0); \
- ProcessUnaryOrBinaryOperation(slot, true); \
- }
-UNARY_OP_LIST(DEFINE_UNARY_OP)
-#undef DEFINE_UNARY_OP
-
-#undef BINARY_OP_LIST
-#undef CLEAR_ACCUMULATOR_LIST
-#undef COMPARE_OP_LIST
-#undef CONDITIONAL_JUMPS_LIST
-#undef IGNORED_BYTECODE_LIST
-#undef KILL_ENVIRONMENT_LIST
-#undef SUPPORTED_BYTECODE_LIST
-#undef UNARY_OP_LIST
-#undef UNCONDITIONAL_JUMPS_LIST
-#undef UNREACHABLE_BYTECODE_LIST
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
deleted file mode 100644
index f01e73452e..0000000000
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
-#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
-
-#include "src/handles/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeOffset;
-class Zone;
-
-namespace compiler {
-
-class CompilationDependencies;
-class JSHeapBroker;
-class ZoneStats;
-
-enum class SerializerForBackgroundCompilationFlag : uint8_t {
- kBailoutOnUninitialized = 1 << 0,
- kCollectSourcePositions = 1 << 1,
- kAnalyzeEnvironmentLiveness = 1 << 2,
- kEnableTurboInlining = 1 << 3,
-};
-using SerializerForBackgroundCompilationFlags =
- base::Flags<SerializerForBackgroundCompilationFlag>;
-
-void RunSerializerForBackgroundCompilation(
- ZoneStats* zone_stats, JSHeapBroker* broker,
- CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
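The header removed above declared SerializerForBackgroundCompilationFlag with one bit per enumerator, wrapped in base::Flags for type-safe combination. As a rough standalone sketch of that pattern only (this is not V8's base::Flags, and the flag names here are illustrative), the same idea looks like this:

// Minimal bit-flag wrapper: each enumerator occupies a distinct bit; the
// wrapper combines flags with operator| and tests them with Has().
#include <cstdint>
#include <iostream>

enum class CompileFlag : uint8_t {
  kBailoutOnUninitialized = 1 << 0,
  kCollectSourcePositions = 1 << 1,
  kAnalyzeEnvironmentLiveness = 1 << 2,
  kEnableInlining = 1 << 3,
};

class CompileFlags {
 public:
  CompileFlags() = default;
  CompileFlags(CompileFlag f) : bits_(static_cast<uint8_t>(f)) {}
  CompileFlags operator|(CompileFlags other) const {
    CompileFlags result;
    result.bits_ = bits_ | other.bits_;
    return result;
  }
  bool Has(CompileFlag f) const {
    return (bits_ & static_cast<uint8_t>(f)) != 0;
  }

 private:
  uint8_t bits_ = 0;
};

int main() {
  CompileFlags flags = CompileFlags(CompileFlag::kCollectSourcePositions) |
                       CompileFlags(CompileFlag::kEnableInlining);
  std::cout << flags.Has(CompileFlag::kBailoutOnUninitialized) << " "
            << flags.Has(CompileFlag::kEnableInlining) << "\n";  // Prints: 0 1
}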
diff --git a/deps/v8/src/compiler/serializer-hints.h b/deps/v8/src/compiler/serializer-hints.h
deleted file mode 100644
index 4cb1309832..0000000000
--- a/deps/v8/src/compiler/serializer-hints.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file defines the hints classes gathered temporarily by the
-// SerializerForBackgroundCompilation while it's analysing the bytecode
-// and copying the necessary data to the JSHeapBroker for further usage
-// by the reducers that run on the background thread.
-
-#ifndef V8_COMPILER_SERIALIZER_HINTS_H_
-#define V8_COMPILER_SERIALIZER_HINTS_H_
-
-#include "src/compiler/functional-list.h"
-#include "src/handles/handles.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class Context;
-class Object;
-class Map;
-
-namespace compiler {
-
-template <typename T, typename EqualTo>
-class FunctionalSet {
- public:
- void Add(T const& elem, Zone* zone) {
- for (auto const& l : data_) {
- if (equal_to(l, elem)) return;
- }
- data_.PushFront(elem, zone);
- }
-
- void Union(FunctionalSet<T, EqualTo> other, Zone* zone) {
- if (!data_.TriviallyEquals(other.data_)) {
- // Choose the larger side as tail.
- if (data_.Size() < other.data_.Size()) std::swap(data_, other.data_);
- for (auto const& elem : other.data_) Add(elem, zone);
- }
- }
-
- bool IsEmpty() const { return data_.begin() == data_.end(); }
-
- // Warning: quadratic time complexity.
- bool Includes(FunctionalSet<T, EqualTo> const& other) const {
- return std::all_of(other.begin(), other.end(), [&](T const& other_elem) {
- return std::any_of(this->begin(), this->end(), [&](T const& this_elem) {
- return equal_to(this_elem, other_elem);
- });
- });
- }
- bool operator==(const FunctionalSet<T, EqualTo>& other) const {
- return this->data_.TriviallyEquals(other.data_) ||
- (this->data_.Size() == other.data_.Size() && this->Includes(other) &&
- other.Includes(*this));
- }
- bool operator!=(const FunctionalSet<T, EqualTo>& other) const {
- return !(*this == other);
- }
-
- size_t Size() const { return data_.Size(); }
-
- using iterator = typename FunctionalList<T>::iterator;
-
- iterator begin() const { return data_.begin(); }
- iterator end() const { return data_.end(); }
-
- private:
- static EqualTo equal_to;
- FunctionalList<T> data_;
-};
-
-template <typename T, typename EqualTo>
-EqualTo FunctionalSet<T, EqualTo>::equal_to;
-
-struct VirtualContext {
- unsigned int distance;
- Handle<Context> context;
-
- VirtualContext(unsigned int distance_in, Handle<Context> context_in)
- : distance(distance_in), context(context_in) {
- CHECK_GT(distance, 0);
- }
- bool operator==(const VirtualContext& other) const {
- return context.equals(other.context) && distance == other.distance;
- }
-};
-
-class VirtualClosure;
-struct VirtualBoundFunction;
-
-using ConstantsSet = FunctionalSet<Handle<Object>, Handle<Object>::equal_to>;
-using VirtualContextsSet =
- FunctionalSet<VirtualContext, std::equal_to<VirtualContext>>;
-using MapsSet = FunctionalSet<Handle<Map>, Handle<Map>::equal_to>;
-using VirtualClosuresSet =
- FunctionalSet<VirtualClosure, std::equal_to<VirtualClosure>>;
-using VirtualBoundFunctionsSet =
- FunctionalSet<VirtualBoundFunction, std::equal_to<VirtualBoundFunction>>;
-
-struct HintsImpl;
-class JSHeapBroker;
-
-class Hints {
- public:
- Hints() = default; // Empty.
- static Hints SingleConstant(Handle<Object> constant, Zone* zone);
- static Hints SingleMap(Handle<Map> map, Zone* zone);
-
- // For inspection only.
- ConstantsSet constants() const;
- MapsSet maps() const;
- VirtualClosuresSet virtual_closures() const;
- VirtualContextsSet virtual_contexts() const;
- VirtualBoundFunctionsSet virtual_bound_functions() const;
-
- bool IsEmpty() const;
- bool operator==(Hints const& other) const;
- bool operator!=(Hints const& other) const;
-
-#ifdef ENABLE_SLOW_DCHECKS
- bool Includes(Hints const& other) const;
-#endif
-
- Hints Copy(Zone* zone) const; // Shallow.
- Hints CopyToParentZone(Zone* zone, JSHeapBroker* broker) const; // Deep.
-
- // As an optimization, empty hints can be represented as {impl_} being
- // {nullptr}, i.e., as not having allocated a {HintsImpl} object. As a
- // consequence, some operations need to force allocation prior to doing their
- // job. In particular, backpropagation from a child serialization
- // can only work if the hints were already allocated in the parent zone.
- bool IsAllocated() const { return impl_ != nullptr; }
- void EnsureShareable(Zone* zone) { EnsureAllocated(zone, false); }
-
- // Make {this} an alias of {other}.
- void Reset(Hints* other, Zone* zone);
-
- void Merge(Hints const& other, Zone* zone, JSHeapBroker* broker);
-
- // Destructive updates: if the hints are shared by several registers,
- // then the following updates will be seen by all of them:
- void AddConstant(Handle<Object> constant, Zone* zone, JSHeapBroker* broker);
- void AddMap(Handle<Map> map, Zone* zone, JSHeapBroker* broker,
- bool check_zone_equality = true);
- void AddVirtualClosure(VirtualClosure const& virtual_closure, Zone* zone,
- JSHeapBroker* broker);
- void AddVirtualContext(VirtualContext const& virtual_context, Zone* zone,
- JSHeapBroker* broker);
- void AddVirtualBoundFunction(VirtualBoundFunction const& bound_function,
- Zone* zone, JSHeapBroker* broker);
- void Add(Hints const& other, Zone* zone, JSHeapBroker* broker);
-
- private:
- friend std::ostream& operator<<(std::ostream&, const Hints& hints);
- HintsImpl* impl_ = nullptr;
-
- void EnsureAllocated(Zone* zone, bool check_zone_equality = true);
-
- // Helper for Add and Merge.
- bool Union(Hints const& other);
-
- static const size_t kMaxHintsSize = 50;
- static_assert(kMaxHintsSize >= 1, "must allow for at least one hint");
-};
-
-using HintsVector = ZoneVector<Hints>;
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_SERIALIZER_HINTS_H_
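The FunctionalSet removed above stores its elements in a shared, immutable FunctionalList allocated in a Zone, so copies are cheap and Union keeps the larger operand as the shared tail. As a minimal standalone sketch of that idea only (using std::shared_ptr instead of V8's Zone-backed FunctionalList), the structure behaves roughly like this:

// Persistent set over a shared singly-linked list: copying is O(1), Add
// prepends after a linear membership check, and Union re-adds the smaller
// side onto the larger one ("choose the larger side as tail").
#include <cstddef>
#include <iostream>
#include <memory>
#include <utility>

template <typename T>
class SharedListSet {
 public:
  bool Contains(const T& value) const {
    for (Node* n = head_.get(); n != nullptr; n = n->next.get()) {
      if (n->value == value) return true;
    }
    return false;
  }

  void Add(const T& value) {
    if (Contains(value)) return;
    head_ = std::make_shared<Node>(Node{value, head_});
    ++size_;
  }

  void Union(SharedListSet other) {
    if (head_ == other.head_) return;  // Trivially equal: nothing to do.
    // Keep the larger side as the shared tail, re-add the smaller side.
    if (size_ < other.size_) std::swap(*this, other);
    for (Node* n = other.head_.get(); n != nullptr; n = n->next.get()) {
      Add(n->value);
    }
  }

  size_t Size() const { return size_; }

 private:
  struct Node {
    T value;
    std::shared_ptr<Node> next;
  };
  std::shared_ptr<Node> head_;
  size_t size_ = 0;
};

int main() {
  SharedListSet<int> a, b;
  a.Add(1);
  a.Add(2);
  b.Add(2);
  b.Add(3);
  a.Union(b);                     // a is now {1, 2, 3}.
  std::cout << a.Size() << "\n";  // Prints: 3
}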
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 381bf2a75a..529f1cc7bb 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1510,10 +1510,6 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
return Type::NonInternal();
}
JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.serialized()) {
- TRACE_BROKER_MISSING(t->broker(), "data for function " << function);
- return Type::NonInternal();
- }
if (!function.shared().HasBuiltinId()) {
return Type::NonInternal();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 39f54763ba..a1f9b93dce 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -258,7 +258,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_PROMISE_TYPE:
#if V8_ENABLE_WEBASSEMBLY
case WASM_ARRAY_TYPE:
- case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_TAG_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_INSTANCE_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 5010a221a9..f91c21fd1d 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -244,6 +244,21 @@ class WasmGraphAssembler : public GraphAssembler {
// Rule of thumb: if access to a given field in an object is required in
// at least two places, put a helper function here.
+ Node* Allocate(int size) {
+ AllowLargeObjects allow_large = size < kMaxRegularHeapObjectSize
+ ? AllowLargeObjects::kFalse
+ : AllowLargeObjects::kTrue;
+ return Allocate(Int32Constant(size), allow_large);
+ }
+
+ Node* Allocate(Node* size,
+ AllowLargeObjects allow_large = AllowLargeObjects::kTrue) {
+ return AddNode(
+ graph()->NewNode(simplified_.AllocateRaw(
+ Type::Any(), AllocationType::kYoung, allow_large),
+ size, effect(), control()));
+ }
+
Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
return AddNode(graph()->NewNode(
simplified_.LoadFromObject(ObjectAccess(type, kNoWriteBarrier)), base,
@@ -2349,12 +2364,11 @@ Node* WasmGraphBuilder::MemoryGrow(Node* input) {
return diamond_result;
}
-Node* WasmGraphBuilder::Throw(uint32_t exception_index,
- const wasm::WasmException* exception,
+Node* WasmGraphBuilder::Throw(uint32_t tag_index, const wasm::WasmTag* tag,
const base::Vector<Node*> values,
wasm::WasmCodePosition position) {
needs_stack_check_ = true;
- uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
+ uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(tag);
Node* values_array =
gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAllocateFixedArray,
@@ -2362,7 +2376,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
SetSourcePosition(values_array, position);
uint32_t index = 0;
- const wasm::WasmExceptionSig* sig = exception->sig;
+ const wasm::WasmTagSig* sig = tag->sig;
MachineOperatorBuilder* m = mcgraph()->machine();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value = values[i];
@@ -2414,7 +2428,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
}
DCHECK_EQ(encoded_size, index);
- Node* exception_tag = LoadExceptionTagFromTable(exception_index);
+ Node* exception_tag = LoadTagFromTable(tag_index);
Node* throw_call = gasm_->CallRuntimeStub(wasm::WasmCode::kWasmThrow,
exception_tag, values_array);
@@ -2471,11 +2485,10 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
return gasm_->WordEqual(caught_tag, expected_tag);
}
-Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
- Node* exceptions_table =
- LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
- Node* tag =
- gasm_->LoadFixedArrayElementPtr(exceptions_table, exception_index);
+Node* WasmGraphBuilder::LoadTagFromTable(uint32_t tag_index) {
+ Node* tags_table =
+ LOAD_INSTANCE_FIELD(TagsTable, MachineType::TaggedPointer());
+ Node* tag = gasm_->LoadFixedArrayElementPtr(tags_table, tag_index);
return tag;
}
@@ -2487,14 +2500,14 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
}
Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
- const wasm::WasmException* exception,
+ const wasm::WasmTag* tag,
base::Vector<Node*> values) {
Node* values_array = gasm_->CallBuiltin(
Builtin::kWasmGetOwnProperty, Operator::kEliminatable, except_obj,
LOAD_ROOT(wasm_exception_values_symbol, wasm_exception_values_symbol),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
uint32_t index = 0;
- const wasm::WasmExceptionSig* sig = exception->sig;
+ const wasm::WasmTagSig* sig = tag->sig;
DCHECK_EQ(sig->parameter_count(), values.size());
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value;
@@ -2544,7 +2557,7 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
}
values[i] = value;
}
- DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
+ DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(tag));
return values_array;
}
@@ -5560,8 +5573,13 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
const wasm::StructType* type,
Node* rtt,
base::Vector<Node*> fields) {
- Node* s = gasm_->CallBuiltin(Builtin::kWasmAllocateStructWithRtt,
- Operator::kEliminatable, rtt);
+ int size = WasmStruct::Size(type);
+ Node* s = gasm_->Allocate(size);
+ gasm_->StoreMap(s, TNode<Map>::UncheckedCast(rtt));
+ gasm_->StoreToObject(
+ ObjectAccess(MachineType::TaggedPointer(), kNoWriteBarrier), s,
+ wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
+ LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
for (uint32_t i = 0; i < type->field_count(); i++) {
gasm_->StoreStructField(s, type, i, fields[i]);
}
@@ -5600,6 +5618,9 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
Int32Constant(element_type.element_size_bytes()));
if (initial_value != nullptr) {
+ // TODO(manoskouk): If the loop is ever removed here, we have to update
+ // ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
+ // loop as non-innermost.
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
Node* start_offset =
@@ -7897,6 +7918,8 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
std::vector<WasmLoopInfo> loop_infos;
+ wasm::WasmFeatures unused_detected_features;
+ if (!detected) detected = &unused_detected_features;
if (!BuildGraphForWasmFunction(env, func_body, func_index, detected, mcgraph,
&loop_infos, node_origins, source_positions)) {
return wasm::WasmCompilationResult{};
@@ -7921,8 +7944,13 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
func_body, env->module, func_index, &loop_infos);
if (counters) {
- counters->wasm_compile_function_peak_memory_bytes()->AddSample(
- static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
+ int zone_bytes =
+ static_cast<int>(mcgraph->graph()->zone()->allocation_size());
+ counters->wasm_compile_function_peak_memory_bytes()->AddSample(zone_bytes);
+ if (func_body.end - func_body.start >= 100 * KB) {
+ counters->wasm_compile_huge_function_peak_memory_bytes()->AddSample(
+ zone_bytes);
+ }
}
auto result = info.ReleaseWasmCompilationResult();
CHECK_NOT_NULL(result); // Compilation expected to succeed.
@@ -8173,7 +8201,7 @@ AssemblerOptions WasmAssemblerOptions() {
AssemblerOptions options;
// Relocation info required to serialize {WasmCode} for proper functions.
options.record_reloc_info_for_serialization = true;
- options.enable_root_array_delta_access = false;
+ options.enable_root_relative_access = false;
return options;
}
@@ -8181,7 +8209,7 @@ AssemblerOptions WasmStubAssemblerOptions() {
AssemblerOptions options;
// Relocation info not necessary because stubs are not serialized.
options.record_reloc_info_for_serialization = false;
- options.enable_root_array_delta_access = false;
+ options.enable_root_relative_access = false;
return options;
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index e1993fbf42..71e3111c8c 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -266,15 +266,14 @@ class WasmGraphBuilder {
Node* Unop(wasm::WasmOpcode opcode, Node* input,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* MemoryGrow(Node* input);
- Node* Throw(uint32_t exception_index, const wasm::WasmException* exception,
+ Node* Throw(uint32_t tag_index, const wasm::WasmTag* tag,
const base::Vector<Node*> values,
wasm::WasmCodePosition position);
Node* Rethrow(Node* except_obj);
Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
- Node* LoadExceptionTagFromTable(uint32_t exception_index);
+ Node* LoadTagFromTable(uint32_t tag_index);
Node* GetExceptionTag(Node* except_obj);
- Node* GetExceptionValues(Node* except_obj,
- const wasm::WasmException* exception,
+ Node* GetExceptionValues(Node* except_obj, const wasm::WasmTag* tag,
base::Vector<Node*> values_out);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index dced0b9424..635a1f4514 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -119,8 +119,8 @@ class FastCApiObject {
}
Type buffer[1024];
- bool result =
- CopyAndConvertArrayToCppBuffer<&type_info, Type>(seq_arg, buffer, 1024);
+ bool result = TryCopyAndConvertArrayToCppBuffer<&type_info, Type>(
+ seq_arg, buffer, 1024);
if (!result) {
options.fallback = 1;
return 0;
@@ -140,16 +140,20 @@ class FastCApiObject {
FastCApiObject* self = UnwrapObject(args.This());
CHECK_SELF_OR_THROW();
- self->slow_call_count_++;
HandleScope handle_scope(isolate);
if (args.Length() < 2) {
+ self->slow_call_count_++;
isolate->ThrowError("This method expects at least 2 arguments.");
return;
}
if (args[1]->IsTypedArray()) {
- // Not supported yet.
+ AddAllTypedArraySlowCallback(args);
+ return;
+ }
+ self->slow_call_count_++;
+ if (args[1]->IsUndefined()) {
Type dummy_result = 0;
args.GetReturnValue().Set(Number::New(isolate, dummy_result));
return;
@@ -166,28 +170,33 @@ class FastCApiObject {
"Invalid length of array, must be between 0 and 1024.");
return;
}
- Type buffer[1024];
- bool result =
- CopyAndConvertArrayToCppBuffer<&type_info, Type>(seq_arg, buffer, 1024);
- if (!result) {
- isolate->ThrowError("Array conversion unsuccessful.");
- return;
- }
- DCHECK_EQ(seq_arg->Length(), length);
Type sum = 0;
for (uint32_t i = 0; i < length; ++i) {
- sum += buffer[i];
+ v8::Local<v8::Value> element =
+ seq_arg
+ ->Get(isolate->GetCurrentContext(),
+ v8::Integer::NewFromUnsigned(isolate, i))
+ .ToLocalChecked();
+ if (element->IsNumber()) {
+ double value = element->ToNumber(isolate->GetCurrentContext())
+ .ToLocalChecked()
+ ->Value();
+ sum += value;
+ } else if (element->IsUndefined()) {
+ // Hole: ignore the element.
+ } else {
+ isolate->ThrowError("unexpected element type in JSArray");
+ return;
+ }
}
args.GetReturnValue().Set(Number::New(isolate, sum));
}
-
- // TODO(mslekova) - The typed array param should be a
- // {size_t length, uint32_t* data}
- static Type AddAllTypedArrayFastCallback(Local<Object> receiver,
- bool should_fallback,
- Local<Uint32Array> typed_array_arg,
- FastApiCallbackOptions& options) {
+ template <typename T>
+ static Type AddAllTypedArrayFastCallback(
+ Local<Object> receiver, bool should_fallback,
+ const FastApiTypedArray<T>& typed_array_arg,
+ FastApiCallbackOptions& options) {
FastCApiObject* self = UnwrapObject(receiver);
CHECK_SELF_OR_FALLBACK(0);
self->fast_call_count_++;
@@ -197,12 +206,67 @@ class FastCApiObject {
return 0;
}
- // Not implemented.
- return 0;
+ T sum = 0;
+ for (unsigned i = 0; i < typed_array_arg.length(); ++i) {
+ sum += typed_array_arg.get(i);
+ }
+ return static_cast<Type>(sum);
}
static void AddAllTypedArraySlowCallback(
const FunctionCallbackInfo<Value>& args) {
- // Not implemented.
+ Isolate* isolate = args.GetIsolate();
+
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ self->slow_call_count_++;
+
+ HandleScope handle_scope(isolate);
+
+ if (args.Length() < 2) {
+ isolate->ThrowError("This method expects at least 2 arguments.");
+ return;
+ }
+ if (!args[1]->IsTypedArray()) {
+ isolate->ThrowError(
+ "This method expects a TypedArray as a second argument.");
+ return;
+ }
+
+ Local<TypedArray> typed_array_arg = args[1].As<TypedArray>();
+ size_t length = typed_array_arg->Length();
+
+ void* data = typed_array_arg->Buffer()->GetBackingStore()->Data();
+ if (typed_array_arg->IsInt32Array() || typed_array_arg->IsUint32Array() ||
+ typed_array_arg->IsBigInt64Array() ||
+ typed_array_arg->IsBigUint64Array()) {
+ int64_t sum = 0;
+ for (unsigned i = 0; i < length; ++i) {
+ if (typed_array_arg->IsInt32Array()) {
+ sum += static_cast<int32_t*>(data)[i];
+ } else if (typed_array_arg->IsUint32Array()) {
+ sum += static_cast<uint32_t*>(data)[i];
+ } else if (typed_array_arg->IsBigInt64Array()) {
+ sum += static_cast<int64_t*>(data)[i];
+ } else if (typed_array_arg->IsBigUint64Array()) {
+ sum += static_cast<uint64_t*>(data)[i];
+ }
+ }
+ args.GetReturnValue().Set(Number::New(isolate, sum));
+ } else if (typed_array_arg->IsFloat32Array() ||
+ typed_array_arg->IsFloat64Array()) {
+ double sum = 0;
+ for (unsigned i = 0; i < length; ++i) {
+ if (typed_array_arg->IsFloat32Array()) {
+ sum += static_cast<float*>(data)[i];
+ } else if (typed_array_arg->IsFloat64Array()) {
+ sum += static_cast<double*>(data)[i];
+ }
+ }
+ args.GetReturnValue().Set(Number::New(isolate, sum));
+ } else {
+ isolate->ThrowError("TypedArray type is not supported.");
+ return;
+ }
}
static int32_t AddAllIntInvalidCallback(Local<Object> receiver,
@@ -403,7 +467,8 @@ class FastCApiObject {
static bool IsValidApiObject(Local<Object> object) {
i::Address addr = *reinterpret_cast<i::Address*>(*object);
auto instance_type = i::Internals::GetInstanceType(addr);
- return (instance_type == i::Internals::kJSApiObjectType ||
+ return (base::IsInRange(instance_type, i::Internals::kFirstJSApiObjectType,
+ i::Internals::kLastJSApiObjectType) ||
instance_type == i::Internals::kJSSpecialApiObjectType);
}
static FastCApiObject* UnwrapObject(Local<Object> object) {
@@ -472,17 +537,46 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_seq_c_func));
- CFunction add_all_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback);
+ CFunction add_all_int32_typed_array_c_func =
+ CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int32_t>);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "add_all_int32_typed_array",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::AddAllTypedArraySlowCallback,
+ Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &add_all_int32_typed_array_c_func));
+
+ CFunction add_all_int64_typed_array_c_func =
+ CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int64_t>);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "add_all_int64_typed_array",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::AddAllTypedArraySlowCallback,
+ Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &add_all_int64_typed_array_c_func));
+
+ CFunction add_all_uint64_typed_array_c_func =
+ CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint64_t>);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "add_all_uint64_typed_array",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::AddAllTypedArraySlowCallback,
+ Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect,
+ &add_all_uint64_typed_array_c_func));
+
+ CFunction add_all_uint32_typed_array_c_func =
+ CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint32_t>);
api_obj_ctor->PrototypeTemplate()->Set(
- isolate, "add_all_typed_array",
+ isolate, "add_all_uint32_typed_array",
FunctionTemplate::New(
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
- SideEffectType::kHasSideEffect, &add_all_typed_array_c_func));
+ SideEffectType::kHasSideEffect,
+ &add_all_uint32_typed_array_c_func));
const CFunction add_all_overloads[] = {
- add_all_typed_array_c_func,
+ add_all_uint32_typed_array_c_func,
add_all_seq_c_func,
};
api_obj_ctor->PrototypeTemplate()->Set(
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index e2892a4e82..2b831bc747 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -644,8 +644,8 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, PrintResult print_result,
ReportExceptions report_exceptions,
ProcessMessageQueue process_message_queue) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (i::FLAG_parse_only) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::VMState<PARSER> state(i_isolate);
i::Handle<i::String> str = Utils::OpenHandle(*(source));
@@ -681,6 +681,15 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
TryCatch try_catch(isolate);
try_catch.SetVerbose(report_exceptions == kReportExceptions);
+ // Explicitly check for stack overflows. This method can be called
+ // recursively, and since we consume quite some stack space for the C++
+ // frames, the stack check in the called frame might be too late.
+ if (i::StackLimitCheck{i_isolate}.HasOverflowed()) {
+ i_isolate->StackOverflow();
+ i_isolate->OptionalRescheduleException(false);
+ return false;
+ }
+
MaybeLocal<Value> maybe_result;
bool success = true;
{
@@ -2832,19 +2841,8 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Local<ObjectTemplate> Shell::CreateOSTemplate(Isolate* isolate) {
Local<ObjectTemplate> os_template = ObjectTemplate::New(isolate);
AddOSMethods(isolate, os_template);
-#if defined(V8_TARGET_OS_LINUX)
- const char os_name[] = "linux";
-#elif defined(V8_TARGET_OS_WIN)
- const char os_name[] = "windows";
-#elif defined(V8_TARGET_OS_MACOSX)
- const char os_name[] = "macos";
-#elif defined(V8_TARGET_OS_ANDROID)
- const char os_name[] = "android";
-#else
- const char os_name[] = "unknown";
-#endif
os_template->Set(isolate, "name",
- v8::String::NewFromUtf8Literal(isolate, os_name),
+ v8::String::NewFromUtf8Literal(isolate, V8_TARGET_OS_STRING),
PropertyAttribute::ReadOnly);
os_template->Set(
isolate, "d8Path",
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 7ff5809a18..46f6c366cc 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/base/hashmap.h"
+#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
@@ -516,7 +517,7 @@ void CollectAndMaybeResetCounts(Isolate* isolate,
SharedFunctionInfo shared = vector.shared_function_info();
DCHECK(shared.IsSubjectToDebugging());
uint32_t count = static_cast<uint32_t>(vector.invocation_count());
- if (reset_count) vector.clear_invocation_count();
+ if (reset_count) vector.clear_invocation_count(kRelaxedStore);
counter_map->Add(shared, count);
}
break;
@@ -793,7 +794,7 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
shared.set_has_reported_binary_coverage(false);
} else if (o.IsFeedbackVector()) {
// In any case, clear any collected invocation counts.
- FeedbackVector::cast(o).clear_invocation_count();
+ FeedbackVector::cast(o).clear_invocation_count(kRelaxedStore);
}
}
}
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 3e3e17a61b..cecf46d7b7 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -7,6 +7,7 @@
#include "src/builtins/accessors.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
+#include "src/codegen/script-details.h"
#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
@@ -29,11 +30,11 @@ namespace {
static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
Handle<String> source,
REPLMode repl_mode) {
- Compiler::ScriptDetails script_details(isolate->factory()->empty_string());
+ ScriptDetails script_details(isolate->factory()->empty_string(),
+ ScriptOriginOptions(false, true));
script_details.repl_mode = repl_mode;
- ScriptOriginOptions origin_options(false, true);
return Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, origin_options, nullptr, nullptr,
+ isolate, source, script_details, nullptr, nullptr,
ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
NOT_NATIVES_CODE);
}
@@ -560,6 +561,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kArrayPrototypeValues:
case Builtin::kArrayIncludes:
case Builtin::kArrayPrototypeAt:
+ case Builtin::kArrayPrototypeConcat:
case Builtin::kArrayPrototypeEntries:
case Builtin::kArrayPrototypeFill:
case Builtin::kArrayPrototypeFind:
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 099c31e6ef..bcb8da6652 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -5,6 +5,7 @@
#include "src/debug/debug-frames.h"
#include "src/builtins/accessors.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#if V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index 9ef716c07e..5112c5ba73 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -6,6 +6,7 @@
#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/script-details.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-property-iterator.h"
@@ -757,12 +758,11 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::SharedFunctionInfo> result;
{
- ScriptOriginOptions origin_options;
- i::ScriptData* script_data = nullptr;
+ i::AlignedCachedData* cached_data = nullptr;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, i::Compiler::ScriptDetails(), origin_options, nullptr,
- script_data, ScriptCompiler::kNoCompileOptions,
+ isolate, str, i::ScriptDetails(), nullptr, cached_data,
+ ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
: i::INSPECTOR_CODE);
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 833c26f691..81d38011cb 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -614,7 +614,7 @@ struct PropertyDescriptor {
v8::Local<v8::Value> set;
};
-class PropertyIterator {
+class V8_EXPORT_PRIVATE PropertyIterator {
public:
// Creating a PropertyIterator can potentially throw an exception.
// The returned std::unique_ptr is empty iff that happens.
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
index 84b0bd015d..5d7ecda979 100644
--- a/deps/v8/src/debug/debug-property-iterator.cc
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -21,10 +21,9 @@ std::unique_ptr<DebugPropertyIterator> DebugPropertyIterator::Create(
new DebugPropertyIterator(isolate, receiver));
if (receiver->IsJSProxy()) {
- iterator->is_own_ = false;
- iterator->prototype_iterator_.AdvanceIgnoringProxies();
+ iterator->AdvanceToPrototype();
}
- if (iterator->prototype_iterator_.IsAtEnd()) return iterator;
+ if (iterator->Done()) return iterator;
if (!iterator->FillKeysForCurrentPrototypeAndStage()) return nullptr;
if (iterator->should_move_to_next_stage() && !iterator->AdvanceInternal()) {
@@ -40,8 +39,14 @@ DebugPropertyIterator::DebugPropertyIterator(Isolate* isolate,
prototype_iterator_(isolate, receiver, kStartAtReceiver,
PrototypeIterator::END_AT_NULL) {}
-bool DebugPropertyIterator::Done() const {
- return prototype_iterator_.IsAtEnd();
+bool DebugPropertyIterator::Done() const { return is_done_; }
+
+void DebugPropertyIterator::AdvanceToPrototype() {
+ stage_ = kExoticIndices;
+ is_own_ = false;
+ if (!prototype_iterator_.HasAccess()) is_done_ = true;
+ prototype_iterator_.AdvanceIgnoringProxies();
+ if (prototype_iterator_.IsAtEnd()) is_done_ = true;
}
bool DebugPropertyIterator::AdvanceInternal() {
@@ -56,9 +61,7 @@ bool DebugPropertyIterator::AdvanceInternal() {
stage_ = Stage::kAllProperties;
break;
case Stage::kAllProperties:
- stage_ = kExoticIndices;
- is_own_ = false;
- prototype_iterator_.AdvanceIgnoringProxies();
+ AdvanceToPrototype();
break;
}
if (!FillKeysForCurrentPrototypeAndStage()) return false;
@@ -145,7 +148,7 @@ bool DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
current_key_index_ = 0;
exotic_length_ = 0;
keys_ = Handle<FixedArray>::null();
- if (prototype_iterator_.IsAtEnd()) return true;
+ if (is_done_) return true;
Handle<JSReceiver> receiver =
PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
bool has_exotic_indices = receiver->IsJSTypedArray();
@@ -169,7 +172,7 @@ bool DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
}
bool DebugPropertyIterator::should_move_to_next_stage() const {
- if (prototype_iterator_.IsAtEnd()) return false;
+ if (is_done_) return false;
if (stage_ == kExoticIndices) return current_key_index_ >= exotic_length_;
return keys_.is_null() ||
current_key_index_ >= static_cast<size_t>(keys_->length());
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
index 0c2a9afd97..38c78b12bd 100644
--- a/deps/v8/src/debug/debug-property-iterator.h
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -45,6 +45,7 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
bool should_move_to_next_stage() const;
void CalculateNativeAccessorFlags();
Handle<Name> raw_name() const;
+ void AdvanceToPrototype();
V8_WARN_UNUSED_RESULT bool AdvanceInternal();
Isolate* isolate_;
@@ -59,6 +60,7 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
bool calculated_native_accessor_flags_ = false;
int native_accessor_flags_ = 0;
bool is_own_ = true;
+ bool is_done_ = false;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-wasm-objects.cc b/deps/v8/src/debug/debug-wasm-objects.cc
index e7fffb9324..3df53a8f5e 100644
--- a/deps/v8/src/debug/debug-wasm-objects.cc
+++ b/deps/v8/src/debug/debug-wasm-objects.cc
@@ -1135,17 +1135,17 @@ Handle<ArrayList> AddWasmTableObjectInternalProperties(
int length = table->current_length();
Handle<FixedArray> entries = isolate->factory()->NewFixedArray(length);
for (int i = 0; i < length; ++i) {
- entries->set(i, *WasmTableObject::Get(isolate, table, i));
+ auto entry = WasmTableObject::Get(isolate, table, i);
+ entries->set(i, *entry);
}
Handle<JSArray> final_entries = isolate->factory()->NewJSArrayWithElements(
entries, i::PACKED_ELEMENTS, length);
JSObject::SetPrototype(final_entries, isolate->factory()->null_value(), false,
kDontThrow)
.Check();
- result = ArrayList::Add(
- isolate, result,
- isolate->factory()->NewStringFromStaticChars("[[Entries]]"),
- final_entries);
+ Handle<String> entries_string =
+ isolate->factory()->NewStringFromStaticChars("[[Entries]]");
+ result = ArrayList::Add(isolate, result, entries_string, final_entries);
return result;
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 31321f88fc..41775c8965 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1301,12 +1301,13 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
JavaScriptFrame* frame = it.frame();
Address pc = frame->pc();
Builtin builtin = InstructionStream::TryLookupCode(isolate, pc);
- if (builtin == Builtin::kBaselineEnterAtBytecode ||
- builtin == Builtin::kBaselineEnterAtNextBytecode) {
+ if (builtin == Builtin::kBaselineOrInterpreterEnterAtBytecode ||
+ builtin == Builtin::kBaselineOrInterpreterEnterAtNextBytecode) {
Address* pc_addr = frame->pc_address();
- Builtin advance = builtin == Builtin::kBaselineEnterAtBytecode
- ? Builtin::kInterpreterEnterAtBytecode
- : Builtin::kInterpreterEnterAtNextBytecode;
+ Builtin advance =
+ builtin == Builtin::kBaselineOrInterpreterEnterAtBytecode
+ ? Builtin::kInterpreterEnterAtBytecode
+ : Builtin::kInterpreterEnterAtNextBytecode;
Address advance_pc =
isolate->builtins()->code(advance).InstructionStart();
PointerAuthentication::ReplacePC(pc_addr, advance_pc,
@@ -1973,7 +1974,7 @@ base::Optional<Object> Debug::OnThrow(Handle<Object> exception) {
maybe_promise->IsJSPromise() ? v8::debug::kPromiseRejection
: v8::debug::kException);
if (!scheduled_exception.is_null()) {
- isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
+ isolate_->set_scheduled_exception(*scheduled_exception);
}
PrepareStepOnThrow();
 // If the OnException handler requested termination, then indicate this to
@@ -2298,6 +2299,7 @@ void Debug::UpdateState() {
// Note that the debug context could have already been loaded to
// bootstrap test cases.
isolate_->compilation_cache()->DisableScriptAndEval();
+ isolate_->CollectSourcePositionsForAllBytecodeArrays();
is_active = true;
feature_tracker()->Track(DebugFeatureTracker::kActive);
} else {
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 31d603e0dd..42c89c2c98 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -42,14 +42,16 @@ enum StepAction : int8_t {
// Type of exception break. NOTE: These values are in macros.py as well.
enum ExceptionBreakType { BreakException = 0, BreakUncaughtException = 1 };
+// Type of debug break. NOTE: The order matters for the predicates
+// below inside BreakLocation, so be careful when adding / removing.
enum DebugBreakType {
NOT_DEBUG_BREAK,
DEBUGGER_STATEMENT,
+ DEBUG_BREAK_AT_ENTRY,
DEBUG_BREAK_SLOT,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_SUSPEND,
- DEBUG_BREAK_AT_ENTRY,
};
enum IgnoreBreakMode {
@@ -67,25 +69,18 @@ class BreakLocation {
JavaScriptFrame* frame,
std::vector<BreakLocation>* result_out);
- inline bool IsSuspend() const { return type_ == DEBUG_BREAK_SLOT_AT_SUSPEND; }
- inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
- inline bool IsReturnOrSuspend() const {
- return type_ >= DEBUG_BREAK_SLOT_AT_RETURN;
- }
- inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
- inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
- inline bool IsDebuggerStatement() const {
- return type_ == DEBUGGER_STATEMENT;
- }
- inline bool IsDebugBreakAtEntry() const {
- bool result = type_ == DEBUG_BREAK_AT_ENTRY;
- return result;
- }
+ bool IsSuspend() const { return type_ == DEBUG_BREAK_SLOT_AT_SUSPEND; }
+ bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
+ bool IsReturnOrSuspend() const { return type_ >= DEBUG_BREAK_SLOT_AT_RETURN; }
+ bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
+ bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
+ bool IsDebuggerStatement() const { return type_ == DEBUGGER_STATEMENT; }
+ bool IsDebugBreakAtEntry() const { return type_ == DEBUG_BREAK_AT_ENTRY; }
bool HasBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info) const;
- inline int generator_suspend_id() { return generator_suspend_id_; }
- inline int position() const { return position_; }
+ int generator_suspend_id() { return generator_suspend_id_; }
+ int position() const { return position_; }
debug::BreakLocationType type() const;
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index a3065bb49f..a4c297ec5b 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -950,7 +950,8 @@ void TranslateSourcePositionTable(Isolate* isolate, Handle<BytecodeArray> code,
code->set_source_position_table(*new_source_position_table, kReleaseStore);
LOG_CODE_EVENT(isolate,
CodeLinePosInfoRecordEvent(code->GetFirstBytecodeAddress(),
- *new_source_position_table));
+ *new_source_position_table,
+ JitCodeEvent::BYTE_CODE));
}
void UpdatePositions(Isolate* isolate, Handle<SharedFunctionInfo> sfi,
diff --git a/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc b/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
index bbeacc561d..92bce90095 100644
--- a/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
@@ -14,8 +14,8 @@ void Builtins_ContinueToJavaScriptBuiltinWithResult();
void Builtins_ContinueToJavaScriptBuiltin();
void construct_stub_create_deopt_addr();
void construct_stub_invoke_deopt_addr();
-void Builtins_BaselineEnterAtBytecode();
-void Builtins_BaselineEnterAtNextBytecode();
+void Builtins_BaselineOrInterpreterEnterAtBytecode();
+void Builtins_BaselineOrInterpreterEnterAtNextBytecode();
typedef void (*function_ptr)();
}
@@ -32,8 +32,8 @@ constexpr function_ptr builtins[] = {
&Builtins_ContinueToJavaScriptBuiltin,
&construct_stub_create_deopt_addr,
&construct_stub_invoke_deopt_addr,
- &Builtins_BaselineEnterAtBytecode,
- &Builtins_BaselineEnterAtNextBytecode,
+ &Builtins_BaselineOrInterpreterEnterAtBytecode,
+ &Builtins_BaselineOrInterpreterEnterAtNextBytecode,
};
bool Deoptimizer::IsValidReturnAddress(Address address) {
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 72ca85f41e..ea460aa36f 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -430,7 +430,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
- function.ResetIfBytecodeFlushed();
+ function.ResetIfCodeFlushed();
if (code.is_null()) code = function.code();
if (CodeKindCanDeoptimize(code.kind())) {
@@ -745,11 +745,19 @@ void Deoptimizer::TraceDeoptBegin(int optimization_id,
PrintF(file, "%s", CodeKindToString(compiled_code_.kind()));
}
PrintF(file,
- ", opt id %d, bytecode offset %d, deopt exit %d, FP to SP delta %d, "
+ ", opt id %d, "
+#ifdef DEBUG
+ "node id %d, "
+#endif // DEBUG
+ "bytecode offset %d, deopt exit %d, FP to SP "
+ "delta %d, "
"caller SP " V8PRIxPTR_FMT ", pc " V8PRIxPTR_FMT "]\n",
- optimization_id, bytecode_offset.ToInt(), deopt_exit_index_,
- fp_to_sp_delta_, caller_frame_top_,
- PointerAuthentication::StripPAC(from_));
+ optimization_id,
+#ifdef DEBUG
+ info.node_id,
+#endif // DEBUG
+ bytecode_offset.ToInt(), deopt_exit_index_, fp_to_sp_delta_,
+ caller_frame_top_, PointerAuthentication::StripPAC(from_));
if (verbose_tracing_enabled() && deopt_kind_ != DeoptimizeKind::kLazy) {
PrintF(file, " ;;; deoptimize at ");
OFStream outstr(file);
@@ -996,8 +1004,8 @@ namespace {
// Get the dispatch builtin for unoptimized frames.
Builtin DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
if (is_baseline) {
- return advance_bc ? Builtin::kBaselineEnterAtNextBytecode
- : Builtin::kBaselineEnterAtBytecode;
+ return advance_bc ? Builtin::kBaselineOrInterpreterEnterAtNextBytecode
+ : Builtin::kBaselineOrInterpreterEnterAtBytecode;
} else {
return advance_bc ? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
@@ -2067,11 +2075,13 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
+ uint32_t last_node_id = 0;
int last_deopt_id = kNoDeoptimizationId;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
- RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
+ RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID) |
+ RelocInfo::ModeMask(RelocInfo::DEOPT_NODE_ID);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->pc() >= pc) break;
@@ -2085,9 +2095,11 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
last_deopt_id = static_cast<int>(info->data());
} else if (info->rmode() == RelocInfo::DEOPT_REASON) {
last_reason = static_cast<DeoptimizeReason>(info->data());
+ } else if (info->rmode() == RelocInfo::DEOPT_NODE_ID) {
+ last_node_id = static_cast<uint32_t>(info->data());
}
}
- return DeoptInfo(last_position, last_reason, last_deopt_id);
+ return DeoptInfo(last_position, last_reason, last_node_id, last_deopt_id);
}
// static
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 4ff5601e35..173a8a4e02 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -31,11 +31,15 @@ class Deoptimizer : public Malloced {
public:
struct DeoptInfo {
DeoptInfo(SourcePosition position, DeoptimizeReason deopt_reason,
- int deopt_id)
- : position(position), deopt_reason(deopt_reason), deopt_id(deopt_id) {}
+ uint32_t node_id, int deopt_id)
+ : position(position),
+ deopt_reason(deopt_reason),
+ node_id(node_id),
+ deopt_id(deopt_id) {}
const SourcePosition position;
const DeoptimizeReason deopt_reason;
+ const uint32_t node_id;
const int deopt_id;
};
diff --git a/deps/v8/src/diagnostics/arm/unwinder-arm.cc b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
index 846bbfe6bc..e0e2f0e91f 100644
--- a/deps/v8/src/diagnostics/arm/unwinder-arm.cc
+++ b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "include/v8-unwinder-state.h"
+#include "include/v8.h"
#include "src/diagnostics/unwinder.h"
#include "src/execution/frame-constants.h"
diff --git a/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc b/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
index 5a92512a17..0314458005 100644
--- a/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
@@ -6,6 +6,8 @@
namespace v8 {
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index b84fb2761f..596362b351 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -224,6 +224,13 @@ static void PrintRelocInfo(std::ostringstream& out, Isolate* isolate,
<< "'";
} else if (rmode == RelocInfo::DEOPT_ID) {
out << " ;; debug: deopt index " << static_cast<int>(relocinfo->data());
+ } else if (rmode == RelocInfo::DEOPT_NODE_ID) {
+#ifdef DEBUG
+ out << " ;; debug: deopt node id "
+ << static_cast<uint32_t>(relocinfo->data());
+#else // DEBUG
+ UNREACHABLE();
+#endif // DEBUG
} else if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
@@ -319,13 +326,25 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
pc += 4;
} else if (it != nullptr && !it->done() &&
it->rinfo()->pc() == reinterpret_cast<Address>(pc) &&
- it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
+ (it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE ||
+ it->rinfo()->rmode() == RelocInfo::LITERAL_CONSTANT ||
+ it->rinfo()->rmode() == RelocInfo::DATA_EMBEDDED_OBJECT)) {
// raw pointer embedded in code stream, e.g., jump table
byte* ptr =
base::ReadUnalignedValue<byte*>(reinterpret_cast<Address>(pc));
- SNPrintF(decode_buffer, "%08" V8PRIxPTR " jump table entry %4zu",
- reinterpret_cast<intptr_t>(ptr),
- static_cast<size_t>(ptr - begin));
+ if (RelocInfo::IsInternalReference(it->rinfo()->rmode())) {
+ SNPrintF(decode_buffer,
+ "%08" V8PRIxPTR " jump table entry %4zu",
+ reinterpret_cast<intptr_t>(ptr),
+ static_cast<size_t>(ptr - begin));
+ } else {
+ const char* kType = RelocInfo::IsLiteralConstant(it->rinfo()->rmode())
+ ? " literal constant"
+ : "embedded data object";
+ SNPrintF(decode_buffer, "%08" V8PRIxPTR " %s 0x%08" V8PRIxPTR,
+ reinterpret_cast<intptr_t>(ptr), kType,
+ reinterpret_cast<intptr_t>(ptr));
+ }
pc += sizeof(ptr);
} else {
decode_buffer[0] = '\0';
diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index 4125dfaa4c..53c29cfb24 100644
--- a/deps/v8/src/diagnostics/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -8,6 +8,7 @@
#include <memory>
#include <vector>
+#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
diff --git a/deps/v8/src/diagnostics/gdb-jit.h b/deps/v8/src/diagnostics/gdb-jit.h
index e1bc852f0a..82f5ce892c 100644
--- a/deps/v8/src/diagnostics/gdb-jit.h
+++ b/deps/v8/src/diagnostics/gdb-jit.h
@@ -5,8 +5,6 @@
#ifndef V8_DIAGNOSTICS_GDB_JIT_H_
#define V8_DIAGNOSTICS_GDB_JIT_H_
-#include "include/v8.h"
-
//
// GDB has two ways of interacting with JIT code. With the "JIT compilation
// interface", V8 can tell GDB when it emits JIT code. Unfortunately to do so,
@@ -25,6 +23,9 @@
//
namespace v8 {
+
+struct JitCodeEvent;
+
namespace internal {
namespace GDBJITInterface {
#ifdef ENABLE_GDB_JIT_INTERFACE
diff --git a/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc b/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
index 5a92512a17..0314458005 100644
--- a/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
@@ -6,6 +6,8 @@
namespace v8 {
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
diff --git a/deps/v8/src/diagnostics/mips/unwinder-mips.cc b/deps/v8/src/diagnostics/mips/unwinder-mips.cc
index 5a92512a17..0314458005 100644
--- a/deps/v8/src/diagnostics/mips/unwinder-mips.cc
+++ b/deps/v8/src/diagnostics/mips/unwinder-mips.cc
@@ -6,6 +6,8 @@
namespace v8 {
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
diff --git a/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc b/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
index 5a92512a17..0314458005 100644
--- a/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
+++ b/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
@@ -6,6 +6,8 @@
namespace v8 {
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 8a87c7e98d..e45d7580c8 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -289,6 +289,10 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
StoreHandler::cast(*this).StoreHandlerVerify(isolate);
break;
+ case BIG_INT_BASE_TYPE:
+ BigIntBase::cast(*this).BigIntBaseVerify(isolate);
+ break;
+
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -298,6 +302,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
JSFunction::cast(*this).JSFunctionVerify(isolate);
break;
+ case JS_LAST_DUMMY_API_OBJECT_TYPE:
+ UNREACHABLE();
}
}
@@ -311,7 +317,7 @@ void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
// static
void HeapObject::VerifyCodePointer(Isolate* isolate, Object p) {
CHECK(p.IsHeapObject());
- CHECK(isolate->heap()->InCodeSpace(HeapObject::cast(p)));
+ CHECK(IsValidCodeObject(isolate->heap(), HeapObject::cast(p)));
CHECK(HeapObject::cast(p).IsCode());
}
@@ -330,15 +336,7 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
// - Jumps must go to new instructions starts.
// - No Illegal bytecodes.
// - No consecutive sequences of prefix Wide / ExtraWide.
- CHECK(IsBytecodeArray(isolate));
- CHECK(constant_pool(isolate).IsFixedArray(isolate));
- VerifyHeapPointer(isolate, constant_pool(isolate));
- {
- Object table = source_position_table(isolate, kAcquireLoad);
- CHECK(table.IsUndefined(isolate) || table.IsException(isolate) ||
- table.IsByteArray(isolate));
- }
- CHECK(handler_table(isolate).IsByteArray(isolate));
+ TorqueGeneratedClassVerifiers::BytecodeArrayVerify(*this, isolate);
for (int i = 0; i < constant_pool(isolate).length(); ++i) {
// No ThinStrings in the constant pool.
CHECK(!constant_pool(isolate).get(isolate, i).IsThinString(isolate));
@@ -1490,7 +1488,6 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestVerify(Isolate* isolate) {
}
void BigIntBase::BigIntBaseVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::BigIntBaseVerify(*this, isolate);
CHECK_GE(length(), 0);
CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
}
@@ -1512,7 +1509,7 @@ void Module::ModuleVerify(Isolate* isolate) {
CHECK(module_namespace().IsUndefined(isolate) ||
module_namespace().IsJSModuleNamespace());
if (module_namespace().IsJSModuleNamespace()) {
- CHECK_LE(Module::kInstantiating, status());
+ CHECK_LE(Module::kLinking, status());
CHECK_EQ(JSModuleNamespace::cast(module_namespace()).module(), *this);
}
@@ -1545,13 +1542,13 @@ void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
} else if (status() == kEvaluating || status() == kEvaluated) {
CHECK(code().IsJSGeneratorObject());
} else {
- if (status() == kInstantiated) {
+ if (status() == kLinked) {
CHECK(code().IsJSGeneratorObject());
- } else if (status() == kInstantiating) {
+ } else if (status() == kLinking) {
CHECK(code().IsJSFunction());
- } else if (status() == kPreInstantiating) {
+ } else if (status() == kPreLinking) {
CHECK(code().IsSharedFunctionInfo());
- } else if (status() == kUninstantiated) {
+ } else if (status() == kUnlinked) {
CHECK(code().IsSharedFunctionInfo());
}
CHECK(!AsyncParentModuleCount());
@@ -1751,8 +1748,6 @@ void PreparseData::PreparseDataVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(InterpreterData)
-
void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StackFrameInfoVerify(*this, isolate);
#if V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 048a5c6006..46fccedde7 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -231,6 +231,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case FEEDBACK_METADATA_TYPE:
FeedbackMetadata::cast(*this).FeedbackMetadataPrint(os);
break;
+ case BIG_INT_BASE_TYPE:
+ BigIntBase::cast(*this).BigIntBasePrint(os);
+ break;
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -258,6 +261,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case THIN_ONE_BYTE_STRING_TYPE:
case UNCACHED_EXTERNAL_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case JS_LAST_DUMMY_API_OBJECT_TYPE:
// TODO(all): Handle these types too.
os << "UNKNOWN TYPE " << map().instance_type();
UNREACHABLE();
@@ -1423,6 +1427,7 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSArrayBuffer");
os << "\n - backing_store: " << backing_store();
os << "\n - byte_length: " << byte_length();
+ os << "\n - max_byte_length: " << max_byte_length();
if (is_external()) os << "\n - external";
if (is_detachable()) os << "\n - detachable";
if (was_detached()) os << "\n - detached";
@@ -1854,6 +1859,7 @@ void WasmTypeInfo::WasmTypeInfoPrint(std::ostream& os) {
os << "\n - type address: " << reinterpret_cast<void*>(foreign_address());
os << "\n - supertypes: " << Brief(supertypes());
os << "\n - subtypes: " << Brief(subtypes());
+ os << "\n - instance: " << Brief(instance());
os << "\n";
}
@@ -2071,10 +2077,10 @@ void WasmMemoryObject::WasmMemoryObjectPrint(std::ostream& os) {
os << "\n";
}
-void WasmExceptionObject::WasmExceptionObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmExceptionObject");
+void WasmTagObject::WasmTagObjectPrint(std::ostream& os) {
+ PrintHeader(os, "WasmTagObject");
os << "\n - serialized_signature: " << Brief(serialized_signature());
- os << "\n - exception_tag: " << Brief(exception_tag());
+ os << "\n - tag: " << Brief(tag());
os << "\n";
}
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index a369704b2b..affbc0fc8e 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -435,6 +435,13 @@ void Decoder::DecodeExt0(Instruction* instr) {
}
PPC_VX_OPCODE_D_FORM_LIST(DECODE_VX_D_FORM__INSTRUCTIONS)
#undef DECODE_VX_D_FORM__INSTRUCTIONS
+#define DECODE_VX_F_FORM__INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'rt, 'Vb"); \
+ return; \
+ }
+ PPC_VX_OPCODE_F_FORM_LIST(DECODE_VX_F_FORM__INSTRUCTIONS)
+#undef DECODE_VX_F_FORM__INSTRUCTIONS
}
// Some encodings are 5-0 bits, handle those first
switch (EXT0 | (instr->BitField(5, 0))) {
@@ -485,6 +492,13 @@ void Decoder::DecodeExt0(Instruction* instr) {
}
PPC_VX_OPCODE_E_FORM_LIST(DECODE_VX_E_FORM__INSTRUCTIONS)
#undef DECODE_VX_E_FORM__INSTRUCTIONS
+#define DECODE_VX_G_FORM__INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Vt, 'rb, 'UIM"); \
+ return; \
+ }
+ PPC_VX_OPCODE_G_FORM_LIST(DECODE_VX_G_FORM__INSTRUCTIONS)
+#undef DECODE_VX_G_FORM__INSTRUCTIONS
}
}
@@ -891,12 +905,18 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "cntlzw'. 'ra, 'rs");
return;
}
-#if V8_TARGET_ARCH_PPC64
case CNTLZDX: {
Format(instr, "cntlzd'. 'ra, 'rs");
return;
}
-#endif
+ case CNTTZWX: {
+ Format(instr, "cnttzw'. 'ra, 'rs");
+ return;
+ }
+ case CNTTZDX: {
+ Format(instr, "cnttzd'. 'ra, 'rs");
+ return;
+ }
case ANDX: {
Format(instr, "and'. 'ra, 'rs, 'rb");
return;
@@ -1111,7 +1131,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
return;
}
case MTVSRDD: {
- Format(instr, "mtvsrdd 'Xt, 'ra");
+ Format(instr, "mtvsrdd 'Xt, 'ra, 'rb");
return;
}
case LDBRX: {
@@ -1289,6 +1309,10 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fneg'. 'Dt, 'Db");
break;
}
+ case FCPSGN: {
+ Format(instr, "fcpsgn'. 'Dt, 'Da, 'Db");
+ break;
+ }
case MCRFS: {
Format(instr, "mcrfs ?,?");
break;
@@ -1343,13 +1367,20 @@ void Decoder::DecodeExt6(Instruction* instr) {
}
}
switch (EXT6 | (instr->BitField(10, 3))) {
-#define DECODE_XX3_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: { \
- Format(instr, #name " 'Xt, 'Xa, 'Xb"); \
- return; \
+#define DECODE_XX3_VECTOR_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Xt, 'Xa, 'Xb"); \
+ return; \
+ }
+ PPC_XX3_OPCODE_VECTOR_LIST(DECODE_XX3_VECTOR_INSTRUCTIONS)
+#undef DECODE_XX3_VECTOR_INSTRUCTIONS
+#define DECODE_XX3_SCALAR_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Dt, 'Da, 'Db"); \
+ return; \
}
- PPC_XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
-#undef DECODE_XX3_INSTRUCTIONS
+ PPC_XX3_OPCODE_SCALAR_LIST(DECODE_XX3_SCALAR_INSTRUCTIONS)
+#undef DECODE_XX3_SCALAR_INSTRUCTIONS
}
// Some encodings have integers hard coded in the middle, handle those first.
switch (EXT6 | (instr->BitField(20, 16)) | (instr->BitField(10, 2))) {
diff --git a/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc b/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
index 43c6acb609..52c22221ff 100644
--- a/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/diagnostics/unwinder.h"
namespace v8 {
+
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
} // namespace v8
diff --git a/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc b/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc
index ccfb9268ea..84d2e41cfc 100644
--- a/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc
+++ b/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc
@@ -6,6 +6,8 @@
namespace v8 {
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
diff --git a/deps/v8/src/diagnostics/s390/unwinder-s390.cc b/deps/v8/src/diagnostics/s390/unwinder-s390.cc
index 43c6acb609..52c22221ff 100644
--- a/deps/v8/src/diagnostics/s390/unwinder-s390.cc
+++ b/deps/v8/src/diagnostics/s390/unwinder-s390.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/diagnostics/unwinder.h"
namespace v8 {
+
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
} // namespace v8
diff --git a/deps/v8/src/diagnostics/system-jit-win.cc b/deps/v8/src/diagnostics/system-jit-win.cc
index 120020597a..c77c223183 100644
--- a/deps/v8/src/diagnostics/system-jit-win.cc
+++ b/deps/v8/src/diagnostics/system-jit-win.cc
@@ -4,6 +4,7 @@
#include "src/diagnostics/system-jit-win.h"
+#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/diagnostics/system-jit-win.h b/deps/v8/src/diagnostics/system-jit-win.h
index dffd34df6c..68410079bb 100644
--- a/deps/v8/src/diagnostics/system-jit-win.h
+++ b/deps/v8/src/diagnostics/system-jit-win.h
@@ -5,9 +5,10 @@
#ifndef V8_DIAGNOSTICS_SYSTEM_JIT_WIN_H_
#define V8_DIAGNOSTICS_SYSTEM_JIT_WIN_H_
-#include "include/v8.h"
-
namespace v8 {
+
+struct JitCodeEvent;
+
namespace internal {
namespace ETWJITInterface {
void Register();
diff --git a/deps/v8/src/diagnostics/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index 1dd122a118..68ff679595 100644
--- a/deps/v8/src/diagnostics/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -6,6 +6,8 @@
#include <algorithm>
+#include "include/v8.h"
+#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
namespace v8 {
diff --git a/deps/v8/src/diagnostics/unwinder.h b/deps/v8/src/diagnostics/unwinder.h
index 4cad2897fd..893415a813 100644
--- a/deps/v8/src/diagnostics/unwinder.h
+++ b/deps/v8/src/diagnostics/unwinder.h
@@ -5,12 +5,11 @@
#ifndef V8_DIAGNOSTICS_UNWINDER_H_
#define V8_DIAGNOSTICS_UNWINDER_H_
-#include "include/v8.h"
-#include "src/common/globals.h"
+#include "include/v8-internal.h"
namespace v8 {
-i::Address Load(i::Address address);
+internal::Address Load(internal::Address address);
} // namespace v8
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 458264032a..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -17,36 +17,10 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
+#include <windows.h>
+// This has to come after windows.h.
+#include <versionhelpers.h> // For IsWindows8OrGreater().
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 72d6d7fe27..3ddb29e064 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -2799,8 +2799,9 @@ int DisassemblerX64::InstructionDecode(v8::base::Vector<char> out_buffer,
for (byte* bp = instr; bp < data; bp++) {
outp += v8::base::SNPrintF(out_buffer + outp, "%02x", *bp);
}
- // Indent instruction, leaving space for 7 bytes, i.e. 14 characters in hex.
- while (outp < 14) {
+ // Indent instruction, leaving space for 9 bytes, i.e. 18 characters in hex.
+ // 9-byte nop and rip-relative mov are (probably) the largest we emit.
+ while (outp < 18) {
outp += v8::base::SNPrintF(out_buffer + outp, " ");
}
diff --git a/deps/v8/src/diagnostics/x64/unwinder-x64.cc b/deps/v8/src/diagnostics/x64/unwinder-x64.cc
index 5a92512a17..0314458005 100644
--- a/deps/v8/src/diagnostics/x64/unwinder-x64.cc
+++ b/deps/v8/src/diagnostics/x64/unwinder-x64.cc
@@ -6,6 +6,8 @@
namespace v8 {
+struct RegisterState;
+
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
diff --git a/deps/v8/src/execution/arm64/pointer-authentication-arm64.h b/deps/v8/src/execution/arm64/pointer-authentication-arm64.h
index 6af8974788..3ac184ee72 100644
--- a/deps/v8/src/execution/arm64/pointer-authentication-arm64.h
+++ b/deps/v8/src/execution/arm64/pointer-authentication-arm64.h
@@ -5,10 +5,10 @@
#ifndef V8_EXECUTION_ARM64_POINTER_AUTHENTICATION_ARM64_H_
#define V8_EXECUTION_ARM64_POINTER_AUTHENTICATION_ARM64_H_
-#include "src/execution/pointer-authentication.h"
-
#include "src/common/globals.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arm64/simulator-arm64.h"
+#include "src/execution/pointer-authentication.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 9e8733e525..324bdd99a8 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -27,6 +27,14 @@
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
+#if V8_OS_WIN
+#include <windows.h>
+#endif
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/trap-handler/trap-handler-simulator.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -65,6 +73,20 @@ TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
Simulator::GlobalMonitor::Get)
+bool Simulator::ProbeMemory(uintptr_t address, uintptr_t access_size) {
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+ uintptr_t last_accessed_byte = address + access_size - 1;
+ uintptr_t current_pc = reinterpret_cast<uintptr_t>(pc_);
+ uintptr_t landing_pad =
+ trap_handler::ProbeMemory(last_accessed_byte, current_pc);
+ if (!landing_pad) return true;
+ set_pc(landing_pad);
+ return false;
+#else
+ return true;
+#endif
+}
+
// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
void Simulator::TraceSim(const char* format, ...) {
if (FLAG_trace_sim) {
@@ -1801,6 +1823,10 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t stack = 0;
+ unsigned access_size = 1 << instr->SizeLS();
+ // First, check whether the memory is accessible (for wasm trap handling).
+ if (!ProbeMemory(address, access_size)) return;
+
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (instr->IsLoad()) {
@@ -1907,7 +1933,6 @@ void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
// Print a detailed trace (including the memory address) instead of the basic
// register:value trace generated by set_*reg().
- unsigned access_size = 1 << instr->SizeLS();
if (instr->IsLoad()) {
if ((op == LDR_s) || (op == LDR_d)) {
LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
@@ -4886,6 +4911,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD1R:
case NEON_LD1R_post: {
vf = vf_t;
+ if (!ProbeMemory(addr, LaneSizeInBytesFromFormat(vf))) return;
ld1r(vf, vreg(rt), addr);
do_load = true;
break;
@@ -4894,6 +4920,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD2R:
case NEON_LD2R_post: {
vf = vf_t;
+ if (!ProbeMemory(addr, 2 * LaneSizeInBytesFromFormat(vf))) return;
int rt2 = (rt + 1) % kNumberOfVRegisters;
ld2r(vf, vreg(rt), vreg(rt2), addr);
do_load = true;
@@ -4903,6 +4930,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3R:
case NEON_LD3R_post: {
vf = vf_t;
+ if (!ProbeMemory(addr, 3 * LaneSizeInBytesFromFormat(vf))) return;
int rt2 = (rt + 1) % kNumberOfVRegisters;
int rt3 = (rt2 + 1) % kNumberOfVRegisters;
ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
@@ -4913,6 +4941,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD4R:
case NEON_LD4R_post: {
vf = vf_t;
+ if (!ProbeMemory(addr, 4 * LaneSizeInBytesFromFormat(vf))) return;
int rt2 = (rt + 1) % kNumberOfVRegisters;
int rt3 = (rt2 + 1) % kNumberOfVRegisters;
int rt4 = (rt3 + 1) % kNumberOfVRegisters;
@@ -4940,6 +4969,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
case NEONLoadStoreSingle1:
scale = 1;
+ if (!ProbeMemory(addr, scale * esize)) return;
if (do_load) {
ld1(vf, vreg(rt), lane, addr);
LogVRead(addr, rt, print_format, lane);
@@ -4950,6 +4980,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
break;
case NEONLoadStoreSingle2:
scale = 2;
+ if (!ProbeMemory(addr, scale * esize)) return;
if (do_load) {
ld2(vf, vreg(rt), vreg(rt2), lane, addr);
LogVRead(addr, rt, print_format, lane);
@@ -4962,6 +4993,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
break;
case NEONLoadStoreSingle3:
scale = 3;
+ if (!ProbeMemory(addr, scale * esize)) return;
if (do_load) {
ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
LogVRead(addr, rt, print_format, lane);
@@ -4976,6 +5008,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
break;
case NEONLoadStoreSingle4:
scale = 4;
+ if (!ProbeMemory(addr, scale * esize)) return;
if (do_load) {
ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
LogVRead(addr, rt, print_format, lane);
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index f7e24fae1a..73f3c2d62c 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -761,7 +761,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Simulation helpers.
template <typename T>
void set_pc(T new_pc) {
- DCHECK(sizeof(T) == sizeof(pc_));
+ STATIC_ASSERT(sizeof(T) == sizeof(pc_));
memcpy(&pc_, &new_pc, sizeof(T));
pc_modified_ = true;
}
@@ -1502,6 +1502,18 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
AddrMode addr_mode);
void CheckMemoryAccess(uintptr_t address, uintptr_t stack);
+ // "Probe" if an address range can be read. This is currently implemented
+ // by doing a 1-byte read of the last accessed byte, since the assumption is
+ // that if the last byte is accessible, also all lower bytes are accessible
+ // (which holds true for Wasm).
+ // Returns true if the access was successful, false if the access raised a
+ // signal which was then handled by the trap handler (also see
+ // {trap_handler::ProbeMemory}). If the access raises a signal which is not
+ // handled by the trap handler (e.g. because the current PC is not registered
+ // as a protected instruction), the signal will propagate and make the process
+ // crash. If no trap handler is available, this always returns true.
+ bool ProbeMemory(uintptr_t address, uintptr_t access_size);
+
// Memory read helpers.
template <typename T, typename A>
T MemoryRead(A address) {
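
The new ProbeMemory hook lets the simulator consult the Wasm trap handler before touching memory: if the probe faults and the trap handler installs a landing pad, the simulated pc is redirected and the access is skipped. A rough, self-contained analogue of that control flow (the accessible range, landing-pad value, and FakeProbe oracle below are invented; this is not V8's trap handler):

#include <cstdint>
#include <cstdio>

// Stand-in for trap_handler::ProbeMemory: returns 0 if the access is fine,
// otherwise the address of a landing pad the simulated pc should jump to.
constexpr uintptr_t kMemBase = 0x1000, kMemSize = 0x100, kLandingPad = 0xdead;
uintptr_t FakeProbe(uintptr_t last_byte) {
  bool ok = last_byte >= kMemBase && last_byte < kMemBase + kMemSize;
  return ok ? 0 : kLandingPad;
}

struct FakeSimulator {
  uintptr_t pc = 0;
  // Mirrors the shape of Simulator::ProbeMemory in this patch: true means
  // "go ahead with the access", false means "pc was redirected, bail out".
  bool ProbeMemory(uintptr_t address, uintptr_t access_size) {
    uintptr_t landing_pad = FakeProbe(address + access_size - 1);
    if (!landing_pad) return true;
    pc = landing_pad;
    return false;
  }
};

int main() {
  FakeSimulator sim;
  std::printf("in range: %d\n", sim.ProbeMemory(kMemBase + 8, 4));            // 1
  std::printf("oob:      %d\n", sim.ProbeMemory(kMemBase + kMemSize, 4));     // 0
  std::printf("pc now 0x%lx\n", static_cast<unsigned long>(sim.pc));
  return 0;
}
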
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index c450395752..f24f183706 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -230,8 +230,8 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
(builtin == Builtin::kInterpreterEntryTrampoline ||
builtin == Builtin::kInterpreterEnterAtBytecode ||
builtin == Builtin::kInterpreterEnterAtNextBytecode ||
- builtin == Builtin::kBaselineEnterAtBytecode ||
- builtin == Builtin::kBaselineEnterAtNextBytecode)) {
+ builtin == Builtin::kBaselineOrInterpreterEnterAtBytecode ||
+ builtin == Builtin::kBaselineOrInterpreterEnterAtNextBytecode)) {
return true;
} else if (FLAG_interpreted_frames_native_stack) {
intptr_t marker = Memory<intptr_t>(
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index a4dad038f0..63f9ea5947 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -33,6 +33,22 @@ NativeContext Isolate::raw_native_context() {
return context().native_context();
}
+void Isolate::set_pending_message(Object message_obj) {
+ thread_local_top()->pending_message_ = message_obj;
+}
+
+Object Isolate::pending_message() {
+ return thread_local_top()->pending_message_;
+}
+
+void Isolate::clear_pending_message() {
+ set_pending_message(ReadOnlyRoots(this).the_hole_value());
+}
+
+bool Isolate::has_pending_message() {
+ return !pending_message().IsTheHole(this);
+}
+
Object Isolate::pending_exception() {
DCHECK(has_pending_exception());
DCHECK(!thread_local_top()->pending_exception_.IsException(this));
@@ -54,11 +70,6 @@ bool Isolate::has_pending_exception() {
return !thread_local_top()->pending_exception_.IsTheHole(this);
}
-void Isolate::clear_pending_message() {
- thread_local_top()->pending_message_obj_ =
- ReadOnlyRoots(this).the_hole_value();
-}
-
Object Isolate::scheduled_exception() {
DCHECK(has_scheduled_exception());
DCHECK(!thread_local_top()->scheduled_exception_.IsException(this));
@@ -73,8 +84,11 @@ bool Isolate::has_scheduled_exception() {
void Isolate::clear_scheduled_exception() {
DCHECK(!thread_local_top()->scheduled_exception_.IsException(this));
- thread_local_top()->scheduled_exception_ =
- ReadOnlyRoots(this).the_hole_value();
+ set_scheduled_exception(ReadOnlyRoots(this).the_hole_value());
+}
+
+void Isolate::set_scheduled_exception(Object exception) {
+ thread_local_top()->scheduled_exception_ = exception;
}
bool Isolate::is_catchable_by_javascript(Object exception) {
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index d94be5d234..8363c52c49 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -32,7 +32,7 @@
#include "src/codegen/flush-instruction-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/ptr-compr.h"
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/date/date.h"
#include "src/debug/debug-frames.h"
@@ -511,7 +511,7 @@ void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
v->VisitRootPointer(Root::kStackRoots, nullptr,
FullObjectSlot(&thread->pending_exception_));
v->VisitRootPointer(Root::kStackRoots, nullptr,
- FullObjectSlot(&thread->pending_message_obj_));
+ FullObjectSlot(&thread->pending_message_));
v->VisitRootPointer(Root::kStackRoots, nullptr,
FullObjectSlot(&thread->context_));
v->VisitRootPointer(Root::kStackRoots, nullptr,
@@ -1392,6 +1392,21 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
}
Object Isolate::StackOverflow() {
+ // Whoever calls this method should not have overflown the stack limit by too
+ // much. Otherwise we risk actually running out of stack space.
+ // We allow for up to 8kB overflow, because we typically allow up to 4KB
+ // overflow per frame in generated code, but might call through more smaller
+ // frames until we reach this method.
+ // If this DCHECK fails, one of the frames on the stack should be augmented by
+ // an additional stack check.
+#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)
+ // Allow for a bit more overflow in sanitizer builds, because C++ frames take
+ // significantly more space there.
+ DCHECK_GE(GetCurrentStackPosition(), stack_guard()->real_climit() - 32 * KB);
+#else
+ DCHECK_GE(GetCurrentStackPosition(), stack_guard()->real_climit() - 8 * KB);
+#endif
+
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
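
The new DCHECKs encode the expectation that StackOverflow() is reached at most about 8 KB (32 KB under sanitizers) past the real climit on a downward-growing stack. A toy version of that comparison, using the slack values quoted in the DCHECK:

#include <cstdint>
#include <cstdio>

// On a downward-growing stack, the current stack position must not be more
// than `slack_bytes` below the real climit when the overflow path is reached.
bool WithinOverflowSlack(uintptr_t current_sp, uintptr_t real_climit,
                         size_t slack_bytes) {
  return current_sp >= real_climit - slack_bytes;
}

int main() {
  const size_t kKB = 1024;
  uintptr_t climit = 1 * 1024 * 1024;  // arbitrary illustrative limit
  std::printf("%d\n", WithinOverflowSlack(climit - 4 * kKB, climit, 8 * kKB));   // 1: ok
  std::printf("%d\n", WithinOverflowSlack(climit - 16 * kKB, climit, 8 * kKB));  // 0: too deep
  return 0;
}
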
@@ -1658,7 +1673,7 @@ Object Isolate::ThrowInternal(Object raw_exception, MessageLocation* location) {
ReportBootstrappingException(exception, location);
} else {
Handle<Object> message_obj = CreateMessageOrAbort(exception, location);
- thread_local_top()->pending_message_obj_ = *message_obj;
+ set_pending_message(*message_obj);
}
}
@@ -2086,7 +2101,7 @@ void Isolate::ScheduleThrow(Object exception) {
Throw(exception);
PropagatePendingExceptionToExternalTryCatch();
if (has_pending_exception()) {
- thread_local_top()->scheduled_exception_ = pending_exception();
+ set_scheduled_exception(pending_exception());
thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
}
@@ -2099,7 +2114,7 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
DCHECK(handler->capture_message_);
Object message(reinterpret_cast<Address>(handler->message_obj_));
DCHECK(message.IsJSMessageObject() || message.IsTheHole(this));
- thread_local_top()->pending_message_obj_ = message;
+ set_pending_message(message);
}
void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
@@ -2118,7 +2133,7 @@ void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
clear_scheduled_exception();
}
}
- if (reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr()) ==
+ if (reinterpret_cast<void*>(thread_local_top()->pending_message_.ptr()) ==
handler->message_obj_) {
clear_pending_message();
}
@@ -2331,7 +2346,7 @@ void Isolate::ReportPendingMessages() {
if (!has_been_propagated) return;
// Clear the pending message object early to avoid endless recursion.
- Object message_obj = thread_local_top()->pending_message_obj_;
+ Object message_obj = pending_message();
clear_pending_message();
// For uncatchable exceptions we do nothing. If needed, the exception and the
@@ -2402,7 +2417,7 @@ bool Isolate::OptionalRescheduleException(bool clear_exception) {
}
// Reschedule the exception.
- thread_local_top()->scheduled_exception_ = pending_exception();
+ set_scheduled_exception(pending_exception());
clear_pending_exception();
return true;
}
@@ -2991,6 +3006,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
#endif
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
+ detailed_source_positions_for_profiling_(FLAG_detailed_line_info),
persistent_handles_list_(new PersistentHandlesList()),
jitless_(FLAG_jitless),
#if V8_SFI_HAS_UNIQUE_ID
@@ -3122,12 +3138,6 @@ void Isolate::Deinit() {
// All client isolates should already be detached.
DCHECK_NULL(client_isolate_head_);
- // Help sweeper threads complete sweeping to stop faster.
- heap_.mark_compact_collector()->DrainSweepingWorklists();
- heap_.mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
-
- heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
-
DumpAndResetStats();
if (FLAG_print_deopt_stress) {
@@ -3336,16 +3346,14 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
SetTerminationOnExternalTryCatch();
} else {
v8::TryCatch* handler = try_catch_handler();
- DCHECK(thread_local_top()->pending_message_obj_.IsJSMessageObject() ||
- thread_local_top()->pending_message_obj_.IsTheHole(this));
+ DCHECK(pending_message().IsJSMessageObject() ||
+ pending_message().IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top()->pending_message_obj_.IsTheHole(this)) return true;
-
- handler->message_obj_ =
- reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr());
+ if (!has_pending_message()) return true;
+ handler->message_obj_ = reinterpret_cast<void*>(pending_message().ptr());
}
return true;
}
@@ -3625,8 +3633,8 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
string_table_.reset(new StringTable(this));
bigint_processor_ = bigint::Processor::New(new BigIntPlatform(this));
- compiler_dispatcher_ =
- new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
+ compiler_dispatcher_ = new LazyCompileDispatcher(
+ this, V8::GetCurrentPlatform(), FLAG_stack_size);
baseline_batch_compiler_ = new baseline::BaselineBatchCompiler(this);
// Enable logging before setting up the heap
@@ -3845,6 +3853,8 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
+ initialized_ = true;
+
return true;
}
@@ -4005,10 +4015,14 @@ bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
}
bool Isolate::NeedsSourcePositionsForProfiling() const {
- return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
- FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
- debug_->is_active() || logger_->is_logging() || FLAG_log_maps ||
- FLAG_log_ic;
+ return
+ // Static conditions.
+ FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
+ FLAG_turbo_profiling || FLAG_perf_prof || FLAG_log_maps || FLAG_log_ic ||
+ // Dynamic conditions; changing any of these conditions triggers source
+ // position collection for the entire heap
+ // (CollectSourcePositionsForAllBytecodeArrays).
+ is_profiling() || debug_->is_active() || logger_->is_logging();
}
void Isolate::SetFeedbackVectorsForProfilingTools(Object value) {
@@ -4214,8 +4228,9 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
call_completed_callbacks_.erase(pos);
}
-void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
- if (!thread_local_top()->CallDepthIsZero()) return;
+void Isolate::FireCallCompletedCallbackInternal(
+ MicrotaskQueue* microtask_queue) {
+ DCHECK(thread_local_top()->CallDepthIsZero());
bool perform_checkpoint =
microtask_queue &&
@@ -4801,6 +4816,8 @@ void Isolate::SetIdle(bool is_idle) {
}
void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
+ if (!initialized_) return;
+
HandleScope scope(this);
std::vector<Handle<SharedFunctionInfo>> sfis;
{
@@ -4808,12 +4825,10 @@ void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
HeapObjectIterator iterator(heap());
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
- if (obj.IsSharedFunctionInfo()) {
- SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
- if (sfi.HasBytecodeArray()) {
- sfis.push_back(Handle<SharedFunctionInfo>(sfi, this));
- }
- }
+ if (!obj.IsSharedFunctionInfo()) continue;
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
+ if (!sfi.CanCollectSourcePosition(this)) continue;
+ sfis.push_back(Handle<SharedFunctionInfo>(sfi, this));
}
}
for (auto sfi : sfis) {
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 585ee491d8..e543c72718 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -82,7 +82,6 @@ class CodeTracer;
class CommonFrame;
class CompilationCache;
class CompilationStatistics;
-class CompilerDispatcher;
class Counters;
class Debug;
class Deoptimizer;
@@ -93,6 +92,7 @@ class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InnerPointerToCodeCache;
+class LazyCompileDispatcher;
class LocalIsolate;
class Logger;
class MaterializedObjectStore;
@@ -470,7 +470,6 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(v8_inspector::V8Inspector*, inspector, nullptr) \
V(bool, next_v8_call_is_safe_for_termination, false) \
V(bool, only_terminate_in_safe_scope, false) \
- V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info) \
V(int, embedder_wrapper_type_index, -1) \
V(int, embedder_wrapper_object_index, -1) \
V(compiler::NodeObserver*, node_observer, nullptr) \
@@ -706,11 +705,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return thread_local_top()->thread_id_.load(std::memory_order_relaxed);
}
- // Interface to pending exception.
- inline Object pending_exception();
- inline void set_pending_exception(Object exception_obj);
- inline void clear_pending_exception();
-
void InstallConditionalFeatures(Handle<Context> context);
bool IsSharedArrayBufferConstructorEnabled(Handle<Context> context);
@@ -718,10 +712,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsWasmSimdEnabled(Handle<Context> context);
bool AreWasmExceptionsEnabled(Handle<Context> context);
- THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
-
- inline bool has_pending_exception();
-
THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
@@ -733,20 +723,27 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::TryCatch* try_catch_handler() {
return thread_local_top()->try_catch_handler_;
}
- bool* external_caught_exception_address() {
- return &thread_local_top()->external_caught_exception_;
- }
- THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)
+ THREAD_LOCAL_TOP_ADDRESS(bool, external_caught_exception)
+ // Interface to pending exception.
+ THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
+ inline Object pending_exception();
+ inline void set_pending_exception(Object exception_obj);
+ inline void clear_pending_exception();
+ inline bool has_pending_exception();
+
+ THREAD_LOCAL_TOP_ADDRESS(Object, pending_message)
inline void clear_pending_message();
- Address pending_message_obj_address() {
- return reinterpret_cast<Address>(&thread_local_top()->pending_message_obj_);
- }
+ inline Object pending_message();
+ inline bool has_pending_message();
+ inline void set_pending_message(Object message_obj);
+ THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)
inline Object scheduled_exception();
inline bool has_scheduled_exception();
inline void clear_scheduled_exception();
+ inline void set_scheduled_exception(Object exception);
bool IsJavaScriptHandlerOnTop(Object exception);
bool IsExternalHandlerOnTop(Object exception);
@@ -1008,6 +1005,17 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR
+ void SetDetailedSourcePositionsForProfiling(bool value) {
+ if (value) {
+ CollectSourcePositionsForAllBytecodeArrays();
+ }
+ detailed_source_positions_for_profiling_ = value;
+ }
+
+ bool detailed_source_positions_for_profiling() const {
+ return detailed_source_positions_for_profiling_;
+ }
+
#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
inline type* name() { \
DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
@@ -1218,7 +1226,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return is_profiling_.load(std::memory_order_relaxed);
}
- void set_is_profiling(bool enabled) {
+ void SetIsProfiling(bool enabled) {
+ if (enabled) {
+ CollectSourcePositionsForAllBytecodeArrays();
+ }
is_profiling_.store(enabled, std::memory_order_relaxed);
}
@@ -1454,7 +1465,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
size_t heap_limit);
void AddCallCompletedCallback(CallCompletedCallback callback);
void RemoveCallCompletedCallback(CallCompletedCallback callback);
- void FireCallCompletedCallback(MicrotaskQueue* microtask_queue);
+ void FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
+ if (!thread_local_top()->CallDepthIsZero()) return;
+ FireCallCompletedCallbackInternal(microtask_queue);
+ }
void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
@@ -1614,7 +1628,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
AccountingAllocator* allocator() { return allocator_; }
- CompilerDispatcher* compiler_dispatcher() const {
+ LazyCompileDispatcher* lazy_compile_dispatcher() const {
return compiler_dispatcher_;
}
@@ -1768,6 +1782,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return main_thread_local_isolate_.get();
}
+ Isolate* AsIsolate() { return this; }
LocalIsolate* AsLocalIsolate() { return main_thread_local_isolate(); }
LocalHeap* main_thread_local_heap();
@@ -1831,6 +1846,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static void RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data);
+ void FireCallCompletedCallbackInternal(MicrotaskQueue* microtask_queue);
+
class ThreadDataTable {
public:
ThreadDataTable() = default;
@@ -2081,7 +2098,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// through all compilations (and thus all JSHeapBroker instances).
Zone* compiler_zone_ = nullptr;
- CompilerDispatcher* compiler_dispatcher_ = nullptr;
+ LazyCompileDispatcher* compiler_dispatcher_ = nullptr;
baseline::BaselineBatchCompiler* baseline_batch_compiler_ = nullptr;
using InterruptEntry = std::pair<InterruptCallback, void*>;
@@ -2106,6 +2123,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
#undef ISOLATE_FIELD_OFFSET
#endif
+ bool detailed_source_positions_for_profiling_;
+
OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;
std::unique_ptr<PersistentHandlesList> persistent_handles_list_;
@@ -2115,6 +2134,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool force_slow_path_ = false;
+ bool initialized_ = false;
bool jitless_ = false;
int next_optimization_id_ = 0;
diff --git a/deps/v8/src/execution/local-isolate-inl.h b/deps/v8/src/execution/local-isolate-inl.h
index 59a7b1f5e4..ca7c119b6b 100644
--- a/deps/v8/src/execution/local-isolate-inl.h
+++ b/deps/v8/src/execution/local-isolate-inl.h
@@ -22,6 +22,11 @@ Object LocalIsolate::root(RootIndex index) const {
return isolate_->root(index);
}
+Handle<Object> LocalIsolate::root_handle(RootIndex index) const {
+ DCHECK(RootsTable::IsImmortalImmovable(index));
+ return isolate_->root_handle(index);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index 20a4344cfa..c9a7553acf 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -4,6 +4,7 @@
#include "src/execution/local-isolate.h"
+#include "src/bigint/bigint.h"
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
@@ -24,7 +25,16 @@ LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind,
: GetCurrentStackPosition() - FLAG_stack_size * KB),
runtime_call_stats_(runtime_call_stats) {}
-LocalIsolate::~LocalIsolate() = default;
+LocalIsolate::~LocalIsolate() {
+ if (bigint_processor_) bigint_processor_->Destroy();
+}
+
+void LocalIsolate::RegisterDeserializerStarted() {
+ return isolate_->RegisterDeserializerStarted();
+}
+void LocalIsolate::RegisterDeserializerFinished() {
+ return isolate_->RegisterDeserializerFinished();
+}
int LocalIsolate::GetNextScriptId() { return isolate_->GetNextScriptId(); }
@@ -39,6 +49,12 @@ bool LocalIsolate::is_collecting_type_profile() const {
return isolate_->is_collecting_type_profile();
}
+// Used for lazy initialization, based on an assumption that most
+// LocalIsolates won't be used to parse any BigInt literals.
+void LocalIsolate::InitializeBigIntProcessor() {
+ bigint_processor_ = bigint::Processor::New(new bigint::Platform());
+}
+
// static
bool StackLimitCheck::HasOverflowed(LocalIsolate* local_isolate) {
return GetCurrentStackPosition() < local_isolate->stack_limit();
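
bigint_processor_ is now created lazily, on the stated assumption that most LocalIsolates never parse a BigInt literal, and torn down only if it was ever allocated. The generic shape of that lazy accessor, with invented stand-in names rather than V8's types:

#include <cstdio>

// Hypothetical stand-in for LocalIsolate: the member stays null until first
// use, is created on demand, and is destroyed only if it was ever created.
class Worker {
 public:
  ~Worker() { delete processor_; }
  int* processor() {
    if (!processor_) processor_ = new int(42);  // stand-in for Processor::New
    return processor_;
  }
  bool initialized() const { return processor_ != nullptr; }
 private:
  int* processor_ = nullptr;
};

int main() {
  Worker w;
  std::printf("before: %d\n", w.initialized());  // 0 - nothing allocated yet
  std::printf("value:  %d\n", *w.processor());   // triggers initialization
  std::printf("after:  %d\n", w.initialized());  // 1
  return 0;
}
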
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index 5392534d66..55891f87c5 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -15,6 +15,11 @@
#include "src/heap/local-heap.h"
namespace v8 {
+
+namespace bigint {
+class Processor;
+}
+
namespace internal {
class Isolate;
@@ -48,11 +53,14 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
OFFSET_OF(LocalIsolate, heap_));
}
+ bool is_main_thread() { return heap()->is_main_thread(); }
+
LocalHeap* heap() { return &heap_; }
inline Address cage_base() const;
inline ReadOnlyHeap* read_only_heap() const;
inline Object root(RootIndex index) const;
+ inline Handle<Object> root_handle(RootIndex index) const;
StringTable* string_table() const { return isolate_->string_table(); }
base::SharedMutex* internalized_string_access() {
@@ -67,6 +75,9 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
bool has_pending_exception() const { return false; }
+ void RegisterDeserializerStarted();
+ void RegisterDeserializerFinished();
+
template <typename T>
Handle<T> Throw(Handle<Object> exception) {
UNREACHABLE();
@@ -86,14 +97,30 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
ThreadId thread_id() const { return thread_id_; }
Address stack_limit() const { return stack_limit_; }
RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
+ bigint::Processor* bigint_processor() {
+ if (!bigint_processor_) InitializeBigIntProcessor();
+ return bigint_processor_;
+ }
bool is_main_thread() const { return heap_.is_main_thread(); }
+ // AsIsolate is only allowed on the main-thread.
+ Isolate* AsIsolate() {
+ DCHECK(is_main_thread());
+ DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
+ return isolate_;
+ }
LocalIsolate* AsLocalIsolate() { return this; }
+ Object* pending_message_address() {
+ return isolate_->pending_message_address();
+ }
+
private:
friend class v8::internal::LocalFactory;
+ void InitializeBigIntProcessor();
+
LocalHeap heap_;
// TODO(leszeks): Extract out the fields of the Isolate we want and store
@@ -105,6 +132,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
Address const stack_limit_;
RuntimeCallStats* runtime_call_stats_;
+ bigint::Processor* bigint_processor_{nullptr};
};
template <base::MutexSharedType kIsShared>
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index cae642e2c9..68c2c4a09e 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -74,7 +74,7 @@ Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate,
Microtask microtask = Microtask::cast(Object(raw_microtask));
reinterpret_cast<MicrotaskQueue*>(microtask_queue_pointer)
->EnqueueMicrotask(microtask);
- return ReadOnlyRoots(isolate).undefined_value().ptr();
+ return Smi::zero().ptr();
}
void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
@@ -110,23 +110,21 @@ void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
++size_;
}
-void MicrotaskQueue::PerformCheckpoint(v8::Isolate* v8_isolate) {
- if (!IsRunningMicrotasks() && !GetMicrotasksScopeDepth() &&
- !HasMicrotasksSuppressions()) {
- std::unique_ptr<MicrotasksScope> microtasks_scope;
- if (microtasks_policy_ == v8::MicrotasksPolicy::kScoped) {
- // If we're using microtask scopes to schedule microtask execution, V8
- // API calls will check that there's always a microtask scope on the
- // stack. As the microtasks we're about to execute could invoke embedder
- // callbacks which then calls back into V8, we create an artificial
- // microtask scope here to avoid running into the CallDepthScope check.
- microtasks_scope.reset(new v8::MicrotasksScope(
- v8_isolate, this, v8::MicrotasksScope::kDoNotRunMicrotasks));
- }
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- RunMicrotasks(isolate);
- isolate->ClearKeptObjects();
+void MicrotaskQueue::PerformCheckpointInternal(v8::Isolate* v8_isolate) {
+ DCHECK(ShouldPerfomCheckpoint());
+ std::unique_ptr<MicrotasksScope> microtasks_scope;
+ if (microtasks_policy_ == v8::MicrotasksPolicy::kScoped) {
+ // If we're using microtask scopes to schedule microtask execution, V8
+ // API calls will check that there's always a microtask scope on the
+ // stack. As the microtasks we're about to execute could invoke embedder
+ // callbacks which then calls back into V8, we create an artificial
+ // microtask scope here to avoid running into the CallDepthScope check.
+ microtasks_scope.reset(new v8::MicrotasksScope(
+ v8_isolate, this, v8::MicrotasksScope::kDoNotRunMicrotasks));
}
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ RunMicrotasks(isolate);
+ isolate->ClearKeptObjects();
}
namespace {
@@ -226,10 +224,6 @@ void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
}
}
-int MicrotaskQueue::GetMicrotasksScopeDepth() const {
- return microtasks_depth_;
-}
-
void MicrotaskQueue::AddMicrotasksCompletedCallback(
MicrotasksCompletedCallbackWithData callback, void* data) {
CallbackWithData callback_with_data(callback, data);
@@ -250,7 +244,7 @@ void MicrotaskQueue::RemoveMicrotasksCompletedCallback(
microtasks_completed_callbacks_.erase(pos);
}
-void MicrotaskQueue::FireMicrotasksCompletedCallback(Isolate* isolate) const {
+void MicrotaskQueue::OnCompleted(Isolate* isolate) const {
std::vector<CallbackWithData> callbacks(microtasks_completed_callbacks_);
for (auto& callback : callbacks) {
callback.first(reinterpret_cast<v8::Isolate*>(isolate), callback.second);
@@ -263,10 +257,6 @@ Microtask MicrotaskQueue::get(intptr_t index) const {
return Microtask::cast(microtask);
}
-void MicrotaskQueue::OnCompleted(Isolate* isolate) {
- FireMicrotasksCompletedCallback(isolate);
-}
-
void MicrotaskQueue::ResizeBuffer(intptr_t new_capacity) {
DCHECK_LE(size_, new_capacity);
Address* new_ring_buffer = new Address[new_capacity];
diff --git a/deps/v8/src/execution/microtask-queue.h b/deps/v8/src/execution/microtask-queue.h
index 82840c2bed..e9d40a924f 100644
--- a/deps/v8/src/execution/microtask-queue.h
+++ b/deps/v8/src/execution/microtask-queue.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
// Uses raw Address values because it's called via ExternalReference.
// {raw_microtask} is a tagged Microtask pointer.
- // Returns a tagged Object pointer.
+ // Returns Smi::kZero due to CallCFunction.
static Address CallEnqueueMicrotask(Isolate* isolate,
intptr_t microtask_queue_pointer,
Address raw_microtask);
@@ -40,7 +40,15 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
v8::Local<Function> microtask) override;
void EnqueueMicrotask(v8::Isolate* isolate, v8::MicrotaskCallback callback,
void* data) override;
- void PerformCheckpoint(v8::Isolate* isolate) override;
+ void PerformCheckpoint(v8::Isolate* isolate) override {
+ if (!ShouldPerfomCheckpoint()) return;
+ PerformCheckpointInternal(isolate);
+ }
+
+ bool ShouldPerfomCheckpoint() const {
+ return !IsRunningMicrotasks() && !GetMicrotasksScopeDepth() &&
+ !HasMicrotasksSuppressions();
+ }
void EnqueueMicrotask(Microtask microtask);
void AddMicrotasksCompletedCallback(
@@ -62,7 +70,7 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
// invocation, which happens when depth reaches zero.
void IncrementMicrotasksScopeDepth() { ++microtasks_depth_; }
void DecrementMicrotasksScopeDepth() { --microtasks_depth_; }
- int GetMicrotasksScopeDepth() const override;
+ int GetMicrotasksScopeDepth() const override { return microtasks_depth_; }
// Possibly nested microtasks suppression scopes prevent microtasks
// from running.
@@ -87,8 +95,6 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
}
v8::MicrotasksPolicy microtasks_policy() const { return microtasks_policy_; }
- void FireMicrotasksCompletedCallback(Isolate* isolate) const;
-
intptr_t capacity() const { return capacity_; }
intptr_t size() const { return size_; }
intptr_t start() const { return start_; }
@@ -107,7 +113,9 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
static const intptr_t kMinimumCapacity;
private:
- void OnCompleted(Isolate* isolate);
+ void PerformCheckpointInternal(v8::Isolate* v8_isolate);
+
+ void OnCompleted(Isolate* isolate) const;
MicrotaskQueue();
void ResizeBuffer(intptr_t new_capacity);
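
PerformCheckpoint is split into an inlined guard plus an out-of-line PerformCheckpointInternal, mirroring the FireCallCompletedCallback change in isolate.h: the common no-op case stays cheap in the header while the real work remains in the .cc file. A small illustrative sketch of that split (names and guard conditions are simplified, not V8's):

#include <cstdio>

class Queue {
 public:
  void PerformCheckpoint() {            // inline fast path in the header
    if (!ShouldPerformCheckpoint()) return;
    PerformCheckpointInternal();        // out-of-line slow path
  }
  bool ShouldPerformCheckpoint() const { return !running_ && depth_ == 0; }
  void set_running(bool r) { running_ = r; }

 private:
  void PerformCheckpointInternal() { std::puts("running microtasks"); }
  bool running_ = false;
  int depth_ = 0;
};

int main() {
  Queue q;
  q.PerformCheckpoint();   // prints: running microtasks
  q.set_running(true);
  q.PerformCheckpoint();   // guard fails, nothing happens
  return 0;
}
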
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index 49ea63678c..c49172a564 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -1169,7 +1169,7 @@ void Simulator::set_fpu_register_invalid_result64(float original,
if (FCSR_ & kFCSRNaN2008FlagMask) {
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
if (std::isnan(original)) {
set_fpu_register(fd_reg(), 0);
@@ -1228,7 +1228,7 @@ void Simulator::set_fpu_register_invalid_result64(double original,
if (FCSR_ & kFCSRNaN2008FlagMask) {
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
if (std::isnan(original)) {
set_fpu_register(fd_reg(), 0);
@@ -1288,7 +1288,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
bool ret = false;
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
clear_fcsr_cause();
@@ -1366,7 +1366,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
bool ret = false;
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
clear_fcsr_cause();
@@ -5978,8 +5978,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
*dst = 0;
- } else if (element >= max_int || element <= min_int) {
- *dst = element >= max_int ? max_int : min_int;
+ } else if (element >= static_cast<T_fp>(max_int) || element <= min_int) {
+ *dst = element >= static_cast<T_fp>(max_int) ? max_int : min_int;
} else {
*dst = static_cast<T_int>(std::trunc(element));
}
@@ -5990,8 +5990,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
*dst = 0;
- } else if (element >= max_int || element <= 0) {
- *dst = element >= max_int ? max_int : 0;
+ } else if (element >= static_cast<T_fp>(max_int) || element <= 0) {
+ *dst = element >= static_cast<T_fp>(max_int) ? max_int : 0;
} else {
*dst = static_cast<T_uint>(std::trunc(element));
}
@@ -6066,8 +6066,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
*dst = 0;
- } else if (element < min_int || element > max_int) {
- *dst = element > max_int ? max_int : min_int;
+ } else if (element < min_int || element > static_cast<T_fp>(max_int)) {
+ *dst = element > static_cast<T_fp>(max_int) ? max_int : min_int;
} else {
sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
@@ -6078,8 +6078,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
*dst = 0;
- } else if (element < 0 || element > max_uint) {
- *dst = element > max_uint ? max_uint : 0;
+ } else if (element < 0 || element > static_cast<T_fp>(max_uint)) {
+ *dst = element > static_cast<T_fp>(max_uint) ? max_uint : 0;
} else {
T_uint res;
sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
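
These hunks make the lossy int64-to-double conversions explicit: as the surrounding comments note, INT64_MAX itself is not representable as a double, and static_cast<double>(INT64_MAX) rounds up to 2^63, which is the value the saturation checks actually compare against. A short standalone program demonstrating the values involved (the 9.3e18 input is just an example):

#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  int64_t max_i64 = std::numeric_limits<int64_t>::max();  // 2^63 - 1
  double as_double = static_cast<double>(max_i64);         // rounds to 2^63
  std::printf("INT64_MAX         = %" PRId64 "\n", max_i64);
  std::printf("(double)INT64_MAX = %.1f\n", as_double);
  std::printf("2^63              = %.1f\n", std::ldexp(1.0, 63));
  // The saturation checks therefore compare against the converted value:
  double element = 9.3e18;  // larger than 2^63, must saturate to INT64_MAX
  int64_t dst = (element >= as_double) ? max_i64
                                       : static_cast<int64_t>(element);
  std::printf("saturated         = %" PRId64 "\n", dst);
  return 0;
}
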
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index fa4f0fd987..d45889e5a2 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -1099,7 +1099,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
bool ret = false;
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
clear_fcsr_cause();
@@ -1213,7 +1213,7 @@ void Simulator::set_fpu_register_invalid_result64(float original,
if (FCSR_ & kFCSRNaN2008FlagMask) {
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
if (std::isnan(original)) {
set_fpu_register(fd_reg(), 0);
@@ -1272,7 +1272,7 @@ void Simulator::set_fpu_register_invalid_result64(double original,
if (FCSR_ & kFCSRNaN2008FlagMask) {
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
if (std::isnan(original)) {
set_fpu_register(fd_reg(), 0);
@@ -1294,7 +1294,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
bool ret = false;
// The value of INT64_MAX (2^63-1) can't be represented as double exactly,
// loading the most accurate representation into max_int64, which is 2^63.
- double max_int64 = std::numeric_limits<int64_t>::max();
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
double min_int64 = std::numeric_limits<int64_t>::min();
clear_fcsr_cause();
@@ -6269,8 +6269,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
*dst = 0;
- } else if (element >= max_int || element <= min_int) {
- *dst = element >= max_int ? max_int : min_int;
+ } else if (element >= static_cast<T_fp>(max_int) || element <= min_int) {
+ *dst = element >= static_cast<T_fp>(max_int) ? max_int : min_int;
} else {
*dst = static_cast<T_int>(std::trunc(element));
}
@@ -6281,8 +6281,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
*dst = 0;
- } else if (element >= max_int || element <= 0) {
- *dst = element >= max_int ? max_int : 0;
+ } else if (element >= static_cast<T_fp>(max_int) || element <= 0) {
+ *dst = element >= static_cast<T_fp>(max_int) ? max_int : 0;
} else {
*dst = static_cast<T_uint>(std::trunc(element));
}
@@ -6357,8 +6357,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
*dst = 0;
- } else if (element < min_int || element > max_int) {
- *dst = element > max_int ? max_int : min_int;
+ } else if (element < min_int || element > static_cast<T_fp>(max_int)) {
+ *dst = element > static_cast<T_fp>(max_int) ? max_int : min_int;
} else {
sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
@@ -6369,8 +6369,8 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
*dst = 0;
- } else if (element < 0 || element > max_uint) {
- *dst = element > max_uint ? max_uint : 0;
+ } else if (element < 0 || element > static_cast<T_fp>(max_uint)) {
+ *dst = element > static_cast<T_fp>(max_uint) ? max_uint : 0;
} else {
T_uint res;
sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
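
Note on the MIPS/MIPS64 simulator hunks above: they wrap the integer limits in static_cast<T_fp>(...) before comparing them with a floating-point element. The maximum of a wide integer type is generally not exactly representable in the floating-point type, so the comparison already involves a rounded conversion; spelling the cast out makes that rounding explicit and silences implicit-conversion warnings. A minimal standalone sketch of the effect, in plain standard C++ (not part of the patch):

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      // INT64_MAX (2^63 - 1) has no exact double representation; the cast
      // rounds to the nearest representable value, which is exactly 2^63.
      constexpr int64_t max_int64 = std::numeric_limits<int64_t>::max();
      const double max_as_double = static_cast<double>(max_int64);
      std::printf("max_int64     = %lld\n", static_cast<long long>(max_int64));
      std::printf("max_as_double = %.1f\n", max_as_double);

      // Comparing a double against max_int64 performs the same conversion
      // implicitly; static_cast<double>(max_int64) keeps it visible without
      // changing the result of the comparison.
      const double element = 9.3e18;
      std::printf("overflow      = %d\n", element > static_cast<double>(max_int64));
      return 0;
    }
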
diff --git a/deps/v8/src/execution/pointer-authentication-dummy.h b/deps/v8/src/execution/pointer-authentication-dummy.h
index 9948d8303d..4a19f41fbf 100644
--- a/deps/v8/src/execution/pointer-authentication-dummy.h
+++ b/deps/v8/src/execution/pointer-authentication-dummy.h
@@ -5,11 +5,9 @@
#ifndef V8_EXECUTION_POINTER_AUTHENTICATION_DUMMY_H_
#define V8_EXECUTION_POINTER_AUTHENTICATION_DUMMY_H_
-#include "src/execution/pointer-authentication.h"
-
-#include "include/v8.h"
+#include "include/v8-internal.h"
#include "src/base/macros.h"
-#include "src/common/globals.h"
+#include "src/execution/pointer-authentication.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/pointer-authentication.h b/deps/v8/src/execution/pointer-authentication.h
index 8caf1d0b9d..5052d92840 100644
--- a/deps/v8/src/execution/pointer-authentication.h
+++ b/deps/v8/src/execution/pointer-authentication.h
@@ -5,10 +5,9 @@
#ifndef V8_EXECUTION_POINTER_AUTHENTICATION_H_
#define V8_EXECUTION_POINTER_AUTHENTICATION_H_
-#include "include/v8.h"
+#include "include/v8-internal.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
-#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 7e9ca5c766..7b0b4bc00c 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -44,7 +44,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
static constexpr RegList kPushedGpRegs =
- Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11);
+ Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11, cp);
static constexpr RegList kPushedFpRegs = DoubleRegister::ListOf(
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 23904593ad..5e9751c07a 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -2523,7 +2523,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
-#if V8_TARGET_ARCH_PPC64
case CNTLZDX: {
int rs = instr->RSValue();
int ra = instr->RAValue();
@@ -2549,7 +2548,42 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
-#endif
+ case CNTTZWX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint32_t rs_val = static_cast<uint32_t>(get_register(rs));
+ uintptr_t count = __builtin_ctz(rs_val);
+ set_register(ra, count);
+ if (instr->Bit(0)) { // RC Bit set
+ int bf = 0;
+ if (count > 0) {
+ bf |= 0x40000000;
+ }
+ if (count == 0) {
+ bf |= 0x20000000;
+ }
+ condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+ }
+ break;
+ }
+ case CNTTZDX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+      uintptr_t count = __builtin_ctzll(rs_val);  // 64-bit trailing-zero count for the doubleword value
+ set_register(ra, count);
+ if (instr->Bit(0)) { // RC Bit set
+ int bf = 0;
+ if (count > 0) {
+ bf |= 0x40000000;
+ }
+ if (count == 0) {
+ bf |= 0x20000000;
+ }
+ condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+ }
+ break;
+ }
case ANDX: {
int rs = instr->RSValue();
int ra = instr->RAValue();
@@ -3643,6 +3677,16 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
+ case FCPSGN: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ int fra = instr->RAValue();
+ double frb_val = get_double_from_d_register(frb);
+ double fra_val = get_double_from_d_register(fra);
+ double frt_val = std::copysign(fra_val, frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
case FMR: {
int frt = instr->RTValue();
int frb = instr->RBValue();
@@ -4117,6 +4161,21 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
#undef VINSERT
+#define VINSERT_IMMEDIATE(type) \
+ uint8_t uim = instr->Bits(19, 16); \
+ int vrt = instr->RTValue(); \
+ int rb = instr->RBValue(); \
+ type src = static_cast<type>(get_register(rb)); \
+ set_simd_register_bytes<type>(vrt, uim, src);
+ case VINSD: {
+ VINSERT_IMMEDIATE(int64_t)
+ break;
+ }
+ case VINSW: {
+ VINSERT_IMMEDIATE(int32_t)
+ break;
+ }
+#undef VINSERT_IMMEDIATE
#define VEXTRACT(type, element) \
uint8_t uim = instr->Bits(19, 16); \
int vrt = instr->RTValue(); \
@@ -4218,6 +4277,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
VECTOR_ARITHMETIC_OP(int64_t, -)
break;
}
+ case VMULLD: {
+ VECTOR_ARITHMETIC_OP(int64_t, *)
+ break;
+ }
case VADDUWM: {
VECTOR_ARITHMETIC_OP(int32_t, +)
break;
@@ -4345,6 +4408,20 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
type b_val = get_simd_register_by_lane<type>(b, i); \
set_simd_register_by_lane<type>(t, i, a_val op b_val ? a_val : b_val); \
}
+ case XSMINDP: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ double a_val = get_double_from_d_register(a);
+ double b_val = get_double_from_d_register(b);
+ set_d_register_from_double(t, VSXFPMin<double>(a_val, b_val));
+ break;
+ }
+ case XSMAXDP: {
+ DECODE_VX_INSTRUCTION(t, a, b, T)
+ double a_val = get_double_from_d_register(a);
+ double b_val = get_double_from_d_register(b);
+ set_d_register_from_double(t, VSXFPMax<double>(a_val, b_val));
+ break;
+ }
case XVMINDP: {
DECODE_VX_INSTRUCTION(t, a, b, T)
FOR_EACH_LANE(i, double) {
@@ -5010,6 +5087,32 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
+#define EXTRACT_MASK(type) \
+ int rt = instr->RTValue(); \
+ int vrb = instr->RBValue(); \
+ uint64_t result = 0; \
+ FOR_EACH_LANE(i, type) { \
+ if (i > 0) result <<= 1; \
+ result |= std::signbit(get_simd_register_by_lane<type>(vrb, i)); \
+ } \
+ set_register(rt, result);
+ case VEXTRACTDM: {
+ EXTRACT_MASK(int64_t)
+ break;
+ }
+ case VEXTRACTWM: {
+ EXTRACT_MASK(int32_t)
+ break;
+ }
+ case VEXTRACTHM: {
+ EXTRACT_MASK(int16_t)
+ break;
+ }
+ case VEXTRACTBM: {
+ EXTRACT_MASK(int8_t)
+ break;
+ }
+#undef EXTRACT_MASK
#undef FOR_EACH_LANE
#undef DECODE_VX_INSTRUCTION
#undef GET_ADDRESS
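
The new EXTRACT_MASK cases above gather the sign bit of every SIMD lane into an integer mask, shifting the accumulated bits up before each lane so that lane 0 ends up in the most significant of the used bits. A self-contained sketch of that accumulation pattern, using a plain array in place of the simulator's SIMD register accessors (lane values are made up for illustration):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Stand-in for one vector register viewed as four int32_t lanes.
      const int32_t lanes[4] = {-7, 3, -1, 42};

      uint64_t mask = 0;
      for (int i = 0; i < 4; ++i) {
        if (i > 0) mask <<= 1;  // make room before adding the next lane's bit
        mask |= std::signbit(static_cast<double>(lanes[i])) ? 1u : 0u;
      }
      // Lanes {-7, 3, -1, 42} have sign bits 1, 0, 1, 0 -> mask == 0b1010 == 10.
      std::printf("mask = %llu\n", static_cast<unsigned long long>(mask));
      return 0;
    }
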
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 723ed5bdc6..3ec0c0e811 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -1271,7 +1271,7 @@ T Simulator::ReadMem(int64_t addr, Instruction* instr) {
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
// check for natural alignment
- if ((addr & (sizeof(T) - 1)) != 0) {
+ if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
@@ -1293,7 +1293,7 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
// check for natural alignment
- if ((addr & (sizeof(T) - 1)) != 0) {
+ if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
@@ -2826,6 +2826,14 @@ void Simulator::DecodeRVR4Type() {
}
}
+Builtin Simulator::LookUp(Address pc) {
+ for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
+ ++builtin) {
+ if (builtins_.code(builtin).contains(isolate_, pc)) return builtin;
+ }
+ return Builtin::kNoBuiltinId;
+}
+
void Simulator::DecodeRVIType() {
switch (instr_.InstructionBits() & kITypeMask) {
case RO_JALR: {
@@ -2834,29 +2842,34 @@ void Simulator::DecodeRVIType() {
int64_t next_pc = (rs1() + imm12()) & ~reg_t(1);
set_pc(next_pc);
if (::v8::internal::FLAG_trace_sim) {
- if ((rs1_reg() != ra || imm12() != 0)) {
- const char* name = builtins_.Lookup((Address)next_pc);
- if (name != nullptr) {
- int64_t arg0 = get_register(a0);
- int64_t arg1 = get_register(a1);
- int64_t arg2 = get_register(a2);
- int64_t arg3 = get_register(a3);
- int64_t arg4 = get_register(a4);
- int64_t arg5 = get_register(a5);
- int64_t arg6 = get_register(a6);
- int64_t arg7 = get_register(a7);
- int64_t* stack_pointer =
- reinterpret_cast<int64_t*>(get_register(sp));
- int64_t arg8 = stack_pointer[0];
- int64_t arg9 = stack_pointer[1];
- PrintF(
- "Call to Builtin at %s "
- "a0 %08" PRIx64 " ,a1 %08" PRIx64 " ,a2 %08" PRIx64
- " ,a3 %08" PRIx64 " ,a4 %08" PRIx64 " ,a5 %08" PRIx64
- " ,a6 %08" PRIx64 " ,a7 %08" PRIx64 " ,0(sp) %08" PRIx64
- " ,8(sp) %08" PRIx64 " ,sp %08" PRIx64 ",fp %08" PRIx64 " \n",
- name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8,
- arg9, get_register(sp), get_register(fp));
+ Builtin builtin = LookUp((Address)get_pc());
+ if (builtin != Builtin::kNoBuiltinId) {
+ auto code = builtins_.code(builtin);
+ if ((rs1_reg() != ra || imm12() != 0)) {
+ if ((Address)get_pc() == code.InstructionStart()) {
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t* stack_pointer =
+ reinterpret_cast<int64_t*>(get_register(sp));
+ int64_t arg8 = stack_pointer[0];
+ int64_t arg9 = stack_pointer[1];
+ PrintF(
+ "Call to Builtin at %s "
+ "a0 %08" PRIx64 " ,a1 %08" PRIx64 " ,a2 %08" PRIx64
+ " ,a3 %08" PRIx64 " ,a4 %08" PRIx64 " ,a5 %08" PRIx64
+ " ,a6 %08" PRIx64 " ,a7 %08" PRIx64 " ,0(sp) %08" PRIx64
+ " ,8(sp) %08" PRIx64 " ,sp %08" PRIx64 ",fp %08" PRIx64 " \n",
+ builtins_.name(builtin), arg0, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, get_register(sp), get_register(fp));
+ }
+ } else if (rd_reg() == zero_reg) {
+ PrintF("Return to Builtin at %s \n", builtins_.name(builtin));
}
}
}
@@ -3303,20 +3316,22 @@ void Simulator::DecodeCLType() {
switch (instr_.RvcOpcode()) {
case RO_C_LW: {
int64_t addr = rvc_rs1s() + rvc_imm5_w();
- auto val = ReadMem<int32_t>(addr, instr_.instr());
+ int64_t val = ReadMem<int32_t>(addr, instr_.instr());
set_rvc_rs2s(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rvc_rs2s_reg()));
break;
}
case RO_C_LD: {
int64_t addr = rvc_rs1s() + rvc_imm5_d();
- auto val = ReadMem<int64_t>(addr, instr_.instr());
+ int64_t val = ReadMem<int64_t>(addr, instr_.instr());
set_rvc_rs2s(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rvc_rs2s_reg()));
break;
}
case RO_C_FLD: {
int64_t addr = rvc_rs1s() + rvc_imm5_d();
- auto val = ReadMem<double>(addr, instr_.instr());
- set_rvc_drs2s(sext_xlen(val), false);
+ double val = ReadMem<double>(addr, instr_.instr());
+ set_rvc_drs2s(val, false);
break;
}
default:
@@ -3468,8 +3483,8 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF(" 0x%012" PRIxPTR " %ld %-44s %s\n",
- reinterpret_cast<intptr_t>(instr), icount_, buffer.begin(),
+ PrintF(" 0x%012" PRIxPTR " %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.begin(),
trace_buf_.begin());
}
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
index 05e403e5e5..2fa40cea4e 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -522,11 +522,15 @@ class Simulator : public SimulatorBase {
set_register(rvc_rs1s_reg(), value);
if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), DWORD);
}
+ inline void set_rvc_rs2(int64_t value, bool trace = true) {
+ set_register(rvc_rs2_reg(), value);
+ if (trace) TraceRegWr(get_register(rvc_rs2_reg()), DWORD);
+ }
inline void set_rvc_drd(double value, bool trace = true) {
set_fpu_register_double(rvc_rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
}
- inline void set_rvc_rs2s(double value, bool trace = true) {
+ inline void set_rvc_rs2s(int64_t value, bool trace = true) {
set_register(rvc_rs2s_reg(), value);
if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), DWORD);
}
@@ -610,6 +614,7 @@ class Simulator : public SimulatorBase {
return alu_out;
}
+ Builtin LookUp(Address pc);
// RISCV decoding routine
void DecodeRVRType();
void DecodeRVR4Type();
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 64962c8a84..c2b09c67b1 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -20,7 +20,7 @@ void ThreadLocalTop::Clear() {
pending_handler_fp_ = kNullAddress;
pending_handler_sp_ = kNullAddress;
last_api_entry_ = kNullAddress;
- pending_message_obj_ = Object();
+ pending_message_ = Object();
rethrowing_message_ = false;
external_caught_exception_ = false;
c_entry_fp_ = kNullAddress;
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index 27ff661fd5..f903747aeb 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -116,7 +116,7 @@ class ThreadLocalTop {
Address last_api_entry_;
// Communication channel between Isolate::Throw and message consumers.
- Object pending_message_obj_;
+ Object pending_message_;
bool rethrowing_message_;
// Use a separate value for scheduled exceptions to preserve the
diff --git a/deps/v8/src/execution/vm-state-inl.h b/deps/v8/src/execution/vm-state-inl.h
index 1781917764..91fcbf30c7 100644
--- a/deps/v8/src/execution/vm-state-inl.h
+++ b/deps/v8/src/execution/vm-state-inl.h
@@ -54,7 +54,9 @@ VMState<Tag>::~VMState() {
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
: isolate_(isolate),
callback_(callback),
- previous_scope_(isolate->external_callback_scope()) {
+ previous_scope_(isolate->external_callback_scope()),
+ vm_state_(isolate),
+ pause_timed_histogram_scope_(isolate->counters()->execute_precise()) {
#ifdef USE_SIMULATOR
scope_address_ = Simulator::current(isolate)->get_sp();
#endif
diff --git a/deps/v8/src/execution/vm-state.h b/deps/v8/src/execution/vm-state.h
index f342c0b0d0..9621bee421 100644
--- a/deps/v8/src/execution/vm-state.h
+++ b/deps/v8/src/execution/vm-state.h
@@ -7,6 +7,7 @@
#include "include/v8.h"
#include "src/common/globals.h"
+#include "src/logging/counters-scopes.h"
namespace v8 {
namespace internal {
@@ -46,6 +47,8 @@ class V8_NODISCARD ExternalCallbackScope {
Isolate* isolate_;
Address callback_;
ExternalCallbackScope* previous_scope_;
+ VMState<EXTERNAL> vm_state_;
+ PauseNestedTimedHistogramScope pause_timed_histogram_scope_;
#ifdef USE_SIMULATOR
Address scope_address_;
#endif
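
The ExternalCallbackScope changes above turn the VM state and the paused execute_precise histogram scope into data members, so entering an external callback sets both up automatically and tears them down in reverse order when the scope ends. A compile-and-run sketch of that RAII layering with hypothetical stand-in types (not the real VMState or PauseNestedTimedHistogramScope):

    #include <cstdio>

    struct VmStateScope {
      VmStateScope()  { std::printf("enter EXTERNAL VM state\n"); }
      ~VmStateScope() { std::printf("leave EXTERNAL VM state\n"); }
    };

    struct PauseHistogramScope {
      PauseHistogramScope()  { std::printf("pause execute_precise timer\n"); }
      ~PauseHistogramScope() { std::printf("resume execute_precise timer\n"); }
    };

    // Hypothetical stand-in for ExternalCallbackScope: members are constructed
    // in declaration order on entry and destroyed in reverse order on exit.
    class CallbackScope {
      VmStateScope vm_state_;
      PauseHistogramScope pause_timed_histogram_scope_;
    };

    int main() {
      {
        CallbackScope scope;
        std::printf("... run embedder callback ...\n");
      }
      return 0;
    }
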
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index dceb46ada7..312d17b52f 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -190,6 +190,13 @@ struct MaybeBoolFlag {
#define ENABLE_SPARKPLUG false
#endif
+#if ENABLE_SPARKPLUG && !defined(ANDROID)
+// Enable Sparkplug by default on desktop-only.
+#define ENABLE_SPARKPLUG_BY_DEFAULT true
+#else
+#define ENABLE_SPARKPLUG_BY_DEFAULT false
+#endif
+
// Supported ARM configurations are:
// "armv6": ARMv6 + VFPv2
// "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -276,7 +283,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
V(harmony_import_assertions, "harmony import assertions") \
V(harmony_rab_gsab, \
- "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer")
+ "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") \
+ V(harmony_array_find_last, "harmony array find last helpers")
#ifdef V8_INTL_SUPPORT
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
@@ -285,8 +293,7 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_class_static_blocks, "harmony static initializer blocks")
+#define HARMONY_STAGED_BASE(V)
#ifdef V8_INTL_SUPPORT
#define HARMONY_STAGED(V) \
@@ -304,12 +311,12 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#define HARMONY_SHIPPING_BASE(V) \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_atomics, "harmony atomics") \
- V(harmony_regexp_match_indices, "harmony regexp match indices") \
V(harmony_private_brand_checks, "harmony private brand checks") \
V(harmony_top_level_await, "harmony top level await") \
V(harmony_relative_indexing_methods, "harmony relative indexing methods") \
V(harmony_error_cause, "harmony error cause property") \
- V(harmony_object_has_own, "harmony Object.hasOwn")
+ V(harmony_object_has_own, "harmony Object.hasOwn") \
+ V(harmony_class_static_blocks, "harmony static initializer blocks")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) \
@@ -480,8 +487,6 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_WEAK_IMPLICATION(future, finalize_streaming_on_background)
-DEFINE_WEAK_IMPLICATION(future, super_ic)
DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
@@ -631,6 +636,7 @@ DEFINE_BOOL(
turboprop_as_toptier, false,
"enable experimental turboprop compiler without further tierup to turbofan")
DEFINE_IMPLICATION(turboprop_as_toptier, turboprop)
+DEFINE_WEAK_VALUE_IMPLICATION(turboprop, interrupt_budget, 115 * KB)
DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
"max number of map checks to perform in minimorphic state")
DEFINE_INT(turboprop_inline_scaling_factor, 4,
@@ -638,7 +644,7 @@ DEFINE_INT(turboprop_inline_scaling_factor, 4,
"TurboProp compared to TurboFan")
// The scale factor determines the interrupt budget when tiering up from
// Turboprop to TurboFan.
-DEFINE_INT(interrupt_budget_scale_factor_for_top_tier, 5,
+DEFINE_INT(interrupt_budget_scale_factor_for_top_tier, 20,
"scale factor for profiler ticks when tiering up from midtier")
// Flags for Sparkplug
@@ -648,7 +654,8 @@ DEFINE_INT(interrupt_budget_scale_factor_for_top_tier, 5,
#else
#define FLAG FLAG_READONLY
#endif
-DEFINE_BOOL(sparkplug, false, "enable experimental Sparkplug baseline compiler")
+DEFINE_BOOL(sparkplug, ENABLE_SPARKPLUG_BY_DEFAULT,
+ "enable Sparkplug baseline compiler")
DEFINE_BOOL(always_sparkplug, false, "directly tier up to Sparkplug code")
DEFINE_BOOL(sparkplug_on_heap, false, "compile Sparkplug code directly on heap")
#if ENABLE_SPARKPLUG
@@ -698,9 +705,6 @@ DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
-DEFINE_BOOL(
- turbo_concurrent_get_property_access_info, false,
- "concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)")
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
DEFINE_WEAK_IMPLICATION(future, concurrent_inlining)
@@ -712,6 +716,7 @@ DEFINE_BOOL(trace_heap_broker, false,
"trace the heap broker (reports on missing data only)")
DEFINE_IMPLICATION(trace_heap_broker_verbose, trace_heap_broker)
DEFINE_IMPLICATION(trace_heap_broker_memory, trace_heap_broker)
+DEFINE_IMPLICATION(trace_heap_broker, trace_pending_allocations)
// Flags for stress-testing the compiler.
DEFINE_INT(stress_runs, 0, "number of stress runs")
@@ -858,8 +863,7 @@ DEFINE_BOOL(turbo_compress_translation_arrays, false,
"compress translation arrays (experimental)")
DEFINE_BOOL(turbo_inline_js_wasm_calls, false, "inline JS->Wasm calls")
-DEFINE_BOOL(turbo_optimize_apply, false, "optimize Function.prototype.apply")
-DEFINE_WEAK_IMPLICATION(future, turbo_optimize_apply)
+DEFINE_BOOL(turbo_optimize_apply, true, "optimize Function.prototype.apply")
DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
"enable experimental feedback collection in generic lowering.")
@@ -1123,6 +1127,8 @@ DEFINE_BOOL(
trace_allocations_origins, false,
"Show statistics about the origins of allocations. "
"Combine with --no-inline-new to track allocations from generated code")
+DEFINE_BOOL(trace_pending_allocations, false,
+ "trace calls to Heap::IsAllocationPending that return true")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
@@ -1238,11 +1244,12 @@ DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
+DEFINE_BOOL(flush_baseline_code, false,
+ "flush of baseline code when it has not been executed recently")
DEFINE_BOOL(flush_bytecode, true,
"flush of bytecode when it has not been executed recently")
-DEFINE_BOOL(stress_flush_bytecode, false, "stress bytecode flushing")
+DEFINE_BOOL(stress_flush_code, false, "stress code flushing")
DEFINE_BOOL(trace_flush_bytecode, false, "trace bytecode flushing")
-DEFINE_IMPLICATION(stress_flush_bytecode, flush_bytecode)
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
@@ -1378,12 +1385,17 @@ DEFINE_BOOL(stress_background_compile, false,
DEFINE_BOOL(
finalize_streaming_on_background, true,
"perform the script streaming finalization on the background thread")
+DEFINE_BOOL(concurrent_cache_deserialization, true,
+ "enable deserializing code caches on background")
// TODO(leszeks): Parallel compile tasks currently don't support off-thread
// finalization.
DEFINE_NEG_IMPLICATION(parallel_compile_tasks, finalize_streaming_on_background)
DEFINE_BOOL(disable_old_api_accessors, false,
"Disable old-style API accessors whose setters trigger through the "
"prototype chain")
+DEFINE_BOOL(
+ embedder_instance_types, false,
+ "enable type checks based on instance types provided by the embedder")
// bootstrapper.cc
DEFINE_BOOL(expose_gc, false, "expose gc extension")
@@ -1467,10 +1479,10 @@ DEFINE_BOOL(compilation_cache, true, "enable compilation cache")
DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
-// compiler-dispatcher.cc
+// lazy-compile-dispatcher.cc
DEFINE_BOOL(parallel_compile_tasks, false, "enable parallel compile tasks")
-DEFINE_BOOL(compiler_dispatcher, false, "enable compiler dispatcher")
-DEFINE_IMPLICATION(parallel_compile_tasks, compiler_dispatcher)
+DEFINE_BOOL(lazy_compile_dispatcher, false, "enable compiler dispatcher")
+DEFINE_IMPLICATION(parallel_compile_tasks, lazy_compile_dispatcher)
DEFINE_BOOL(trace_compiler_dispatcher, false,
"trace compiler dispatcher activity")
@@ -1667,7 +1679,6 @@ DEFINE_BOOL(serialization_statistics, false,
"Collect statistics on serialized objects.")
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
-DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
#ifdef V8_TARGET_BIG_ENDIAN
#define REGEXP_PEEPHOLE_OPTIMIZATION_BOOL false
@@ -1677,6 +1688,7 @@ DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
DEFINE_BOOL(regexp_tier_up, true,
"enable regexp interpreter and tier up to the compiler after the "
"number of executions set by the tier up ticks flag")
+DEFINE_NEG_IMPLICATION(regexp_interpret_all, regexp_tier_up)
DEFINE_INT(regexp_tier_up_ticks, 1,
"set the number of executions for the regexp interpreter before "
"tiering-up to the compiler")
@@ -1771,6 +1783,10 @@ DEFINE_BOOL_READONLY(minor_mc, false,
DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
+DEFINE_BOOL(slow_histograms, false,
+ "Enable slow histograms with more overhead.")
+DEFINE_IMPLICATION(dump_counters, slow_histograms)
+
DEFINE_BOOL(dump_counters_nvp, false,
"Dump counters as name-value pairs on exit")
DEFINE_BOOL(use_external_strings, false, "Use external strings for source code")
@@ -2058,7 +2074,7 @@ DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
// before. Audit them, and remove any unneeded implications.
DEFINE_IMPLICATION(predictable, single_threaded_gc)
DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(predictable, compiler_dispatcher)
+DEFINE_NEG_IMPLICATION(predictable, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(predictable, stress_concurrent_inlining)
DEFINE_BOOL(predictable_gc_schedule, false,
@@ -2076,7 +2092,7 @@ DEFINE_NEG_IMPLICATION(predictable_gc_schedule, memory_reducer)
DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
DEFINE_IMPLICATION(single_threaded, single_threaded_gc)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(single_threaded, compiler_dispatcher)
+DEFINE_NEG_IMPLICATION(single_threaded, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(single_threaded, stress_concurrent_inlining)
//
diff --git a/deps/v8/src/heap/combined-heap.h b/deps/v8/src/heap/combined-heap.h
index 9c9ed9039f..3390ffb95c 100644
--- a/deps/v8/src/heap/combined-heap.h
+++ b/deps/v8/src/heap/combined-heap.h
@@ -33,11 +33,20 @@ class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
HeapObject object) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return third_party_heap::Heap::IsValidHeapObject(object);
- else
- return ReadOnlyHeap::Contains(object) || heap->Contains(object) ||
- heap->SharedHeapContains(object);
+ }
+ return ReadOnlyHeap::Contains(object) || heap->Contains(object) ||
+ heap->SharedHeapContains(object);
+}
+
+V8_WARN_UNUSED_RESULT inline bool IsValidCodeObject(Heap* heap,
+ HeapObject object) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ return third_party_heap::Heap::IsValidCodeObject(object);
+ }
+ return heap->ContainsCode(object);
}
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 3decc57882..0dfe024db9 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -86,11 +86,11 @@ class ConcurrentMarkingVisitor final
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
- BytecodeFlushMode bytecode_flush_mode,
+ base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
- mark_compact_epoch, bytecode_flush_mode,
+ mark_compact_epoch, code_flush_mode,
embedder_tracing_enabled, is_forced_gc),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
@@ -177,12 +177,21 @@ class ConcurrentMarkingVisitor final
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (ObjectSlot p = start; p < end; ++p) {
- Object object = p.Relaxed_Load();
+ Object object = p.Relaxed_Load(cage_base);
slot_snapshot_->add(p, object);
}
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ Object code = slot.Relaxed_Load(code_cage_base);
+ slot_snapshot_->add(slot, code);
+ }
+
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
// This should never happen, because we don't use snapshotting for objects
@@ -359,10 +368,10 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
- BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
+ base::EnumSet<CodeFlushMode> code_flush_mode, bool is_forced_gc)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
- bytecode_flush_mode_(bytecode_flush_mode),
+ code_flush_mode_(code_flush_mode),
is_forced_gc_(is_forced_gc) {}
~JobTask() override = default;
@@ -373,14 +382,14 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
void Run(JobDelegate* delegate) override {
if (delegate->IsJoiningThread()) {
// TRACE_GC is not needed here because the caller opens the right scope.
- concurrent_marking_->Run(delegate, bytecode_flush_mode_,
- mark_compact_epoch_, is_forced_gc_);
+ concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
+ is_forced_gc_);
} else {
TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
GCTracer::Scope::MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
- concurrent_marking_->Run(delegate, bytecode_flush_mode_,
- mark_compact_epoch_, is_forced_gc_);
+ concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
+ is_forced_gc_);
}
}
@@ -391,7 +400,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
private:
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
- BytecodeFlushMode bytecode_flush_mode_;
+ base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_forced_gc_;
};
@@ -412,7 +421,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
}
void ConcurrentMarking::Run(JobDelegate* delegate,
- BytecodeFlushMode bytecode_flush_mode,
+ base::EnumSet<CodeFlushMode> code_flush_mode,
unsigned mark_compact_epoch, bool is_forced_gc) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
@@ -421,7 +430,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
- mark_compact_epoch, bytecode_flush_mode,
+ mark_compact_epoch, code_flush_mode,
heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
@@ -434,7 +443,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
task_id);
}
- bool ephemeron_marked = false;
+ bool another_ephemeron_iteration = false;
{
TimedScope scope(&time_ms);
@@ -444,7 +453,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
- ephemeron_marked = true;
+ another_ephemeron_iteration = true;
}
}
}
@@ -497,6 +506,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
current_marked_bytes += visited_size;
}
}
+ if (objects_processed > 0) another_ephemeron_iteration = true;
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
@@ -512,7 +522,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
- ephemeron_marked = true;
+ another_ephemeron_iteration = true;
}
}
}
@@ -527,13 +537,14 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
weak_objects_->js_weak_refs.FlushToGlobal(task_id);
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
- weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
+ weak_objects_->code_flushing_candidates.FlushToGlobal(task_id);
+ weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
- if (ephemeron_marked) {
- set_ephemeron_marked(true);
+ if (another_ephemeron_iteration) {
+ set_another_ephemeron_iteration(true);
}
}
if (FLAG_trace_concurrent_marking) {
@@ -563,7 +574,7 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
job_handle_ = V8::GetCurrentPlatform()->PostJob(
priority, std::make_unique<JobTask>(
this, heap_->mark_compact_collector()->epoch(),
- heap_->mark_compact_collector()->bytecode_flush_mode(),
+ heap_->mark_compact_collector()->code_flush_mode(),
heap_->is_current_gc_forced()));
DCHECK(job_handle_->IsValid());
}
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index c685f5cca6..87d39ccdeb 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -91,10 +91,12 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
size_t TotalMarkedBytes();
- void set_ephemeron_marked(bool ephemeron_marked) {
- ephemeron_marked_.store(ephemeron_marked);
+ void set_another_ephemeron_iteration(bool another_ephemeron_iteration) {
+ another_ephemeron_iteration_.store(another_ephemeron_iteration);
+ }
+ bool another_ephemeron_iteration() {
+ return another_ephemeron_iteration_.load();
}
- bool ephemeron_marked() { return ephemeron_marked_.load(); }
private:
struct TaskState {
@@ -105,7 +107,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
char cache_line_padding[64];
};
class JobTask;
- void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
+ void Run(JobDelegate* delegate, base::EnumSet<CodeFlushMode> code_flush_mode,
unsigned mark_compact_epoch, bool is_forced_gc);
size_t GetMaxConcurrency(size_t worker_count);
@@ -115,7 +117,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
- std::atomic<bool> ephemeron_marked_{false};
+ std::atomic<bool> another_ephemeron_iteration_{false};
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index c91dac2697..8c5813867f 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -401,8 +401,7 @@ bool ShouldReduceMemory(CppHeap::TraceFlags flags) {
} // namespace
void CppHeap::TracePrologue(TraceFlags flags) {
- // Finish sweeping in case it is still running.
- sweeper_.FinishIfRunning();
+ CHECK(!sweeper_.IsSweepingInProgress());
current_flags_ = flags;
const UnifiedHeapMarker::MarkingConfig marking_config{
@@ -481,6 +480,9 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
stats_collector_->marked_bytes(),
stats_collector_->marking_time().InMillisecondsF());
}
+ // The allocated bytes counter in v8 was reset to the current marked bytes, so
+ // any pending allocated bytes updates should be discarded.
+ buffered_allocated_bytes_ = 0;
ExecutePreFinalizers();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
@@ -685,5 +687,7 @@ CppHeap::MetricRecorderAdapter* CppHeap::GetMetricRecorder() const {
stats_collector_->GetMetricRecorder());
}
+void CppHeap::FinishSweepingIfRunning() { sweeper_.FinishIfRunning(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 84632b552d..8e4c047d1c 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -102,6 +102,8 @@ class V8_EXPORT_PRIVATE CppHeap final
std::vector<cppgc::CustomSpaceIndex>,
std::unique_ptr<CustomSpaceStatisticsReceiver>);
+ void FinishSweepingIfRunning();
+
// v8::EmbedderHeapTracer interface.
void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) final;
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index 68aa763f3c..dc55753ff6 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -45,6 +45,15 @@ class EmbedderNode : public v8::EmbedderGraph::Node {
size_t SizeInBytes() final { return name_.name_was_hidden ? 0 : size_; }
void SetWrapperNode(v8::EmbedderGraph::Node* wrapper_node) {
+ // An embedder node may only be merged with a single wrapper node, as
+ // consumers of the graph may merge a node and its wrapper node.
+ //
+ // TODO(chromium:1218404): Add a DCHECK() to avoid overriding an already
+ // set `wrapper_node_`. This can currently happen with global proxies that
+ // are rewired (and still kept alive) after reloading a page, see
+ // `AddEdge`. We accept overriding the wrapper node in such cases,
+ // leading to a random merged node and separated nodes for all other
+ // proxies.
wrapper_node_ = wrapper_node;
}
Node* WrapperNode() final { return wrapper_node_; }
@@ -119,6 +128,7 @@ class StateBase {
void set_node(EmbedderNode* node) {
CHECK_EQ(Visibility::kVisible, GetVisibility());
+ DCHECK_NULL(node_);
node_ = node;
}
@@ -452,16 +462,21 @@ class CppGraphBuilderImpl final {
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
v8_value);
if (back_reference_object) {
+ auto& back_header = HeapObjectHeader::FromObject(back_reference_object);
+ auto& back_state = states_.GetExistingState(back_header);
+
// Generally the back reference will point to `parent.header()`. In the
// case of global proxy set up the backreference will point to a
- // different object. Merge the nodes nevertheless as Window objects need
- // to be able to query their detachedness state.
+    // different object, which may not have a node at this point. Merge the
+ // nodes nevertheless as Window objects need to be able to query their
+ // detachedness state.
//
// TODO(chromium:1218404): See bug description on how to fix this
// inconsistency and only merge states when the backref points back
// to the same object.
- auto& back_state = states_.GetExistingState(
- HeapObjectHeader::FromObject(back_reference_object));
+ if (!back_state.get_node()) {
+ back_state.set_node(AddNode(back_header));
+ }
back_state.get_node()->SetWrapperNode(v8_node);
auto* profiler =
@@ -497,6 +512,12 @@ class CppGraphBuilderImpl final {
class VisitationItem;
class VisitationDoneItem;
+ struct MergedNodeItem {
+ EmbedderGraph::Node* node_;
+ v8::Local<v8::Value> value_;
+ uint16_t wrapper_class_id_;
+ };
+
CppHeap& cpp_heap_;
v8::EmbedderGraph& graph_;
StateStorage states_;
@@ -733,7 +754,7 @@ void CppGraphBuilderImpl::VisitForVisibility(State* parent,
} else {
// No need to mark/unmark pending as the node is immediately processed.
current.MarkVisible();
- // In case the names are visible, the graph is no traversed in this phase.
+ // In case the names are visible, the graph is not traversed in this phase.
// Explicitly trace one level to handle weak containers.
WeakVisitor weak_visitor(*this);
header.Trace(&weak_visitor);
diff --git a/deps/v8/src/heap/cppgc/caged-heap.cc b/deps/v8/src/heap/cppgc/caged-heap.cc
index 3a8304f448..c43ea6e3a5 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap.cc
@@ -11,8 +11,10 @@
#include "src/heap/cppgc/caged-heap.h"
#include "include/cppgc/internal/caged-heap-local-data.h"
+#include "include/cppgc/platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index b014bd6134..17e64e6fbe 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -229,8 +229,10 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromObject(desc.base_object_payload)
- .IsMarked<AccessMode::kAtomic>())
+ const HeapObjectHeader& header =
+ HeapObjectHeader::FromObject(desc.base_object_payload);
+ if (!header.IsInConstruction<AccessMode::kAtomic>() &&
+ header.IsMarked<AccessMode::kAtomic>())
return;
RegisterWeakCallback(weak_callback, parameter);
}
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index ff3c17d37f..8a3d6cd97c 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <numeric>
+#include "include/cppgc/cross-thread-persistent.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/process-heap.h"
@@ -15,23 +16,32 @@ namespace internal {
PersistentRegion::~PersistentRegion() { ClearAllUsedNodes(); }
+template <typename PersistentBaseClass>
void PersistentRegion::ClearAllUsedNodes() {
for (auto& slots : nodes_) {
for (auto& node : *slots) {
- if (node.IsUsed()) {
- static_cast<PersistentBase*>(node.owner())->ClearFromGC();
- // Add nodes back to the free list to allow reusing for subsequent
- // creation calls.
- node.InitializeAsFreeNode(free_list_head_);
- free_list_head_ = &node;
- CPPGC_DCHECK(nodes_in_use_ > 0);
- nodes_in_use_--;
- }
+ if (!node.IsUsed()) continue;
+
+ static_cast<PersistentBaseClass*>(node.owner())->ClearFromGC();
+
+ // Add nodes back to the free list to allow reusing for subsequent
+ // creation calls.
+ node.InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = &node;
+ CPPGC_DCHECK(nodes_in_use_ > 0);
+ nodes_in_use_--;
}
}
CPPGC_DCHECK(0u == nodes_in_use_);
}
+template void PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
+template void PersistentRegion::ClearAllUsedNodes<PersistentBase>();
+
+void PersistentRegion::ClearAllUsedNodes() {
+ ClearAllUsedNodes<PersistentBase>();
+}
+
size_t PersistentRegion::NodesInUse() const {
#ifdef DEBUG
const size_t accumulated_nodes_in_use_ = std::accumulate(
@@ -97,23 +107,24 @@ void PersistentRegionLock::AssertLocked() {
CrossThreadPersistentRegion::~CrossThreadPersistentRegion() {
PersistentRegionLock guard;
- persistent_region_.ClearAllUsedNodes();
- persistent_region_.nodes_.clear();
+ PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
+ nodes_.clear();
+ // PersistentRegion destructor will be a noop.
}
void CrossThreadPersistentRegion::Trace(Visitor* visitor) {
PersistentRegionLock::AssertLocked();
- return persistent_region_.Trace(visitor);
+ PersistentRegion::Trace(visitor);
}
size_t CrossThreadPersistentRegion::NodesInUse() const {
// This method does not require a lock.
- return persistent_region_.NodesInUse();
+ return PersistentRegion::NodesInUse();
}
void CrossThreadPersistentRegion::ClearAllUsedNodes() {
PersistentRegionLock::AssertLocked();
- return persistent_region_.ClearAllUsedNodes();
+ PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
}
} // namespace internal
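
The PersistentRegion change above templates ClearAllUsedNodes over the owning persistent base class and provides explicit instantiations in the .cc file, so both the regular and the cross-thread variant link while the definition stays out of the header. A compile-only sketch of that pattern with hypothetical names (not cppgc's types):

    // header (sketch): declaration only, the definition lives in the .cc file.
    class Region {
     public:
      template <typename OwnerBase>
      void ClearAll();
    };

    // .cc file (sketch).
    struct PersistentOwner  { void ClearFromGC() {} };
    struct CrossThreadOwner { void ClearFromGC() {} };

    template <typename OwnerBase>
    void Region::ClearAll() {
      // ... iterate nodes, static_cast each owner to OwnerBase*, call
      //     ClearFromGC(), and return the node to the free list ...
    }

    // Explicit instantiations make both specializations available to other
    // translation units, mirroring the
    // `template void PersistentRegion::ClearAllUsedNodes<...>();` lines above.
    template void Region::ClearAll<PersistentOwner>();
    template void Region::ClearAll<CrossThreadOwner>();
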
diff --git a/deps/v8/src/heap/cppgc/process-heap.h b/deps/v8/src/heap/cppgc/process-heap.h
index c581bad29c..e65342a8ac 100644
--- a/deps/v8/src/heap/cppgc/process-heap.h
+++ b/deps/v8/src/heap/cppgc/process-heap.h
@@ -32,6 +32,7 @@ class V8_EXPORT_PRIVATE HeapRegistry final {
static HeapBase* TryFromManagedPointer(const void* needle);
+ // Does not take the registry mutex and is thus only useful for testing.
static const Storage& GetRegisteredHeapsForTesting();
private:
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 4aa884fcfd..482bab1595 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -575,15 +575,17 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
page.space().AddPage(&page);
return true;
}
- if (!header->IsFinalizable()) {
- LargePage::Destroy(&page);
- return true;
+ std::vector<HeapObjectHeader*> unfinalized_objects;
+ if (header->IsFinalizable()) {
+ unfinalized_objects.push_back(page.ObjectHeader());
}
const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index];
+ // Avoid directly destroying large pages here as counter updates and
+ // backend access in BasePage::Destroy() are not concurrency safe.
state.swept_unfinalized_pages.Push(
- {&page, {page.ObjectHeader()}, {}, {}, true});
+ {&page, std::move(unfinalized_objects), {}, {}, true});
return true;
}
@@ -597,8 +599,10 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
// This visitor:
// - clears free lists for all spaces;
// - moves all Heap pages to local Sweeper's state (SpaceStates).
+// - ASAN: Poisons all unmarked object payloads.
class PrepareForSweepVisitor final
- : public HeapVisitor<PrepareForSweepVisitor> {
+ : protected HeapVisitor<PrepareForSweepVisitor> {
+ friend class HeapVisitor<PrepareForSweepVisitor>;
using CompactableSpaceHandling =
Sweeper::SweepingConfig::CompactableSpaceHandling;
@@ -608,6 +612,9 @@ class PrepareForSweepVisitor final
: states_(states),
compactable_space_handling_(compactable_space_handling) {}
+ void Run(RawHeap& raw_heap) { Traverse(raw_heap); }
+
+ protected:
bool VisitNormalPageSpace(NormalPageSpace& space) {
if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
space.is_compactable())
@@ -677,7 +684,7 @@ class Sweeper::SweeperImpl final {
}
PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
- .Traverse(heap_);
+ .Run(heap_);
if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index d6c5c997c3..0c80e81f51 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -297,6 +297,12 @@ void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code,
code->CopyRelocInfoToByteArray(reloc_info, code_desc_);
+ if (code_desc_.origin->OnHeapGCCount() != heap->gc_count()) {
+ // If a GC happens between Code object allocation and now, we might have
+ // invalid embedded object references.
+ code_desc_.origin->FixOnHeapReferences();
+ }
+
#ifdef VERIFY_HEAP
code->VerifyRelocInfo(isolate_, reloc_info);
#endif
@@ -1466,9 +1472,9 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
}
#if V8_ENABLE_WEBASSEMBLY
-Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
- Handle<Map> opt_parent,
- int instance_size_bytes) {
+Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
+ Address type_address, Handle<Map> opt_parent, int instance_size_bytes,
+ Handle<WasmInstanceObject> instance) {
  // We pretenure WasmTypeInfo objects because they are referenced by Maps,
// which are assumed to be long-lived. The supertypes list is constant
// after initialization, so we pretenure that too.
@@ -1493,6 +1499,7 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
result.set_supertypes(*supertypes, SKIP_WRITE_BARRIER);
result.set_subtypes(*subtypes);
result.set_instance_size(instance_size_bytes);
+ result.set_instance(*instance);
return handle(result, isolate());
}
@@ -1805,8 +1812,8 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
bool is_clonable_js_type =
instance_type == JS_REG_EXP_TYPE || instance_type == JS_OBJECT_TYPE ||
instance_type == JS_ERROR_TYPE || instance_type == JS_ARRAY_TYPE ||
- instance_type == JS_API_OBJECT_TYPE ||
- instance_type == JS_SPECIAL_API_OBJECT_TYPE;
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
+ InstanceTypeChecker::IsJSApiObject(instance_type);
bool is_clonable_wasm_type = false;
#if V8_ENABLE_WEBASSEMBLY
is_clonable_wasm_type = instance_type == WASM_GLOBAL_OBJECT_TYPE ||
@@ -2121,6 +2128,7 @@ DEFINE_ERROR(TypeError, type_error)
DEFINE_ERROR(WasmCompileError, wasm_compile_error)
DEFINE_ERROR(WasmLinkError, wasm_link_error)
DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
+DEFINE_ERROR(WasmExceptionError, wasm_exception_error)
#undef DEFINE_ERROR
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
@@ -2647,7 +2655,7 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module.set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
module.set_module_namespace(roots.undefined_value(), SKIP_WRITE_BARRIER);
module.set_requested_modules(*requested_modules);
- module.set_status(Module::kUninstantiated);
+ module.set_status(Module::kUnlinked);
module.set_exception(roots.the_hole_value(), SKIP_WRITE_BARRIER);
module.set_top_level_capability(roots.undefined_value(), SKIP_WRITE_BARRIER);
module.set_import_meta(roots.the_hole_value(), kReleaseStore,
@@ -2678,7 +2686,7 @@ Handle<SyntheticModule> Factory::NewSyntheticModule(
DisallowGarbageCollection no_gc;
module.set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
module.set_module_namespace(roots.undefined_value(), SKIP_WRITE_BARRIER);
- module.set_status(Module::kUninstantiated);
+ module.set_status(Module::kUnlinked);
module.set_exception(roots.the_hole_value(), SKIP_WRITE_BARRIER);
module.set_top_level_capability(roots.undefined_value(), SKIP_WRITE_BARRIER);
module.set_name(*module_name);
@@ -2720,19 +2728,10 @@ MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
std::shared_ptr<BackingStore> backing_store) {
- Handle<Map> map;
- if (backing_store->is_resizable()) {
- DCHECK(FLAG_harmony_rab_gsab);
- map = Handle<Map>(isolate()
- ->native_context()
- ->growable_shared_array_buffer_fun()
- .initial_map(),
- isolate());
- } else {
- map = Handle<Map>(
- isolate()->native_context()->shared_array_buffer_fun().initial_map(),
- isolate());
- }
+ DCHECK_IMPLIES(backing_store->is_resizable(), FLAG_harmony_rab_gsab);
+ Handle<Map> map(
+ isolate()->native_context()->shared_array_buffer_fun().initial_map(),
+ isolate());
auto result = Handle<JSArrayBuffer>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
ResizableFlag resizable = backing_store->is_resizable()
@@ -3323,41 +3322,27 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
int number_of_properties) {
- if (number_of_properties == 0) {
- // Reuse the initial map of the Object function if the literal has no
- // predeclared properties.
- return handle(context->object_function().initial_map(), isolate());
- }
-
// Use initial slow object proto map for too many properties.
- const int kMapCacheSize = 128;
- if (number_of_properties > kMapCacheSize) {
+ if (number_of_properties >= JSObject::kMapCacheSize) {
return handle(context->slow_object_with_object_prototype_map(), isolate());
}
- int cache_index = number_of_properties - 1;
- Handle<Object> maybe_cache(context->map_cache(), isolate());
- if (maybe_cache->IsUndefined(isolate())) {
- // Allocate the new map cache for the native context.
- maybe_cache = NewWeakFixedArray(kMapCacheSize, AllocationType::kOld);
- context->set_map_cache(*maybe_cache);
- } else {
- // Check to see whether there is a matching element in the cache.
- Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
- MaybeObject result = cache->Get(cache_index);
- HeapObject heap_object;
- if (result->GetHeapObjectIfWeak(&heap_object)) {
- Map map = Map::cast(heap_object);
- DCHECK(!map.is_dictionary_map());
- return handle(map, isolate());
- }
+ Handle<WeakFixedArray> cache(WeakFixedArray::cast(context->map_cache()),
+ isolate());
+
+ // Check to see whether there is a matching element in the cache.
+ MaybeObject result = cache->Get(number_of_properties);
+ HeapObject heap_object;
+ if (result->GetHeapObjectIfWeak(&heap_object)) {
+ Map map = Map::cast(heap_object);
+ DCHECK(!map.is_dictionary_map());
+ return handle(map, isolate());
}
// Create a new map and add it to the cache.
- Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
Handle<Map> map = Map::Create(isolate(), number_of_properties);
DCHECK(!map->is_dictionary_map());
- cache->Set(cache_index, HeapObjectReference::Weak(*map));
+ cache->Set(number_of_properties, HeapObjectReference::Weak(*map));
return map;
}
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 366f441b05..1acf9a65c2 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -561,7 +561,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
Handle<Map> opt_parent,
- int instance_size_bytes);
+ int instance_size_bytes,
+ Handle<WasmInstanceObject> instance);
Handle<WasmCapiFunctionData> NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
Handle<Code> wrapper_code,
@@ -692,6 +693,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
DECLARE_ERROR(WasmCompileError)
DECLARE_ERROR(WasmLinkError)
DECLARE_ERROR(WasmRuntimeError)
+ DECLARE_ERROR(WasmExceptionError)
#undef DECLARE_ERROR
Handle<String> NumberToString(Handle<Object> number,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 8e670c2b74..a780ac01b0 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -69,9 +69,8 @@ CollectionEpoch GCTracer::CurrentEpoch(Scope::ScopeId scope_id) {
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
: tracer_(tracer), scope_(scope), thread_kind_(thread_kind) {
start_time_ = tracer_->MonotonicallyIncreasingTimeInMs();
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
-
#ifdef V8_RUNTIME_CALL_STATS
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
if (thread_kind_ == ThreadKind::kMain) {
DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
runtime_stats_ =
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index d0ab41e1c2..7c8a2f54d6 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -45,6 +45,7 @@
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/strings/string-hasher.h"
+#include "src/utils/ostreams.h"
#include "src/zone/zone-list-inl.h"
namespace v8 {
@@ -84,16 +85,29 @@ Address AllocationResult::ToAddress() {
}
// static
-BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
+base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
if (isolate->disable_bytecode_flushing()) {
- return BytecodeFlushMode::kDoNotFlushBytecode;
+ return base::EnumSet<CodeFlushMode>();
}
- if (FLAG_stress_flush_bytecode) {
- return BytecodeFlushMode::kStressFlushBytecode;
- } else if (FLAG_flush_bytecode) {
- return BytecodeFlushMode::kFlushBytecode;
+
+ base::EnumSet<CodeFlushMode> code_flush_mode;
+ if (FLAG_flush_bytecode) {
+ code_flush_mode.Add(CodeFlushMode::kFlushBytecode);
+ }
+
+ if (FLAG_flush_baseline_code) {
+ code_flush_mode.Add(CodeFlushMode::kFlushBaselineCode);
+ }
+
+ if (FLAG_stress_flush_code) {
+    // This check ensures tests don't enable stress_flush_code without also
+    // enabling bytecode or baseline code flushing; stress_flush_code has no
+    // effect unless at least one of them is enabled.
+ DCHECK(FLAG_fuzzing || FLAG_flush_baseline_code || FLAG_flush_bytecode);
+ code_flush_mode.Add(CodeFlushMode::kStressFlushCode);
}
- return BytecodeFlushMode::kDoNotFlushBytecode;
+
+ return code_flush_mode;
}
Isolate* Heap::isolate() {
@@ -335,14 +349,11 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
UNREACHABLE();
}
-Address Heap::DeserializerAllocate(AllocationType type, int size_in_bytes) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- AllocationResult allocation = tp_heap_->Allocate(
- size_in_bytes, type, AllocationAlignment::kDoubleAligned);
- return allocation.ToObjectChecked().ptr();
- } else {
- UNIMPLEMENTED(); // unimplemented
- }
+Address Heap::AllocateRawOrFail(int size, AllocationType allocation,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ return AllocateRawWith<kRetryOrFail>(size, allocation, origin, alignment)
+ .address();
}
void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
@@ -492,13 +503,6 @@ bool Heap::InOldSpace(Object object) {
return old_space_->Contains(object);
}
-bool Heap::InCodeSpace(HeapObject object) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- return third_party_heap::Heap::InCodeSpace(object.ptr());
- }
- return code_space_->Contains(object) || code_lo_space_->Contains(object);
-}
-
// static
Heap* Heap::FromWritableHeapObject(HeapObject obj) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
@@ -613,7 +617,7 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
(*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}
-bool Heap::IsPendingAllocation(HeapObject object) {
+bool Heap::IsPendingAllocationInternal(HeapObject object) {
DCHECK(deserialization_complete());
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
@@ -665,6 +669,15 @@ bool Heap::IsPendingAllocation(HeapObject object) {
UNREACHABLE();
}
+bool Heap::IsPendingAllocation(HeapObject object) {
+ bool result = IsPendingAllocationInternal(object);
+ if (FLAG_trace_pending_allocations && result) {
+ StdoutStream{} << "Pending allocation: " << std::hex << "0x" << object.ptr()
+ << "\n";
+ }
+ return result;
+}
+
bool Heap::IsPendingAllocation(Object object) {
return object.IsHeapObject() && IsPendingAllocation(HeapObject::cast(object));
}
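
As the heap-inl.h hunk above shows, GetCodeFlushMode() now returns a set of CodeFlushMode flags instead of a single BytecodeFlushMode value, so bytecode flushing, baseline-code flushing, and stress flushing can be enabled independently and combined. Below is a minimal standalone sketch of that flag-set pattern; the FlushModeSet class is a hypothetical stand-in written for illustration, not V8's actual base::EnumSet.

#include <cstdint>
#include <iostream>

enum class CodeFlushMode { kFlushBytecode, kFlushBaselineCode, kStressFlushCode };

// Hypothetical stand-in for base::EnumSet<CodeFlushMode>, illustration only.
class FlushModeSet {
 public:
  void Add(CodeFlushMode mode) { bits_ |= 1u << static_cast<unsigned>(mode); }
  bool contains(CodeFlushMode mode) const {
    return (bits_ & (1u << static_cast<unsigned>(mode))) != 0;
  }
  bool empty() const { return bits_ == 0; }

 private:
  uint32_t bits_ = 0;
};

int main() {
  FlushModeSet mode;
  mode.Add(CodeFlushMode::kFlushBytecode);
  mode.Add(CodeFlushMode::kFlushBaselineCode);
  std::cout << mode.contains(CodeFlushMode::kFlushBaselineCode) << "\n";  // 1
  std::cout << mode.contains(CodeFlushMode::kStressFlushCode) << "\n";    // 0
  std::cout << mode.empty() << "\n";                                      // 0
  return 0;
}
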
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index bb5e91a829..982b80bb89 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -1909,6 +1909,9 @@ void Heap::StartIncrementalMarking(int gc_flags,
// Sweeping needs to be completed such that markbits are all cleared before
// starting marking again.
CompleteSweepingFull();
+ if (cpp_heap()) {
+ CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+ }
SafepointScope safepoint(this);
@@ -1941,16 +1944,28 @@ void Heap::CompleteSweepingFull() {
void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags, const GCCallbackFlags gc_callback_flags) {
if (incremental_marking()->IsStopped()) {
- IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
- if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
- incremental_marking()->incremental_marking_job()->ScheduleTask(this);
- } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
- StartIncrementalMarking(
- gc_flags,
- OldGenerationSpaceAvailable() <= NewSpaceCapacity()
- ? GarbageCollectionReason::kAllocationLimit
- : GarbageCollectionReason::kGlobalAllocationLimit,
- gc_callback_flags);
+ switch (IncrementalMarkingLimitReached()) {
+ case IncrementalMarkingLimit::kHardLimit:
+ StartIncrementalMarking(
+ gc_flags,
+ OldGenerationSpaceAvailable() <= NewSpaceCapacity()
+ ? GarbageCollectionReason::kAllocationLimit
+ : GarbageCollectionReason::kGlobalAllocationLimit,
+ gc_callback_flags);
+ break;
+ case IncrementalMarkingLimit::kSoftLimit:
+ incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+ break;
+ case IncrementalMarkingLimit::kFallbackForEmbedderLimit:
+ // This is a fallback case where no appropriate limits have been
+ // configured yet.
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kPossibleGarbage;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer()->NotifyPossibleGarbage(event);
+ break;
+ case IncrementalMarkingLimit::kNoLimit:
+ break;
}
}
}
@@ -2162,6 +2177,9 @@ size_t Heap::PerformGarbageCollection(
} else {
DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
CompleteSweepingFull();
+ if (cpp_heap()) {
+ CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+ }
}
// The last GC cycle is done after completing sweeping. Start the next GC
@@ -3461,12 +3479,8 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
void Heap::UndoLastAllocationAt(Address addr, int size) {
DCHECK_LE(0, size);
if (size == 0) return;
- if (code_space_->Contains(addr)) {
- Address* top = code_space_->allocation_top_address();
- if (addr + size == *top && code_space_->original_top() <= addr) {
- *top = addr;
- return;
- }
+ if (code_space_->TryFreeLast(addr, size)) {
+ return;
}
CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
}
@@ -3759,7 +3773,7 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
DevToolsTraceEventScope devtools_trace_event_scope(
this, "MajorGC", "incremental finalization step");
- HistogramTimerScope incremental_marking_scope(
+ NestedTimedHistogramScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
TRACE_EVENT1("v8", "V8.GCIncrementalMarkingFinalize", "epoch", epoch_full());
TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
@@ -3832,6 +3846,13 @@ class SlotCollectingVisitor final : public ObjectVisitor {
}
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+#if V8_EXTERNAL_CODE_SPACE
+ code_slots_.push_back(slot);
+#endif
+ }
+
void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
@@ -3843,9 +3864,16 @@ class SlotCollectingVisitor final : public ObjectVisitor {
int number_of_slots() { return static_cast<int>(slots_.size()); }
MaybeObjectSlot slot(int i) { return slots_[i]; }
+#if V8_EXTERNAL_CODE_SPACE
+ ObjectSlot code_slot(int i) { return code_slots_[i]; }
+ int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
+#endif
private:
std::vector<MaybeObjectSlot> slots_;
+#if V8_EXTERNAL_CODE_SPACE
+ std::vector<ObjectSlot> code_slots_;
+#endif
};
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
@@ -3882,6 +3910,13 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
}
+#if V8_EXTERNAL_CODE_SPACE
+ DCHECK_EQ(new_visitor.number_of_code_slots(),
+ old_visitor.number_of_code_slots());
+ for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
+ DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
+ }
+#endif // V8_EXTERNAL_CODE_SPACE
} else {
DCHECK_EQ(pending_layout_change_object_, object);
pending_layout_change_object_ = HeapObject();
@@ -3972,7 +4007,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
- HistogramTimerScope idle_notification_scope(
+ NestedTimedHistogramScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
TRACE_EVENT0("v8", "V8.GCIdleNotification");
double start_ms = MonotonicallyIncreasingTimeInMs();
@@ -4282,6 +4317,18 @@ bool Heap::Contains(HeapObject value) const {
(new_lo_space_ && new_lo_space_->Contains(value)));
}
+bool Heap::ContainsCode(HeapObject value) const {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ return true;
+ }
+ // TODO(v8:11880): support external code space.
+ if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
+ return false;
+ }
+ return HasBeenSetUp() &&
+ (code_space_->Contains(value) || code_lo_space_->Contains(value));
+}
+
bool Heap::SharedHeapContains(HeapObject value) const {
if (shared_old_space_)
return shared_old_space_->Contains(value) ||
@@ -4437,6 +4484,17 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(slot.address());
+ if (ShouldHaveBeenRecorded(
+ host, MaybeObject::FromObject(slot.load(code_cage_base)))) {
+ CHECK_GT(untyped_->count(slot.address()), 0);
+ }
+ }
+
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
@@ -5108,12 +5166,14 @@ size_t Heap::OldGenerationSizeOfObjects() {
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}
+size_t Heap::EmbedderSizeOfObjects() const {
+ return local_embedder_heap_tracer()
+ ? local_embedder_heap_tracer()->used_size()
+ : 0;
+}
+
size_t Heap::GlobalSizeOfObjects() {
- const size_t on_heap_size = OldGenerationSizeOfObjects();
- const size_t embedder_size = local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->used_size()
- : 0;
- return on_heap_size + embedder_size;
+ return OldGenerationSizeOfObjects() + EmbedderSizeOfObjects();
}
uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() {
@@ -5255,11 +5315,13 @@ double Heap::PercentToGlobalMemoryLimit() {
return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
}
-// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
-// The kNoLimit means that either incremental marking is disabled or it is too
+// - kNoLimit means that either incremental marking is disabled or it is too
// early to start incremental marking.
-// The kSoftLimit means that incremental marking should be started soon.
-// The kHardLimit means that incremental marking should be started immediately.
+// - kSoftLimit means that incremental marking should be started soon.
+// - kHardLimit means that incremental marking should be started immediately.
+// - kFallbackForEmbedderLimit means that incremental marking should be
+// started as soon as the embedder does not allocate with high throughput
+// anymore.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
@@ -5324,6 +5386,15 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available > NewSpaceCapacity() &&
(!global_memory_available ||
global_memory_available > NewSpaceCapacity())) {
+ if (local_embedder_heap_tracer()->InUse() &&
+ !old_generation_size_configured_ && gc_count_ == 0) {
+ // At this point the embedder memory is above the activation
+ // threshold. No GC has happened so far, so it is unlikely that the
+ // heap gets configured any time soon. In this case start a memory
+ // reducer, which waits until the allocation rate is low before
+ // triggering garbage collection.
+ return IncrementalMarkingLimit::kFallbackForEmbedderLimit;
+ }
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -5795,6 +5866,12 @@ void Heap::RegisterExternallyReferencedObject(Address* location) {
}
void Heap::StartTearDown() {
+ // Finish any ongoing sweeping to avoid stray background tasks still accessing
+ // the heap during teardown.
+ CompleteSweepingFull();
+
+ memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+
SetGCState(TEAR_DOWN);
// Background threads may allocate and block until GC is performed. However
@@ -6307,6 +6384,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkPointers(start, end);
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base));
+ MarkHeapObject(code);
+ }
+
void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
MarkHeapObject(target);
@@ -6779,6 +6864,20 @@ void VerifyPointersVisitor::VisitPointers(HeapObject host,
VerifyPointers(host, start, end);
}
+void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
+ CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ Object maybe_code = slot.load(code_cage_base);
+ HeapObject code;
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyCodeObjectImpl(code);
+ } else {
+ CHECK(maybe_code.IsSmi());
+ }
+}
+
void VerifyPointersVisitor::VisitRootPointers(Root root,
const char* description,
FullObjectSlot start,
@@ -6798,6 +6897,14 @@ void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
CHECK(heap_object.map().IsMap());
}
+void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ CHECK(IsValidCodeObject(heap_, heap_object));
+ PtrComprCageBase cage_base(heap_->isolate());
+ CHECK(heap_object.map(cage_base).IsMap(cage_base));
+ CHECK(heap_object.map(cage_base).instance_type() == CODE_TYPE);
+}
+
template <typename TSlot>
void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
Isolate* isolate = heap_->isolate();
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index d80087824c..61dea819f0 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -465,7 +465,7 @@ class Heap {
// Helper function to get the bytecode flushing mode based on the flags. This
// is required because it is not safe to access flags in the concurrent marker.
- static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
+ static inline base::EnumSet<CodeFlushMode> GetCodeFlushMode(Isolate* isolate);
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
@@ -567,11 +567,6 @@ class Heap {
V8_EXPORT_PRIVATE static bool IsLargeObject(HeapObject object);
- // This method supports the deserialization allocator. All allocations
- // are word-aligned. The method should never fail to allocate since the
- // total space requirements of the deserializer are known at build time.
- inline Address DeserializerAllocate(AllocationType type, int size_in_bytes);
-
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj,
@@ -1270,13 +1265,13 @@ class Heap {
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
- // Returns whether the object resides in any of the code spaces.
- inline bool InCodeSpace(HeapObject object);
-
// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
// heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
+ // Same as above, but checks whether the object resides in any of the code
+ // spaces.
+ V8_EXPORT_PRIVATE bool ContainsCode(HeapObject value) const;
// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
@@ -1471,6 +1466,10 @@ class Heap {
// Excludes external memory held by those objects.
V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
+ // Returns the size of objects held by the EmbedderHeapTracer.
+ V8_EXPORT_PRIVATE size_t EmbedderSizeOfObjects() const;
+
+ // Returns the global size of objects (embedder + V8 non-new spaces).
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
// We allow incremental marking to overshoot the V8 and global allocation
@@ -2052,7 +2051,12 @@ class Heap {
double PercentToOldGenerationLimit();
double PercentToGlobalMemoryLimit();
- enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
+ enum class IncrementalMarkingLimit {
+ kNoLimit,
+ kSoftLimit,
+ kHardLimit,
+ kFallbackForEmbedderLimit
+ };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
bool ShouldStressCompaction() const;
@@ -2105,6 +2109,12 @@ class Heap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
+ // Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
+ V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
+ int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kWordAligned);
+
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
// is triggered and the allocation is retried. This is performed multiple
@@ -2139,6 +2149,9 @@ class Heap {
force_gc_on_next_allocation_ = true;
}
+ // Helper for IsPendingAllocation.
+ inline bool IsPendingAllocationInternal(HeapObject object);
+
// ===========================================================================
// Retaining path tracing ====================================================
// ===========================================================================
@@ -2527,6 +2540,7 @@ class Heap {
// The allocator interface.
friend class Factory;
+ template <typename IsolateT>
friend class Deserializer;
// The Isolate constructs us.
@@ -2580,8 +2594,6 @@ class V8_NODISCARD AlwaysAllocateScope {
private:
friend class AlwaysAllocateScopeForTesting;
- friend class Deserializer;
- friend class DeserializerAllocator;
friend class Evacuator;
friend class Heap;
friend class Isolate;
@@ -2650,6 +2662,7 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
ObjectSlot end) override;
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override;
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
@@ -2661,6 +2674,7 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
protected:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
+ V8_INLINE void VerifyCodeObjectImpl(HeapObject heap_object);
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 29b2a84d68..af1e3c6308 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -159,7 +159,7 @@ bool IncrementalMarking::CanBeActivated() {
bool IncrementalMarking::IsBelowActivationThresholds() const {
return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
- heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
+ heap_->EmbedderSizeOfObjects() <= kEmbedderActivationThreshold;
}
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
@@ -194,7 +194,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
counters->incremental_marking_reason()->AddSample(
static_cast<int>(gc_reason));
- HistogramTimerScope incremental_marking_scope(
+ NestedTimedHistogramScope incremental_marking_scope(
counters->gc_incremental_marking_start());
TRACE_EVENT1("v8", "V8.GCIncrementalMarkingStart", "epoch",
heap_->epoch_full());
@@ -784,7 +784,7 @@ StepResult CombineStepResults(StepResult a, StepResult b) {
StepResult IncrementalMarking::AdvanceWithDeadline(
double deadline_in_ms, CompletionAction completion_action,
StepOrigin step_origin) {
- HistogramTimerScope incremental_marking_scope(
+ NestedTimedHistogramScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch", heap_->epoch_full());
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
@@ -889,7 +889,7 @@ void IncrementalMarking::AdvanceOnAllocation() {
state_ != MARKING || heap_->always_allocate()) {
return;
}
- HistogramTimerScope incremental_marking_scope(
+ NestedTimedHistogramScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
@@ -944,7 +944,8 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
// This ignores that case where the embedder finds new V8-side objects. The
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
- v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
+ std::tie(v8_bytes_processed, std::ignore) =
+ collector_->ProcessMarkingWorklist(bytes_to_process);
StepResult v8_result = local_marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining;
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 6b87101882..6bd5ac5be2 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -75,10 +75,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
#ifndef DEBUG
static constexpr size_t kV8ActivationThreshold = 8 * MB;
- static constexpr size_t kGlobalActivationThreshold = 16 * MB;
+ static constexpr size_t kEmbedderActivationThreshold = 8 * MB;
#else
static constexpr size_t kV8ActivationThreshold = 0;
- static constexpr size_t kGlobalActivationThreshold = 0;
+ static constexpr size_t kEmbedderActivationThreshold = 0;
#endif
#ifdef V8_ATOMIC_MARKING_STATE
diff --git a/deps/v8/src/heap/linear-allocation-area.h b/deps/v8/src/heap/linear-allocation-area.h
new file mode 100644
index 0000000000..a03285c046
--- /dev/null
+++ b/deps/v8/src/heap/linear-allocation-area.h
@@ -0,0 +1,117 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_LINEAR_ALLOCATION_AREA_H_
+#define V8_HEAP_LINEAR_ALLOCATION_AREA_H_
+
+#include "include/v8-internal.h"
+#include "src/common/checks.h"
+
+namespace v8 {
+namespace internal {
+
+// A linear allocation area to allocate objects from.
+//
+// Invariant that must hold at all times:
+// start <= top <= limit
+class LinearAllocationArea final {
+ public:
+ LinearAllocationArea() = default;
+ LinearAllocationArea(Address top, Address limit)
+ : start_(top), top_(top), limit_(limit) {
+ Verify();
+ }
+
+ void Reset(Address top, Address limit) {
+ start_ = top;
+ top_ = top;
+ limit_ = limit;
+ Verify();
+ }
+
+ void ResetStart() { start_ = top_; }
+
+ V8_INLINE bool CanIncrementTop(size_t bytes) {
+ Verify();
+ return (top_ + bytes) <= limit_;
+ }
+
+ V8_INLINE Address IncrementTop(size_t bytes) {
+ Address old_top = top_;
+ top_ += bytes;
+ Verify();
+ return old_top;
+ }
+
+ V8_INLINE bool DecrementTopIfAdjacent(Address new_top, size_t bytes) {
+ Verify();
+ if ((new_top + bytes) == top_) {
+ top_ = new_top;
+ if (start_ > top_) {
+ ResetStart();
+ }
+ Verify();
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE bool MergeIfAdjacent(LinearAllocationArea& other) {
+ Verify();
+ other.Verify();
+ if (top_ == other.limit_) {
+ top_ = other.top_;
+ start_ = other.start_;
+ other.Reset(kNullAddress, kNullAddress);
+ Verify();
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE void SetLimit(Address limit) {
+ limit_ = limit;
+ Verify();
+ }
+
+ V8_INLINE Address start() const {
+ Verify();
+ return start_;
+ }
+ V8_INLINE Address top() const {
+ Verify();
+ return top_;
+ }
+ V8_INLINE Address limit() const {
+ Verify();
+ return limit_;
+ }
+ const Address* top_address() const { return &top_; }
+ Address* top_address() { return &top_; }
+ const Address* limit_address() const { return &limit_; }
+ Address* limit_address() { return &limit_; }
+
+ void Verify() const {
+#ifdef DEBUG
+ SLOW_DCHECK(start_ <= top_);
+ SLOW_DCHECK(top_ <= limit_);
+ SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
+#endif // DEBUG
+ }
+
+ private:
+ // The start of the LAB. Initially coincides with `top_`. As top is moved
+ // ahead, the area [start_, top_[ denotes a range of new objects. This range
+ // is reset with `ResetStart()`.
+ Address start_ = kNullAddress;
+ // The top of the LAB that is used for allocation.
+ Address top_ = kNullAddress;
+ // Limit of the LAB the denotes the end of the valid range for allocation.
+ Address limit_ = kNullAddress;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_LINEAR_ALLOCATION_AREA_H_
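
The new linear-allocation-area.h above encapsulates a bump-pointer allocation buffer with the invariant start <= top <= limit. The following standalone sketch models that discipline in simplified form (plain uintptr_t instead of Address, no Verify()/DCHECKs) to show how CanIncrementTop/IncrementTop are meant to be used by an allocator; it is an illustrative model, not V8 code.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Simplified model of the LAB above.
struct Lab {
  uintptr_t start;
  uintptr_t top;
  uintptr_t limit;

  bool CanIncrementTop(size_t bytes) const { return top + bytes <= limit; }
  uintptr_t IncrementTop(size_t bytes) {  // Returns the old top == object start.
    uintptr_t old_top = top;
    top += bytes;
    return old_top;
  }
};

int main() {
  Lab lab{0x1000, 0x1000, 0x2000};  // start == top, 4 KiB of room.
  assert(lab.CanIncrementTop(32));
  uintptr_t object = lab.IncrementTop(32);  // Bump allocation of 32 bytes.
  assert(object == 0x1000 && lab.top == 0x1020);
  // [start, top) now covers the newly allocated object; the invariant holds.
  assert(lab.start <= lab.top && lab.top <= lab.limit);
  return 0;
}
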
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index 0f6f7e5453..d28d1a6464 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -56,7 +56,8 @@ void EvacuationAllocator::FreeLastInNewSpace(HeapObject object,
void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
int object_size) {
- if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
+ if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object.address(),
+ object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
diff --git a/deps/v8/src/heap/local-factory-inl.h b/deps/v8/src/heap/local-factory-inl.h
index 1d2d0e9c35..1f6c5cc7fb 100644
--- a/deps/v8/src/heap/local-factory-inl.h
+++ b/deps/v8/src/heap/local-factory-inl.h
@@ -29,7 +29,7 @@ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
ACCESSOR_INFO_ROOT_LIST(ACCESSOR_INFO_ACCESSOR)
#undef ACCESSOR_INFO_ACCESSOR
-#endif // V8_HEAP_LOCAL_FACTORY_INL_H_
-
} // namespace internal
} // namespace v8
+
+#endif // V8_HEAP_LOCAL_FACTORY_INL_H_
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index 01a869f9d4..92c8f4b845 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -10,6 +10,7 @@
#include "src/common/assert-scope.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
namespace v8 {
@@ -53,6 +54,13 @@ Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
alignment);
}
+void LocalHeap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots clear_slots_mode) {
+ DCHECK_EQ(clear_slots_mode, ClearRecordedSlots::kNo);
+ heap()->CreateFillerObjectAtBackground(
+ addr, size, ClearFreedMemoryMode::kDontClearFreedMemory);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index e96b3e7aa0..8c1cddf8c4 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -82,6 +82,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::unique_ptr<PersistentHandles> persistent_handles);
std::unique_ptr<PersistentHandles> DetachPersistentHandles();
#ifdef DEBUG
+ bool HasPersistentHandles() { return !!persistent_handles_; }
bool ContainsPersistentHandle(Address* location);
bool ContainsLocalHandle(Address* location);
bool IsHandleDereferenceAllowed();
@@ -130,7 +131,14 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
+ inline void CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots clear_slots_mode);
+
bool is_main_thread() const { return is_main_thread_; }
+ bool deserialization_complete() const {
+ return heap_->deserialization_complete();
+ }
+ ReadOnlySpace* read_only_space() { return heap_->read_only_space(); }
// Requests GC and blocks until the collection finishes.
bool TryPerformCollection();
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index f28b9b5e84..2210c73958 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -67,12 +67,9 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
- BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
- if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
- !source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
- slot.address());
+ if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ RecordSlot(source_page, slot, target);
}
}
@@ -80,8 +77,14 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
- slot.address());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL &&
+ target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ RememberedSet<OLD_TO_CODE>::Insert<AccessMode::ATOMIC>(source_page,
+ slot.address());
+ } else {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
+ slot.address());
+ }
}
}
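
The RecordSlot changes above route slots whose target lives on an executable page into a separate OLD_TO_CODE remembered set when the external code space is enabled, and into OLD_TO_OLD otherwise; in both cases only targets on evacuation-candidate pages are recorded. The sketch below illustrates that decision with simplified stand-in types (Page, SetKind) invented for the example; it is not V8's RememberedSet API.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

enum class SetKind { kOldToOld, kOldToCode };

struct Page {
  bool is_evacuation_candidate;
  bool is_executable;
};

std::map<SetKind, std::set<uintptr_t>> remembered_sets;

void RecordSlot(const Page& target_page, uintptr_t slot_address,
                bool external_code_space_enabled) {
  // Only slots pointing at evacuation candidates need to be remembered.
  if (!target_page.is_evacuation_candidate) return;
  SetKind kind = (external_code_space_enabled && target_page.is_executable)
                     ? SetKind::kOldToCode
                     : SetKind::kOldToOld;
  remembered_sets[kind].insert(slot_address);
}

int main() {
  Page code_page{true, true};
  Page data_page{true, false};
  RecordSlot(code_page, 0x1008, /*external_code_space_enabled=*/true);
  RecordSlot(data_page, 0x2010, /*external_code_space_enabled=*/true);
  std::cout << remembered_sets[SetKind::kOldToCode].size() << " "   // 1
            << remembered_sets[SetKind::kOldToOld].size() << "\n";  // 1
  return 0;
}
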
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index d6c644696d..0fffb4ea45 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -88,6 +88,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
virtual bool IsMarked(HeapObject object) = 0;
@@ -104,6 +105,11 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ VerifyCodePointer(slot);
+ }
+
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
VerifyRootPointers(start, end);
@@ -227,6 +233,18 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyPointersImpl(start, end);
}
+ void VerifyCodePointer(CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(slot.address());
+ Object maybe_code = slot.load(code_cage_base);
+ HeapObject code;
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyHeapObjectImpl(code);
+ }
+ }
+
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -255,8 +273,10 @@ class FullMarkingVerifier : public MarkingVerifier {
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
+ PtrComprCageBase cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = *slot;
+ typename TSlot::TObject object = slot.load(cage_base);
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -281,6 +301,11 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ VerifyCodePointer(slot);
+ }
+
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
VerifyRootPointers(start, end);
@@ -296,6 +321,7 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
void VerifyRoots();
@@ -370,8 +396,10 @@ class FullEvacuationVerifier : public EvacuationVerifier {
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
+ PtrComprCageBase cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = *current;
+ typename TSlot::TObject object = current.load(cage_base);
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -385,6 +413,17 @@ class FullEvacuationVerifier : public EvacuationVerifier {
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
+ void VerifyCodePointer(CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(slot.address());
+ Object maybe_code = slot.load(code_cage_base);
+ HeapObject code;
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyHeapObjectImpl(code);
+ }
+ }
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
@@ -519,14 +558,13 @@ void MarkCompactCollector::StartMarking() {
contexts.push_back(context->ptr());
}
}
- bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
+ code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), weak_objects(), heap_,
- epoch(), bytecode_flush_mode(),
- heap_->local_embedder_heap_tracer()->InUse(),
+ epoch(), code_flush_mode(), heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
@@ -628,11 +666,6 @@ void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
sweeper()->EnsurePageIsSwept(page);
}
-void MarkCompactCollector::DrainSweepingWorklists() {
- if (!sweeper()->sweeping_in_progress()) return;
- sweeper()->DrainSweepingWorklists();
-}
-
void MarkCompactCollector::DrainSweepingWorklistForSpace(
AllocationSpace space) {
if (!sweeper()->sweeping_in_progress()) return;
@@ -851,6 +884,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
RememberedSet<OLD_TO_OLD>::ClearAll(heap());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ RememberedSet<OLD_TO_CODE>::ClearAll(heap());
+ }
for (Page* p : evacuation_candidates_) {
p->ClearEvacuationCandidate();
}
@@ -1038,14 +1074,22 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (ObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
- DCHECK(!HasWeakHeapObjectTag(*p));
- MarkObject(host, *p);
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base)));
+ MarkObject(host, p.load(cage_base));
}
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // At the moment, custom roots cannot contain CodeDataContainers - the only
+ // objects that can contain Code pointers.
+ UNREACHABLE();
+ }
+
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
// At the moment, custom roots cannot contain weak pointers.
@@ -1213,6 +1257,17 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
}
+ inline void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // This code is similar to the implementation of VisitPointer(), except
+ // that it operates on the new kind of slot.
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ Object code = slot.load(code_cage_base);
+ RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
+ }
+
inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot value) override {
DCHECK(host.IsEphemeronHashTable());
@@ -1271,8 +1326,14 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate()) {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- MemoryChunk::FromHeapObject(host), slot);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL &&
+ p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot);
+ } else {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot);
+ }
}
}
}
@@ -1353,14 +1414,6 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CodeDataContainer code_data_container =
- code.GCSafeCodeDataContainer(kAcquireLoad);
- Isolate* isolate_for_sandbox = base->heap_->isolate();
- // Update the |code_entry_point| which is a raw interiour or off-heap
- // pointer and thus not handled by the regular updating mechanism.
- code_data_container.SetCodeAndEntryPoint(isolate_for_sandbox, code);
- }
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1667,24 +1720,24 @@ void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
descriptors, number_of_own_descriptors);
}
-void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
- bool work_to_do = true;
+bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
int iterations = 0;
int max_iterations = FLAG_ephemeron_fixpoint_iterations;
- while (work_to_do) {
+ bool another_ephemeron_iteration_main_thread;
+
+ do {
PerformWrapperTracing();
if (iterations >= max_iterations) {
// Give up fixpoint iteration and switch to linear algorithm.
- ProcessEphemeronsLinear();
- break;
+ return false;
}
// Move ephemerons from next_ephemerons into current_ephemerons to
// drain them in this iteration.
weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
- heap()->concurrent_marking()->set_ephemeron_marked(false);
+ heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
{
TRACE_GC(heap()->tracer(),
@@ -1695,47 +1748,54 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
TaskPriority::kUserBlocking);
}
- work_to_do = ProcessEphemerons();
+ another_ephemeron_iteration_main_thread = ProcessEphemerons();
FinishConcurrentMarking();
}
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
- work_to_do = work_to_do || !local_marking_worklists()->IsEmpty() ||
- heap()->concurrent_marking()->ephemeron_marked() ||
- !local_marking_worklists()->IsEmbedderEmpty() ||
- !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
- }
+ } while (another_ephemeron_iteration_main_thread ||
+ heap()->concurrent_marking()->another_ephemeron_iteration() ||
+ !local_marking_worklists()->IsEmpty() ||
+ !local_marking_worklists()->IsEmbedderEmpty() ||
+ !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ return true;
}
bool MarkCompactCollector::ProcessEphemerons() {
Ephemeron ephemeron;
- bool ephemeron_marked = false;
+ bool another_ephemeron_iteration = false;
// Drain current_ephemerons and push ephemerons where key and value are still
// unreachable into next_ephemerons.
while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
- ephemeron_marked = true;
+ another_ephemeron_iteration = true;
}
}
// Drain marking worklist and push discovered ephemerons into
// discovered_ephemerons.
- DrainMarkingWorklist();
+ size_t objects_processed;
+ std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
+
+ // As soon as a single object was processed and potentially marked another
+ // object, we need another iteration. Otherwise we might fail to apply
+ // ephemeron semantics to it.
+ if (objects_processed > 0) another_ephemeron_iteration = true;
// Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
// before) and push ephemerons where key and value are still unreachable into
// next_ephemerons.
while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
- ephemeron_marked = true;
+ another_ephemeron_iteration = true;
}
}
@@ -1743,7 +1803,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
- return ephemeron_marked;
+ return another_ephemeron_iteration;
}
void MarkCompactCollector::ProcessEphemeronsLinear() {
@@ -1829,6 +1889,12 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(local_marking_worklists()->IsEmpty());
+ CHECK(weak_objects_.current_ephemerons.IsEmpty());
+ CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+
+ // Flush local ephemerons for main task to global pool.
+ weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
+ weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
}
void MarkCompactCollector::PerformWrapperTracing() {
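
The rewritten fixpoint loop above keeps iterating while either the main thread or the concurrent marker made progress on ephemerons, because marking one ephemeron value can make further ephemeron keys reachable. A small self-contained sketch of that fixpoint, with ephemerons reduced to plain (key, value) integer pairs purely for illustration:

#include <iostream>
#include <set>
#include <utility>
#include <vector>

int main() {
  // Ephemerons as (key, value) pairs; object 1 is assumed reachable from roots.
  std::vector<std::pair<int, int>> ephemerons = {{1, 2}, {2, 3}, {5, 6}};
  std::set<int> marked = {1};

  bool another_iteration = true;
  while (another_iteration) {
    another_iteration = false;
    for (const auto& ephemeron : ephemerons) {
      if (marked.count(ephemeron.first) && !marked.count(ephemeron.second)) {
        marked.insert(ephemeron.second);  // Value becomes live through its key.
        another_iteration = true;         // May make further keys reachable.
      }
    }
  }
  for (int object : marked) std::cout << object << " ";  // 1 2 3
  std::cout << "\n";  // Objects 5 and 6 stay unmarked: their key is dead.
  return 0;
}
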
@@ -1850,9 +1916,11 @@ void MarkCompactCollector::PerformWrapperTracing() {
void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
-size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
+std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
+ size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
+ size_t objects_processed = 0;
bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
while (local_marking_worklists()->Pop(&object) ||
@@ -1892,18 +1960,19 @@ size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
map, object, visited_size);
}
bytes_processed += visited_size;
+ objects_processed++;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
break;
}
}
- return bytes_processed;
+ return std::make_pair(bytes_processed, objects_processed);
}
// Generate definitions for use in other files.
-template size_t MarkCompactCollector::ProcessMarkingWorklist<
+template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
size_t bytes_to_process);
-template size_t MarkCompactCollector::ProcessMarkingWorklist<
+template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
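
ProcessMarkingWorklist now returns both the bytes and the number of objects processed, and callers that only care about one component unpack the pair with std::tie and std::ignore (see the incremental-marking.cc and ProcessEphemerons hunks). A tiny standalone example of that calling convention, using a hypothetical stand-in function:

#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

// Hypothetical stand-in returning (bytes_processed, objects_processed).
std::pair<size_t, size_t> ProcessWorklist() { return {4096, 17}; }

int main() {
  size_t bytes = 0;
  std::tie(bytes, std::ignore) = ProcessWorklist();    // keep only the bytes
  size_t objects = 0;
  std::tie(std::ignore, objects) = ProcessWorklist();  // keep only the count
  std::cout << bytes << " " << objects << "\n";        // 4096 17
  return 0;
}
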
@@ -1928,7 +1997,23 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
// buffer, flush it into global pool.
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
- ProcessEphemeronsUntilFixpoint();
+ if (!ProcessEphemeronsUntilFixpoint()) {
+ // Fixpoint iteration needed too many iterations and was cancelled. Use the
+ // guaranteed linear algorithm.
+ ProcessEphemeronsLinear();
+ }
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ Ephemeron ephemeron;
+
+ weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
+
+ while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
+ }
+ }
+#endif
CHECK(local_marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
@@ -2086,7 +2171,7 @@ void MarkCompactCollector::MarkLiveObjects() {
}
// We depend on IterateWeakRootsForPhantomHandles being called before
- // ClearOldBytecodeCandidates in order to identify flushed bytecode in the
+ // ProcessOldCodeCandidates in order to identify flushed bytecode in the
// CPU profiler.
{
heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
@@ -2122,7 +2207,11 @@ void MarkCompactCollector::ClearNonLiveReferences() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
- ClearOldBytecodeCandidates();
+ // ProcessFlushedBaselineCandidates should be called after clearing bytecode
+ // so that any bytecode that needs flushing has already been flushed and the
+ // code object on the JSFunction can be set correctly.
+ ProcessOldCodeCandidates();
+ ProcessFlushedBaselineCandidates();
}
{
@@ -2160,7 +2249,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
DCHECK(weak_objects_.js_weak_refs.IsEmpty());
DCHECK(weak_objects_.weak_cells.IsEmpty());
- DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
+ DCHECK(weak_objects_.code_flushing_candidates.IsEmpty());
+ DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
@@ -2278,21 +2368,63 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
DCHECK(!shared_info.is_compiled());
}
-void MarkCompactCollector::ClearOldBytecodeCandidates() {
- DCHECK(FLAG_flush_bytecode ||
- weak_objects_.bytecode_flushing_candidates.IsEmpty());
+void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
+ if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
+
+ // Mark baseline data as live.
+ non_atomic_marking_state()->WhiteToBlack(baseline_data);
+
+ // Record object slots.
+ DCHECK(
+ non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
+ ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
+ RecordSlot(baseline_data, code, HeapObject::cast(*code));
+
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
+ ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
+ RecordSlot(baseline_data, data, HeapObject::cast(*data));
+}
+
+void MarkCompactCollector::ProcessOldCodeCandidates() {
+ DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
+ weak_objects_.code_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
- while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
- &flushing_candidate)) {
- // If the BytecodeArray is dead, flush it, which will replace the field with
- // an uncompiled data object.
- if (!non_atomic_marking_state()->IsBlackOrGrey(
- flushing_candidate.GetBytecodeArray(isolate()))) {
+ while (weak_objects_.code_flushing_candidates.Pop(kMainThreadTask,
+ &flushing_candidate)) {
+ bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
+ flushing_candidate.GetBytecodeArray(isolate()));
+ if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineData()) {
+ BaselineData baseline_data = flushing_candidate.baseline_data();
+ if (non_atomic_marking_state()->IsBlackOrGrey(
+ baseline_data.baseline_code())) {
+ // Currently baseline code holds the bytecode array strongly, so bytecode
+ // is guaranteed to be live whenever baseline code is live. Hence baseline
+ // code can safely load the bytecode array without additional checks. If
+ // this changes in the future, these checks must be updated to flush code
+ // when the bytecode is not live, and baseline code must bail out when
+ // there is no bytecode.
+ DCHECK(is_bytecode_live);
+ MarkBaselineDataAsLive(baseline_data);
+ } else if (is_bytecode_live) {
+ // If baseline code is flushed but we have a valid bytecode array reset
+ // the function_data field to BytecodeArray.
+ flushing_candidate.set_function_data(baseline_data.data(),
+ kReleaseStore);
+ }
+ }
+
+ if (!is_bytecode_live) {
+ // If baseline code flushing is disabled we should only flush bytecode
+ // from functions that don't have baseline data.
+ DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineData());
+
+ // If the BytecodeArray is dead, flush it, which will replace the field
+ // with an uncompiled data object.
FlushBytecodeFromSFI(flushing_candidate);
}
// Now record the slot, which has either been updated to an uncompiled data,
- // or is the BytecodeArray which is still alive.
+ // Baseline code or BytecodeArray which is still alive.
ObjectSlot slot =
flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
@@ -2308,7 +2440,26 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
};
- flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
+ flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
+ }
+}
+
+void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
+ DCHECK(FLAG_flush_baseline_code ||
+ weak_objects_.baseline_flushing_candidates.IsEmpty());
+ JSFunction flushed_js_function;
+ while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
+ &flushed_js_function)) {
+ auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
+ Object target) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ };
+ flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
+
+ // Record the code slot that has been updated either to CompileLazy,
+ // InterpreterEntryTrampoline or baseline code.
+ ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
+ RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
}
}
@@ -2624,7 +2775,8 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_objects_in_code.Clear();
weak_objects_.js_weak_refs.Clear();
weak_objects_.weak_cells.Clear();
- weak_objects_.bytecode_flushing_candidates.Clear();
+ weak_objects_.code_flushing_candidates.Clear();
+ weak_objects_.baseline_flushing_candidates.Clear();
weak_objects_.flushed_js_functions.Clear();
}
@@ -2726,7 +2878,8 @@ MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
template <AccessMode access_mode, HeapObjectReferenceType reference_type,
typename TSlot>
-static inline SlotCallbackResult UpdateSlot(TSlot slot,
+static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
+ TSlot slot,
typename TSlot::TObject old,
HeapObject heap_obj) {
static_assert(std::is_same<TSlot, FullObjectSlot>::value ||
@@ -2736,7 +2889,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
std::is_same<TSlot, OffHeapObjectSlot>::value,
"Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
"expected here");
- MapWord map_word = heap_obj.map_word(kRelaxedLoad);
+ MapWord map_word = heap_obj.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
@@ -2752,7 +2905,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
DCHECK(!Heap::InFromPage(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
- DCHECK(heap_obj.map().IsMap());
+ DCHECK(heap_obj.map(cage_base).IsMap(cage_base));
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
@@ -2764,10 +2917,11 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
HeapObject heap_obj;
if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
- UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
+ UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(cage_base, slot, obj,
+ heap_obj);
} else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
- return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
- heap_obj);
+ return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
+ cage_base, slot, obj, heap_obj);
}
return REMOVE_SLOT;
}
@@ -2779,8 +2933,31 @@ static inline SlotCallbackResult UpdateStrongSlot(PtrComprCageBase cage_base,
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
HeapObject heap_obj;
if (obj.GetHeapObject(&heap_obj)) {
- return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
- heap_obj);
+ return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
+ cage_base, slot, obj, heap_obj);
+ }
+ return REMOVE_SLOT;
+}
+
+template <AccessMode access_mode>
+static inline SlotCallbackResult UpdateStrongCodeSlot(
+ HeapObject host, PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base, CodeObjectSlot slot) {
+ Object obj = slot.Relaxed_Load(code_cage_base);
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
+ HeapObject heap_obj;
+ if (obj.GetHeapObject(&heap_obj)) {
+ SlotCallbackResult result =
+ UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
+ cage_base, slot, obj, heap_obj);
+
+ CodeDataContainer code_data_container =
+ CodeDataContainer::cast(HeapObject::FromAddress(
+ slot.address() - CodeDataContainer::kCodeOffset));
+ Code code = code_data_container.code(code_cage_base);
+ Isolate* isolate_for_sandbox = GetIsolateForHeapSandbox(host);
+ code_data_container.UpdateCodeEntryPoint(isolate_for_sandbox, code);
+ return result;
}
return REMOVE_SLOT;
}
@@ -2791,8 +2968,7 @@ static inline SlotCallbackResult UpdateStrongSlot(PtrComprCageBase cage_base,
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- explicit PointersUpdatingVisitor(PtrComprCageBase cage_base)
- : cage_base_(cage_base) {}
+ explicit PointersUpdatingVisitor(Heap* heap) : cage_base_(heap->isolate()) {}
void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(cage_base_, p);
@@ -2816,6 +2992,14 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
}
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = cage_base_;
+ UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base_,
+ code_cage_base, slot);
+ }
+
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
@@ -3633,9 +3817,10 @@ class PointersUpdatingJob : public v8::JobTask {
template <typename MarkingState>
class ToSpaceUpdatingItem : public UpdatingItem {
public:
- explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
- MarkingState* marking_state)
- : chunk_(chunk),
+ explicit ToSpaceUpdatingItem(Heap* heap, MemoryChunk* chunk, Address start,
+ Address end, MarkingState* marking_state)
+ : heap_(heap),
+ chunk_(chunk),
start_(start),
end_(end),
marking_state_(marking_state) {}
@@ -3655,8 +3840,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor visitor(
- GetPtrComprCageBaseFromOnHeapAddress(start_));
+ PointersUpdatingVisitor visitor(heap_);
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
Map map = object.map();
@@ -3671,14 +3855,14 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor visitor(
- GetPtrComprCageBaseFromOnHeapAddress(start_));
+ PointersUpdatingVisitor visitor(heap_);
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first.IterateBodyFast(&visitor);
}
}
+ Heap* heap_;
MemoryChunk* chunk_;
Address start_;
Address end_;
@@ -3834,6 +4018,28 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+ (chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
+ nullptr)) {
+ PtrComprCageBase cage_base = heap_->isolate();
+ PtrComprCageBase code_cage_base = heap_->isolate();
+ RememberedSet<OLD_TO_CODE>::Iterate(
+ chunk_,
+ [=](MaybeObjectSlot slot) {
+ HeapObject host = HeapObject::FromAddress(
+ slot.address() - CodeDataContainer::kCodeOffset);
+ DCHECK(host.IsCodeDataContainer(cage_base));
+ return UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(
+ host, cage_base, code_cage_base, CodeObjectSlot(slot));
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
+ chunk_->ReleaseSlotSet<OLD_TO_CODE>();
+ }
+ // The invalidated slots are not needed after old-to-code slots were
+ // processed, but since there are no invalidated OLD_TO_CODE slots,
+ // there's nothing to clear.
+ }
}
void UpdateTypedPointers() {
@@ -3876,7 +4082,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
MemoryChunk* chunk, Address start, Address end) {
return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
- chunk, start, end, non_atomic_marking_state());
+ heap(), chunk, start, end, non_atomic_marking_state());
}
std::unique_ptr<UpdatingItem>
@@ -3914,6 +4120,9 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_old_slots =
chunk->slot_set<OLD_TO_OLD>() != nullptr ||
chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
+ const bool contains_old_to_code_slots =
+ V8_EXTERNAL_CODE_SPACE_BOOL &&
+ chunk->slot_set<OLD_TO_CODE>() != nullptr;
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
@@ -3925,7 +4134,7 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
!contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
- !contains_old_to_new_invalidated_slots)
+ !contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
contains_old_to_new_sweeping_slots ||
@@ -3992,7 +4201,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(isolate());
+ PointersUpdatingVisitor updating_visitor(heap());
{
TRACE_GC(heap()->tracer(),
@@ -4231,6 +4440,13 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
+ void VerifyCodePointer(CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // Code slots never appear in new space because CodeDataContainers, the
+ // only objects that can contain code pointers, are always allocated in
+ // the old space.
+ UNREACHABLE();
+ }
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -4250,8 +4466,10 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
+ PtrComprCageBase cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = *slot;
+ typename TSlot::TObject object = slot.load(cage_base);
HeapObject heap_object;
// Minor MC treats weak references as strong.
if (object.GetHeapObject(&heap_object)) {
@@ -4284,8 +4502,10 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
+ PtrComprCageBase cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = *current;
+ typename TSlot::TObject object = current.load(cage_base);
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -4299,6 +4519,14 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
+ void VerifyCodePointer(CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base =
+ GetPtrComprCageBaseFromOnHeapAddress(slot.address());
+ Code code = Code::unchecked_cast(slot.load(code_cage_base));
+ VerifyHeapObjectImpl(code);
+ }
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
@@ -4340,6 +4568,15 @@ class YoungGenerationMarkingVisitor final
VisitPointersImpl(host, start, end);
}
+ V8_INLINE void VisitCodePointer(HeapObject host,
+ CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // Code slots never appear in new space because CodeDataContainers, the
+ // only objects that can contain code pointers, are always allocated in
+ // the old space.
+ UNREACHABLE();
+ }
+
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
VisitPointerImpl(host, slot);
}
@@ -4488,8 +4725,14 @@ class YoungGenerationRecordMigratedSlotVisitor final
DCHECK(chunk->SweepingDone());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate() && IsLive(host)) {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- MemoryChunk::FromHeapObject(host), slot);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL &&
+ p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot);
+ } else {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot);
+ }
}
}
}
@@ -4499,7 +4742,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(isolate());
+ PointersUpdatingVisitor updating_visitor(heap());
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
@@ -4787,7 +5030,7 @@ MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
Address start,
Address end) {
return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
- chunk, start, end, non_atomic_marking_state());
+ heap(), chunk, start, end, non_atomic_marking_state());
}
std::unique_ptr<UpdatingItem>
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index b077522213..9ce993898c 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -376,11 +376,11 @@ class MainMarkingVisitor final
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
- BytecodeFlushMode bytecode_flush_mode,
+ base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, local_marking_worklists, weak_objects, heap,
- mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
+ mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
is_forced_gc),
marking_state_(marking_state),
revisiting_object_(false) {}
@@ -515,7 +515,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EnsurePageIsSwept(Page* page);
- void DrainSweepingWorklists();
void DrainSweepingWorklistForSpace(AllocationSpace space);
// Checks if sweeping is in progress right now on any space.
@@ -570,7 +569,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
- BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
+ base::EnumSet<CodeFlushMode> code_flush_mode() const {
+ return code_flush_mode_;
+ }
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
@@ -591,7 +592,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// is drained until it is empty.
template <MarkingWorklistProcessingMode mode =
MarkingWorklistProcessingMode::kDefault>
- size_t ProcessMarkingWorklist(size_t bytes_to_process);
+ std::pair<size_t, size_t> ProcessMarkingWorklist(size_t bytes_to_process);
private:
void ComputeEvacuationHeuristics(size_t area_size,
@@ -637,8 +638,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool ProcessEphemeron(HeapObject key, HeapObject value);
// Marks ephemerons and drains marking worklist iteratively
- // until a fixpoint is reached.
- void ProcessEphemeronsUntilFixpoint();
+ // until a fixpoint is reached. Returns false if too many iterations have been
+ // tried and the linear approach should be used.
+ bool ProcessEphemeronsUntilFixpoint();
// Drains ephemeron and marking worklists. Single iteration of the
// fixpoint iteration.
@@ -668,9 +670,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Flushes a weakly held bytecode array from a shared function info.
void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
- // Clears bytecode arrays that have not been executed for multiple
- // collections.
- void ClearOldBytecodeCandidates();
+ // Marks the BaselineData as live and records the slots of baseline data
+ // fields. This assumes that the objects in the data fields are alive.
+ void MarkBaselineDataAsLive(BaselineData baseline_data);
+
+ // Clears bytecode arrays / baseline code that have not been executed for
+ // multiple collections.
+ void ProcessOldCodeCandidates();
+ void ProcessFlushedBaselineCandidates();
// Resets any JSFunctions which have had their bytecode flushed.
void ClearFlushedJsFunctions();
@@ -791,9 +798,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Bytecode flushing is disabled when the code coverage mode is changed. Since
// that can happen while a GC is happening and we need the
- // bytecode_flush_mode_ to remain the same through out a GC, we record this at
+ // code_flush_mode_ to remain the same through out a GC, we record this at
// the start of each GC.
- BytecodeFlushMode bytecode_flush_mode_;
+ base::EnumSet<CodeFlushMode> code_flush_mode_;
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 3468d732bf..fe8661c516 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -89,6 +89,23 @@ MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitPointersImpl(
}
template <typename ConcreteVisitor, typename MarkingState>
+V8_INLINE void
+MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodePointerImpl(
+ HeapObject host, CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ Object object = slot.Relaxed_Load(code_cage_base);
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ // If the reference changes concurrently from strong to weak, the write
+ // barrier will treat the weak reference as strong, so we won't miss the
+ // weak reference.
+ ProcessStrongHeapObject(host, HeapObjectSlot(slot), heap_object);
+ }
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
@@ -132,12 +149,20 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
- Map map, JSFunction object) {
- int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
- // Check if the JSFunction needs reset due to bytecode being flushed.
- if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
- object.NeedsResetDueToFlushedBytecode()) {
- weak_objects_->flushed_js_functions.Push(task_id_, object);
+ Map map, JSFunction js_function) {
+ int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
+ if (js_function.ShouldFlushBaselineCode(code_flush_mode_)) {
+ DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
+ weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
+ } else {
+ VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
+ // TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
+ // also include cases where there is old bytecode even when there is no
+ // baseline code and remove this check here.
+ if (IsByteCodeFlushingEnabled(code_flush_mode_) &&
+ js_function.NeedsResetDueToFlushedBytecode()) {
+ weak_objects_->flushed_js_functions.Push(task_id_, js_function);
+ }
}
return size;
}
@@ -151,13 +176,25 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
this->VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size, this);
- // If the SharedFunctionInfo has old bytecode, mark it as flushable,
- // otherwise visit the function data field strongly.
- if (shared_info.ShouldFlushBytecode(bytecode_flush_mode_)) {
- weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
- } else {
+ if (!shared_info.ShouldFlushCode(code_flush_mode_)) {
+ // If the SharedFunctionInfo doesn't have old bytecode, visit the function
+ // data strongly.
VisitPointer(shared_info,
shared_info.RawField(SharedFunctionInfo::kFunctionDataOffset));
+ } else if (!IsByteCodeFlushingEnabled(code_flush_mode_)) {
+ // If bytecode flushing is disabled but baseline code flushing is enabled,
+ // then we have to visit the bytecode but not the baseline code.
+ DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
+ BaselineData baseline_data =
+ BaselineData::cast(shared_info.function_data(kAcquireLoad));
+ // Visit the bytecode hanging off baseline data.
+ VisitPointer(baseline_data,
+ baseline_data.RawField(BaselineData::kDataOffset));
+ weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
+ } else {
+ // In other cases, record as a flushing candidate since we have old
+ // bytecode.
+ weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
}
return size;
}
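For context on the BytecodeFlushMode -> base::EnumSet<CodeFlushMode> migration used throughout these hunks, the VisitJSFunction/VisitSharedFunctionInfo logic above branches on two predicates, IsBaselineCodeFlushingEnabled and IsByteCodeFlushingEnabled. A minimal standalone sketch of how such predicates can be expressed over a flag set follows; the enumerator names and the simplified set type are illustrative assumptions, not taken from this patch (V8's real type is base::EnumSet<CodeFlushMode>).

#include <cstdint>

// Assumed enumerator names; only the CodeFlushMode type name appears in the diff.
enum class CodeFlushMode { kFlushBytecode, kFlushBaselineCode };

// Tiny stand-in for base::EnumSet<CodeFlushMode>.
class CodeFlushModeSet {
 public:
  void Add(CodeFlushMode mode) { bits_ |= Bit(mode); }
  bool contains(CodeFlushMode mode) const { return (bits_ & Bit(mode)) != 0; }

 private:
  static uint32_t Bit(CodeFlushMode mode) {
    return 1u << static_cast<uint32_t>(mode);
  }
  uint32_t bits_ = 0;
};

// Bytecode flushing enabled: old, unexecuted bytecode may be dropped.
inline bool IsByteCodeFlushingEnabled(CodeFlushModeSet mode) {
  return mode.contains(CodeFlushMode::kFlushBytecode);
}

// Baseline code flushing enabled: unexecuted baseline code may be dropped
// while the bytecode itself is kept (the "visit only the bytecode" branch above).
inline bool IsBaselineCodeFlushingEnabled(CodeFlushModeSet mode) {
  return mode.contains(CodeFlushMode::kFlushBaselineCode);
}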
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index f8795aadfd..555b2e8118 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -105,14 +105,14 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
- BytecodeFlushMode bytecode_flush_mode,
+ base::EnumSet<CodeFlushMode> code_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
- bytecode_flush_mode_(bytecode_flush_mode),
+ code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
is_forced_gc_(is_forced_gc),
is_shared_heap_(heap->IsShared()) {}
@@ -153,6 +153,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
MaybeObjectSlot end) final {
VisitPointersImpl(host, start, end);
}
+ V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
+ VisitCodePointerImpl(host, slot);
+ }
V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
@@ -178,6 +181,10 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
template <typename TSlot>
V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end);
+ // Similar to VisitPointersImpl() but using the code cage base for loading from
+ // the slot.
+ V8_INLINE void VisitCodePointerImpl(HeapObject host, CodeObjectSlot slot);
+
V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
int number_of_own_descriptors);
@@ -199,7 +206,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
Heap* const heap_;
const int task_id_;
const unsigned mark_compact_epoch_;
- const BytecodeFlushMode bytecode_flush_mode_;
+ const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
const bool is_shared_heap_;
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 41512cbbce..f37583ab42 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -26,6 +26,7 @@ class SlotSet;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
+ OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_OLD + 1 : OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES
};
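Because OLD_TO_CODE is defined conditionally, NUMBER_OF_REMEMBERED_SET_TYPES, and therefore every per-chunk array indexed by it (such as slot_set_ in the memory-chunk.cc hunks below), only grows in builds with the external code space. A small standalone sketch of the pattern, with kExternalCodeSpace as an illustrative stand-in for V8_EXTERNAL_CODE_SPACE_BOOL:

#include <cstdio>

// Stand-in for V8_EXTERNAL_CODE_SPACE_BOOL; flip to true to model an
// external-code-space build.
constexpr bool kExternalCodeSpace = false;

enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  // Aliases OLD_TO_OLD when the external code space is compiled out, so the
  // enumerator count below does not change in such builds.
  OLD_TO_CODE = kExternalCodeSpace ? OLD_TO_OLD + 1 : OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES
};

int main() {
  // Prints 2 here; would print 3 with kExternalCodeSpace == true.
  std::printf("%d\n", static_cast<int>(NUMBER_OF_REMEMBERED_SET_TYPES));
  return 0;
}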
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 4d16da707f..0d9afdb1c7 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -115,6 +115,10 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
+ nullptr);
+ }
base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
@@ -122,6 +126,10 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
nullptr);
chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ // Not actually used, but initialized anyway for predictability.
+ chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
+ }
chunk->progress_bar_ = 0;
chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
@@ -224,6 +232,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSweepingSlotSet();
ReleaseSlotSet<OLD_TO_OLD>();
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots<OLD_TO_NEW>();
@@ -243,6 +252,9 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
+#ifdef V8_EXTERNAL_CODE_SPACE
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_CODE>();
+#endif // V8_EXTERNAL_CODE_SPACE
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
@@ -267,6 +279,9 @@ SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
+#ifdef V8_EXTERNAL_CODE_SPACE
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_CODE>();
+#endif // V8_EXTERNAL_CODE_SPACE
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index a0c5491e9d..ef4e7bb439 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -111,13 +111,11 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
- Address top = allocation_info_.top();
- if (allocation_info_.limit() < top + size_in_bytes) {
+ if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + size_in_bytes);
+ HeapObject obj =
+ HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes));
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
@@ -136,13 +134,11 @@ AllocationResult NewSpace::AllocateFastAligned(
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
- if (allocation_info_.limit() - top <
- static_cast<uintptr_t>(aligned_size_in_bytes)) {
+ if (!allocation_info_.CanIncrementTop(aligned_size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + aligned_size_in_bytes);
+ HeapObject obj = HeapObject::FromAddress(
+ allocation_info_.IncrementTop(aligned_size_in_bytes));
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 7e5a4c694a..d08fe48f23 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -500,7 +500,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, to_space_.page_high());
- allocation_info_.set_limit(new_limit);
+ allocation_info_.SetLimit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
@@ -596,11 +596,8 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
- if (info.limit() != kNullAddress && info.limit() == top()) {
- DCHECK_NE(info.top(), kNullAddress);
- allocation_info_.set_top(info.top());
- allocation_info_.MoveStartToTop();
- original_top_.store(info.top(), std::memory_order_release);
+ if (allocation_info_.MergeIfAdjacent(info)) {
+ original_top_.store(allocation_info_.top(), std::memory_order_release);
}
#if DEBUG
@@ -675,8 +672,9 @@ void NewSpace::VerifyTop() {
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
DCHECK_LE(allocation_info_.top(), allocation_info_.limit());
- // Ensure that original_top_ always equals LAB start.
- DCHECK_EQ(original_top_, allocation_info_.start());
+ // Ensure that original_top_ is always >= the LAB start. The delta between start_
+ // and top_ is still to be processed by allocation observers.
+ DCHECK_GE(original_top_, allocation_info_.start());
// Ensure that limit() is <= original_limit_; original_limit_ always needs
// to be the end of the current to-space page.
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 512f5db7b8..6dcd0a51a0 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -93,6 +93,12 @@ class FieldStatsCollector : public ObjectVisitor {
*tagged_fields_count_ += (end - start);
}
+ V8_INLINE void VisitCodePointer(HeapObject host,
+ CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ *tagged_fields_count_ += 1;
+ }
+
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
// Code target is most likely encoded as a relative 32-bit offset and not
// as a full tagged value, so there's nothing to count.
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index 2c0fbd1ed2..75550d6b26 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -78,13 +78,9 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
return added;
}
-bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
+bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
if (allocation_info_.top() != kNullAddress) {
- const Address object_address = object.address();
- if ((allocation_info_.top() - object_size) == object_address) {
- allocation_info_.set_top(object_address);
- return true;
- }
+ return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
}
return false;
}
@@ -97,14 +93,11 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
}
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit())
+ if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(identity());
- DCHECK_LE(new_top, allocation_info_.limit());
- allocation_info_.set_top(new_top);
-
- return AllocationResult(HeapObject::FromAddress(current_top));
+ }
+ return AllocationResult(
+ HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes)));
}
AllocationResult PagedSpace::AllocateFastAligned(
@@ -112,20 +105,17 @@ AllocationResult PagedSpace::AllocateFastAligned(
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
-
- Address new_top = current_top + filler_size + size_in_bytes;
- if (new_top > allocation_info_.limit())
+ int aligned_size = filler_size + size_in_bytes;
+ if (!allocation_info_.CanIncrementTop(aligned_size)) {
return AllocationResult::Retry(identity());
-
- allocation_info_.set_top(new_top);
- if (aligned_size_in_bytes)
- *aligned_size_in_bytes = filler_size + size_in_bytes;
+ }
+ HeapObject obj =
+ HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
+ if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
if (filler_size > 0) {
- Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
- HeapObject::FromAddress(current_top), filler_size);
+ obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
}
-
- return AllocationResult(HeapObject::FromAddress(current_top + filler_size));
+ return AllocationResult(obj);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 7a8316ce87..d502b226c4 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -176,7 +176,7 @@ class V8_EXPORT_PRIVATE PagedSpace
return size_in_bytes - wasted;
}
- inline bool TryFreeLast(HeapObject object, int object_size);
+ inline bool TryFreeLast(Address object_address, int object_size);
void ResetFreeList();
diff --git a/deps/v8/src/heap/parked-scope.h b/deps/v8/src/heap/parked-scope.h
index 41dc6bfc1f..c7bfa38ce1 100644
--- a/deps/v8/src/heap/parked-scope.h
+++ b/deps/v8/src/heap/parked-scope.h
@@ -50,6 +50,7 @@ class V8_NODISCARD ParkedMutexGuard {
: ParkedMutexGuard(local_isolate->heap(), mutex) {}
explicit ParkedMutexGuard(LocalHeap* local_heap, base::Mutex* mutex)
: mutex_(mutex) {
+ DCHECK(AllowGarbageCollection::IsAllowed());
if (!mutex_->TryLock()) {
ParkedScope scope(local_heap);
mutex_->Lock();
@@ -74,6 +75,7 @@ class V8_NODISCARD ParkedSharedMutexGuardIf final {
: ParkedSharedMutexGuardIf(local_isolate->heap(), mutex, enable_mutex) {}
ParkedSharedMutexGuardIf(LocalHeap* local_heap, base::SharedMutex* mutex,
bool enable_mutex) {
+ DCHECK(AllowGarbageCollection::IsAllowed());
DCHECK_IMPLIES(Behavior == base::NullBehavior::kRequireNotNull,
mutex != nullptr);
if (!enable_mutex) return;
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index e729a1cf59..a7b47e16c8 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -278,11 +278,14 @@ class RememberedSet : public AllStatic {
// Clear all old to old slots from the remembered set.
static void ClearAll(Heap* heap) {
- STATIC_ASSERT(type == OLD_TO_OLD);
+ STATIC_ASSERT(type == OLD_TO_OLD || type == OLD_TO_CODE);
OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ chunk->ReleaseSlotSet<OLD_TO_CODE>();
+ }
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index 5bcfb58045..e67c9743f8 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -14,6 +14,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
+#include "src/logging/counters-scopes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index f600df2027..5eea1afafe 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -453,6 +453,14 @@ void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
return VisitPointersImpl(host, start, end);
}
+void ScavengeVisitor::VisitCodePointer(HeapObject host, CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // Code slots never appear in new space because CodeDataContainers, the
+ // only objects that can contain code pointers, are always allocated in
+ // the old space.
+ UNREACHABLE();
+}
+
void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
#ifdef DEBUG
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 953a245603..f697e83105 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -41,6 +41,14 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
VisitPointersImpl(host, start, end);
}
+ V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // Code slots never appear in new space because CodeDataContainers, the
+ // only objects that can contain code pointers, are always allocated in
+ // the old space.
+ UNREACHABLE();
+ }
+
V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
HandleSlot(host, FullHeapObjectSlot(&target), target);
@@ -114,6 +122,13 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
HeapObject::cast(target))) {
// We should never try to record off-heap slots.
DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
+ // Code slots never appear in new space because CodeDataContainers, the
+ // only objects that can contain code pointers, are always allocated in
+ // the old space.
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
+ !MemoryChunk::FromHeapObject(target)->IsFlagSet(
+ MemoryChunk::IS_EXECUTABLE));
+
// We cannot call MarkCompactCollector::RecordSlot because that checks
// that the host page is not in young generation, which does not hold
// for pending large pages.
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 481ec4d558..92e24a605e 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -235,6 +235,7 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final;
+ V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final;
V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 4c3e29ef1d..53afdad22d 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -138,19 +138,17 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
-
- Address new_top = current_top + filler_size + size_in_bytes;
- if (new_top > allocation_info_.limit())
+ int aligned_size = filler_size + size_in_bytes;
+ if (!allocation_info_.CanIncrementTop(aligned_size)) {
return AllocationResult::Retry(NEW_SPACE);
-
- allocation_info_.set_top(new_top);
+ }
+ HeapObject object =
+ HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
if (filler_size > 0) {
- return Heap::PrecedeWithFiller(ReadOnlyRoots(heap_),
- HeapObject::FromAddress(current_top),
- filler_size);
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap_), object, filler_size);
}
- return AllocationResult(HeapObject::FromAddress(current_top));
+ return AllocationResult(object);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
@@ -165,23 +163,14 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}
-
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
- if (allocation_info_.top() == other->allocation_info_.limit()) {
- allocation_info_.set_top(other->allocation_info_.top());
- other->allocation_info_.Reset(kNullAddress, kNullAddress);
- return true;
- }
- return false;
+ return allocation_info_.MergeIfAdjacent(other->allocation_info_);
}
bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
if (IsValid()) {
const Address object_address = object.address();
- if ((allocation_info_.top() - object_size) == object_address) {
- allocation_info_.set_top(object_address);
- return true;
- }
+ return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
}
return false;
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 81aa365e44..4d3fd9411f 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -385,7 +385,7 @@ void SpaceWithLinearArea::AdvanceAllocationObservers() {
}
void SpaceWithLinearArea::MarkLabStartInitialized() {
- allocation_info_.MoveStartToTop();
+ allocation_info_.ResetStart();
if (identity() == NEW_SPACE) {
heap()->new_space()->MoveOriginalTopForward();
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index ffa4cd0f33..6a047fd375 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -17,6 +17,7 @@
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
+#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
@@ -37,7 +38,6 @@ class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
-class LinearAllocationArea;
class Page;
class PagedSpace;
class SemiSpace;
@@ -366,61 +366,6 @@ class PageRange {
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
-// An abstraction of allocation and relocation pointers in a page-structured
-// space.
-class LinearAllocationArea {
- public:
- LinearAllocationArea()
- : start_(kNullAddress), top_(kNullAddress), limit_(kNullAddress) {}
- LinearAllocationArea(Address top, Address limit)
- : start_(top), top_(top), limit_(limit) {}
-
- void Reset(Address top, Address limit) {
- start_ = top;
- set_top(top);
- set_limit(limit);
- }
-
- void MoveStartToTop() { start_ = top_; }
-
- V8_INLINE Address start() const { return start_; }
-
- V8_INLINE void set_top(Address top) {
- SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
- top_ = top;
- }
-
- V8_INLINE Address top() const {
- SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
- return top_;
- }
-
- Address* top_address() { return &top_; }
-
- V8_INLINE void set_limit(Address limit) { limit_ = limit; }
-
- V8_INLINE Address limit() const { return limit_; }
-
- Address* limit_address() { return &limit_; }
-
-#ifdef DEBUG
- bool VerifyPagedAllocation() {
- return (Page::FromAllocationAreaAddress(top_) ==
- Page::FromAllocationAreaAddress(limit_)) &&
- (top_ <= limit_);
- }
-#endif
-
- private:
- // Current allocation top.
- Address start_;
- // Current allocation top.
- Address top_;
- // Current allocation limit.
- Address limit_;
-};
-
-
// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
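The inline LinearAllocationArea class deleted above moves to the new src/heap/linear-allocation-area.h header included earlier in this hunk. That header is not part of this diff; the sketch below reconstructs the helper methods it is expected to provide purely from the call sites in the new-spaces, paged-spaces and spaces hunks above (CanIncrementTop, IncrementTop, DecrementTopIfAdjacent, MergeIfAdjacent, ResetStart, SetLimit). Signatures, checks and exact merge semantics are assumptions, not the actual V8 implementation.

#include <cstddef>
#include <cstdint>

// Stand-ins for V8's Address/kNullAddress (normally from include/v8-internal.h).
using Address = uintptr_t;
constexpr Address kNullAddress = 0;

class LinearAllocationArea {
 public:
  LinearAllocationArea() = default;
  LinearAllocationArea(Address top, Address limit)
      : start_(top), top_(top), limit_(limit) {}

  // True if top can advance by |bytes| without crossing the limit.
  bool CanIncrementTop(size_t bytes) const { return top_ + bytes <= limit_; }

  // Advances top by |bytes| and returns the previous top, i.e. the address of
  // the newly allocated object.
  Address IncrementTop(size_t bytes) {
    Address old_top = top_;
    top_ += bytes;
    return old_top;
  }

  // Undoes the last allocation if |address| sits exactly |bytes| below the
  // current top (mirrors the removed TryFreeLast logic).
  bool DecrementTopIfAdjacent(Address address, size_t bytes) {
    if (top_ - bytes != address) return false;
    top_ = address;
    return true;
  }

  // Absorbs |other| when it ends exactly at this area's top, mirroring the
  // removed LocalAllocationBuffer::TryMerge; resets |other| on success.
  bool MergeIfAdjacent(LinearAllocationArea& other) {
    if (other.limit_ == kNullAddress || other.limit_ != top_) return false;
    top_ = other.top_;
    other = LinearAllocationArea();
    return true;
  }

  void ResetStart() { start_ = top_; }             // replaces MoveStartToTop()
  void SetLimit(Address limit) { limit_ = limit; }

  Address start() const { return start_; }
  Address top() const { return top_; }
  Address limit() const { return limit_; }

 private:
  Address start_ = kNullAddress;
  Address top_ = kNullAddress;
  Address limit_ = kNullAddress;
};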
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 61413c5e95..7d2d680456 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -227,13 +227,6 @@ void Sweeper::EnsureCompleted() {
sweeping_in_progress_ = false;
}
-void Sweeper::DrainSweepingWorklists() {
- if (!sweeping_in_progress_) return;
-
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { DrainSweepingWorklistForSpace(space); });
-}
-
void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
if (!sweeping_in_progress_) return;
ParallelSweepSpace(space, 0);
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 580c09c8d2..f6a362d596 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -111,7 +111,6 @@ class Sweeper {
void StartSweeping();
V8_EXPORT_PRIVATE void StartSweeperTasks();
void EnsureCompleted();
- void DrainSweepingWorklists();
void DrainSweepingWorklistForSpace(AllocationSpace space);
bool AreSweeperTasksRunning();
diff --git a/deps/v8/src/heap/third-party/heap-api-stub.cc b/deps/v8/src/heap/third-party/heap-api-stub.cc
index a89c52cd8b..3ca562fe58 100644
--- a/deps/v8/src/heap/third-party/heap-api-stub.cc
+++ b/deps/v8/src/heap/third-party/heap-api-stub.cc
@@ -48,14 +48,14 @@ bool Heap::InSpace(Address, AllocationSpace) { return false; }
bool Heap::InOldSpace(Address) { return false; }
// static
-bool Heap::InCodeSpace(Address) { return false; }
-
-// static
bool Heap::InReadOnlySpace(Address) { return false; }
// static
bool Heap::IsValidHeapObject(HeapObject) { return false; }
+// static
+bool Heap::IsValidCodeObject(HeapObject) { return false; }
+
bool Heap::CollectGarbage() { return false; }
} // namespace third_party_heap
diff --git a/deps/v8/src/heap/third-party/heap-api.h b/deps/v8/src/heap/third-party/heap-api.h
index 6bf72b85b8..9354c7bca8 100644
--- a/deps/v8/src/heap/third-party/heap-api.h
+++ b/deps/v8/src/heap/third-party/heap-api.h
@@ -34,8 +34,6 @@ class Heap {
static bool InOldSpace(Address address);
- static bool InCodeSpace(Address address);
-
static bool InReadOnlySpace(Address address);
static bool InLargeObjectSpace(Address address);
@@ -44,6 +42,8 @@ class Heap {
static bool IsImmovable(HeapObject object);
+ static bool IsValidCodeObject(HeapObject object);
+
void ResetIterator();
HeapObject NextObject();
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
index 84df473076..8a36c3aef8 100644
--- a/deps/v8/src/heap/weak-object-worklists.cc
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -133,9 +133,9 @@ void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
DCHECK(!ContainsYoungObjects(weak_cells));
}
-void WeakObjects::UpdateBytecodeFlushingCandidates(
- WeakObjectWorklist<SharedFunctionInfo>& bytecode_flushing_candidates) {
- DCHECK(!ContainsYoungObjects(bytecode_flushing_candidates));
+void WeakObjects::UpdateCodeFlushingCandidates(
+ WeakObjectWorklist<SharedFunctionInfo>& code_flushing_candidates) {
+ DCHECK(!ContainsYoungObjects(code_flushing_candidates));
}
void WeakObjects::UpdateFlushedJSFunctions(
@@ -153,6 +153,21 @@ void WeakObjects::UpdateFlushedJSFunctions(
});
}
+void WeakObjects::UpdateBaselineFlushingCandidates(
+ WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
+ baseline_flush_candidates.Update(
+ [](JSFunction slot_in, JSFunction* slot_out) -> bool {
+ JSFunction forwarded = ForwardingAddress(slot_in);
+
+ if (!forwarded.is_null()) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+}
+
#ifdef DEBUG
template <typename Type>
bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
diff --git a/deps/v8/src/heap/weak-object-worklists.h b/deps/v8/src/heap/weak-object-worklists.h
index 67df372b57..60e698e0a7 100644
--- a/deps/v8/src/heap/weak-object-worklists.h
+++ b/deps/v8/src/heap/weak-object-worklists.h
@@ -57,8 +57,8 @@ class TransitionArray;
F(HeapObjectAndCode, weak_objects_in_code, WeakObjectsInCode) \
F(JSWeakRef, js_weak_refs, JSWeakRefs) \
F(WeakCell, weak_cells, WeakCells) \
- F(SharedFunctionInfo, bytecode_flushing_candidates, \
- BytecodeFlushingCandidates) \
+ F(SharedFunctionInfo, code_flushing_candidates, CodeFlushingCandidates) \
+ F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
F(JSFunction, flushed_js_functions, FlushedJSFunctions)
class WeakObjects {
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index d5b432b363..285c266b80 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -216,8 +216,12 @@ Handle<Smi> StoreHandler::StoreSlow(Isolate* isolate,
}
Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
+ return handle(StoreProxy(), isolate);
+}
+
+Smi StoreHandler::StoreProxy() {
int config = KindBits::encode(kProxy);
- return handle(Smi::FromInt(config), isolate);
+ return Smi::FromInt(config);
}
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 5d3a0ad569..2fc200f93e 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -354,6 +354,7 @@ class StoreHandler final : public DataHandler {
// Creates a Smi-handler for storing a property on a proxy.
static inline Handle<Smi> StoreProxy(Isolate* isolate);
+ static inline Smi StoreProxy();
// Decodes the KeyedAccessStoreMode from a {handler}.
static KeyedAccessStoreMode GetKeyedAccessStoreMode(MaybeObject handler);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 047a74cfd3..a2b920a09d 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -427,7 +427,13 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name,
}
}
- if (MigrateDeprecated(isolate(), object)) use_ic = false;
+ // If we encounter an object with a deprecated map, we want to update the
+ // feedback vector with the migrated map.
+ // Mark ourselves as RECOMPUTE_HANDLER so that we don't turn megamorphic due
+ // to seeing the same map and handler.
+ if (MigrateDeprecated(isolate(), object)) {
+ UpdateState(object, name);
+ }
JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
update_lookup_start_object_map(object);
@@ -732,8 +738,8 @@ bool IC::IsTransitionOfMonomorphicTarget(Map source_map, Map target_map) {
if (more_general_transition) {
MapHandles map_list;
map_list.push_back(handle(target_map, isolate_));
- transitioned_map =
- source_map.FindElementsKindTransitionedMap(isolate(), map_list);
+ transitioned_map = source_map.FindElementsKindTransitionedMap(
+ isolate(), map_list, ConcurrencyMode::kNotConcurrent);
}
return transitioned_map == target_map;
}
@@ -1389,8 +1395,8 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(
// among receiver_maps as unstable because the optimizing compilers may
// generate an elements kind transition for this kind of receivers.
if (receiver_map->is_stable()) {
- Map tmap = receiver_map->FindElementsKindTransitionedMap(isolate(),
- *receiver_maps);
+ Map tmap = receiver_map->FindElementsKindTransitionedMap(
+ isolate(), *receiver_maps, ConcurrencyMode::kNotConcurrent);
if (!tmap.is_null()) {
receiver_map->NotifyLeafMapLayoutChange(isolate());
}
@@ -2238,8 +2244,8 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
} else {
{
- Map tmap = receiver_map->FindElementsKindTransitionedMap(isolate(),
- receiver_maps);
+ Map tmap = receiver_map->FindElementsKindTransitionedMap(
+ isolate(), receiver_maps, ConcurrencyMode::kNotConcurrent);
if (!tmap.is_null()) {
if (receiver_map->is_stable()) {
receiver_map->NotifyLeafMapLayoutChange(isolate());
@@ -2434,28 +2440,30 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
namespace {
-void StoreOwnElement(Isolate* isolate, Handle<JSArray> array,
- Handle<Object> index, Handle<Object> value) {
+Maybe<bool> StoreOwnElement(Isolate* isolate, Handle<JSArray> array,
+ Handle<Object> index, Handle<Object> value) {
DCHECK(index->IsNumber());
PropertyKey key(isolate, index);
LookupIterator it(isolate, array, key, LookupIterator::OWN);
- CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, value, NONE, Just(ShouldThrow::kThrowOnError))
- .FromJust());
+ MAYBE_RETURN(JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value, NONE, Just(ShouldThrow::kThrowOnError)),
+ Nothing<bool>());
+ return Just(true);
}
} // namespace
-void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
- Handle<Object> value) {
+MaybeHandle<Object> StoreInArrayLiteralIC::Store(Handle<JSArray> array,
+ Handle<Object> index,
+ Handle<Object> value) {
DCHECK(!array->map().IsMapInArrayPrototypeChain(isolate()));
DCHECK(index->IsNumber());
if (!FLAG_use_ic || state() == NO_FEEDBACK ||
MigrateDeprecated(isolate(), array)) {
- StoreOwnElement(isolate(), array, index, value);
+ MAYBE_RETURN_NULL(StoreOwnElement(isolate(), array, index, value));
TraceIC("StoreInArrayLiteralIC", index);
- return;
+ return value;
}
// TODO(neis): Convert HeapNumber to Smi if possible?
@@ -2468,7 +2476,7 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
}
Handle<Map> old_array_map(array->map(), isolate());
- StoreOwnElement(isolate(), array, index, value);
+ MAYBE_RETURN_NULL(StoreOwnElement(isolate(), array, index, value));
if (index->IsSmi()) {
DCHECK(!old_array_map->is_abandoned_prototype_map());
@@ -2482,6 +2490,7 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
ConfigureVectorState(MEGAMORPHIC, index);
}
TraceIC("StoreInArrayLiteralIC", index);
+ return value;
}
// ----------------------------------------------------------------------------
@@ -2788,8 +2797,8 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
DCHECK(key->IsNumber());
StoreInArrayLiteralIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
- ic.Store(Handle<JSArray>::cast(receiver), key, value);
- return *value;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, ic.Store(Handle<JSArray>::cast(receiver), key, value));
}
}
@@ -2811,8 +2820,8 @@ RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
DCHECK(key->IsNumber());
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
StoreInArrayLiteralIC ic(isolate, vector, vector_slot);
- ic.Store(Handle<JSArray>::cast(receiver), key, value);
- return *value;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, ic.Store(Handle<JSArray>::cast(receiver), key, value));
}
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
@@ -3004,11 +3013,13 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<Object> value = args.at(4);
HandleScope scope(isolate);
+#ifdef V8_RUNTIME_CALL_STATS
if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) {
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
StoreOrigin::kMaybeKeyed));
}
+#endif
DCHECK(info->IsCompatibleReceiver(*receiver));
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index c0243d400e..e97ddabf37 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -337,7 +337,8 @@ class StoreInArrayLiteralIC : public KeyedStoreIC {
DCHECK(IsStoreInArrayLiteralICKind(kind()));
}
- void Store(Handle<JSArray> array, Handle<Object> index, Handle<Object> value);
+ MaybeHandle<Object> Store(Handle<JSArray> array, Handle<Object> index,
+ Handle<Object> value);
};
} // namespace internal
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index a55e214008..0fe9f8d9b4 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -5,3 +5,4 @@ jgruber@chromium.org
jkummerow@chromium.org
marja@chromium.org
verwaest@chromium.org
+syg@chromium.org
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 4028475955..326944e13e 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -57,6 +57,7 @@
#include "src/objects/js-segmenter.h"
#include "src/objects/js-segments.h"
#endif // V8_INTL_SUPPORT
+#include "src/codegen/script-details.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/property-cell.h"
@@ -233,12 +234,7 @@ class Genesis {
#undef DECLARE_FEATURE_INITIALIZATION
void InitializeGlobal_regexp_linear_flag();
- enum ArrayBufferKind {
- ARRAY_BUFFER,
- SHARED_ARRAY_BUFFER,
- RESIZABLE_ARRAY_BUFFER,
- GROWABLE_SHARED_ARRAY_BUFFER
- };
+ enum ArrayBufferKind { ARRAY_BUFFER, SHARED_ARRAY_BUFFER };
Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
ArrayBufferKind array_buffer_kind);
@@ -249,7 +245,7 @@ class Genesis {
ElementsKind elements_kind,
InstanceType type,
int rab_gsab_initial_map_index);
- void InitializeNormalizedMapCaches();
+ void InitializeMapCaches();
enum ExtensionTraversalState { UNVISITED, VISITED, INSTALLED };
@@ -1441,11 +1437,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
JSObject::AddProperty(isolate, prototype, factory->message_string(),
factory->empty_string(), DONT_ENUM);
- if (FLAG_harmony_error_cause) {
- JSObject::AddProperty(isolate, prototype, factory->cause_string(),
- factory->undefined_value(), DONT_ENUM);
- }
-
if (context_index == Context::ERROR_FUNCTION_INDEX) {
Handle<JSFunction> to_string_fun =
SimpleInstallFunction(isolate, prototype, "toString",
@@ -1741,8 +1732,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(isolate_, proto, factory->constructor_string(),
array_function, DONT_ENUM);
- SimpleInstallFunction(isolate_, proto, "concat", Builtin::kArrayConcat, 1,
- false);
+ SimpleInstallFunction(isolate_, proto, "concat",
+ Builtin::kArrayPrototypeConcat, 1, false);
SimpleInstallFunction(isolate_, proto, "copyWithin",
Builtin::kArrayPrototypeCopyWithin, 2, false);
SimpleInstallFunction(isolate_, proto, "fill", Builtin::kArrayPrototypeFill,
@@ -2471,6 +2462,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtin::kRegExpPrototypeFlagsGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->global_string(),
Builtin::kRegExpPrototypeGlobalGetter, true);
+ SimpleInstallGetter(isolate(), prototype, factory->has_indices_string(),
+ Builtin::kRegExpPrototypeHasIndicesGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->ignoreCase_string(),
Builtin::kRegExpPrototypeIgnoreCaseGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->multiline_string(),
@@ -2698,6 +2691,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// -- R u n t i m e E r r o r
InstallError(isolate_, dummy, factory->RuntimeError_string(),
Context::WASM_RUNTIME_ERROR_FUNCTION_INDEX);
+
+ // -- W e b A s s e m b l y . E x c e p t i o n
+ InstallError(isolate_, dummy, factory->WebAssemblyException_string(),
+ Context::WASM_EXCEPTION_ERROR_FUNCTION_INDEX);
}
// Initialize the embedder data slot.
@@ -3283,25 +3280,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(isolate_, shared_array_buffer_fun);
}
- { // R e s i z a b l e A r r a y B u f f e r
- Handle<String> name = factory->ResizableArrayBuffer_string();
- Handle<JSFunction> resizable_array_buffer_fun =
- CreateArrayBuffer(name, RESIZABLE_ARRAY_BUFFER);
- InstallWithIntrinsicDefaultProto(isolate_, resizable_array_buffer_fun,
- Context::RESIZABLE_ARRAY_BUFFER_FUN_INDEX);
- InstallSpeciesGetter(isolate_, resizable_array_buffer_fun);
- }
-
- { // G r o w a b l e S h a r e d A r r a y B u f f e r
- Handle<String> name = factory->GrowableSharedArrayBuffer_string();
- Handle<JSFunction> growable_shared_array_buffer_fun =
- CreateArrayBuffer(name, GROWABLE_SHARED_ARRAY_BUFFER);
- InstallWithIntrinsicDefaultProto(
- isolate_, growable_shared_array_buffer_fun,
- Context::GROWABLE_SHARED_ARRAY_BUFFER_FUN_INDEX);
- InstallSpeciesGetter(isolate_, growable_shared_array_buffer_fun);
- }
-
{ // -- A t o m i c s
Handle<JSObject> atomics_object =
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
@@ -4152,8 +4130,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source, Compiler::ScriptDetails(script_name),
- ScriptOriginOptions(), extension, nullptr,
+ isolate, source, ScriptDetails(script_name), extension, nullptr,
ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
@@ -4324,6 +4301,10 @@ void Genesis::InitializeIteratorFunctions() {
native_context->async_function_map(), kReleaseStore);
async_function_constructor->shared().DontAdaptArguments();
async_function_constructor->shared().set_length(1);
+ InstallWithIntrinsicDefaultProto(
+ isolate, async_function_constructor,
+ Context::ASYNC_FUNCTION_FUNCTION_INDEX);
+
native_context->set_async_function_constructor(*async_function_constructor);
JSObject::ForceSetPrototype(isolate, async_function_constructor,
isolate->function_function());
@@ -4428,6 +4409,39 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_more_timezone)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
+void Genesis::InitializeGlobal_harmony_array_find_last() {
+ if (!FLAG_harmony_array_find_last) return;
+
+ {
+ Handle<JSFunction> array_function(native_context()->array_function(),
+ isolate());
+ Handle<JSObject> array_prototype(
+ JSObject::cast(array_function->instance_prototype()), isolate());
+
+ SimpleInstallFunction(isolate_, array_prototype, "findLast",
+ Builtin::kArrayPrototypeFindLast, 1, false);
+ SimpleInstallFunction(isolate_, array_prototype, "findLastIndex",
+ Builtin::kArrayPrototypeFindLastIndex, 1, false);
+
+ Handle<JSObject> unscopables = Handle<JSObject>::cast(
+ JSObject::GetProperty(isolate(), array_prototype,
+ isolate()->factory()->unscopables_symbol())
+ .ToHandleChecked());
+
+ InstallTrueValuedProperty(isolate_, unscopables, "findLast");
+ InstallTrueValuedProperty(isolate_, unscopables, "findLastIndex");
+ }
+
+ {
+ Handle<JSObject> prototype(native_context()->typed_array_prototype(),
+ isolate());
+ SimpleInstallFunction(isolate_, prototype, "findLast",
+ Builtin::kTypedArrayPrototypeFindLast, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "findLastIndex",
+ Builtin::kTypedArrayPrototypeFindLastIndex, 1, false);
+ }
+}
+
void Genesis::InitializeGlobal_harmony_object_has_own() {
if (!FLAG_harmony_object_has_own) return;
@@ -4473,36 +4487,6 @@ void Genesis::InitializeGlobal_harmony_weak_refs_with_cleanup_some() {
DONT_ENUM);
}
-void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
- if (!FLAG_harmony_regexp_match_indices) return;
-
- Handle<Map> source_map(native_context()->regexp_result_map(), isolate());
- Handle<Map> initial_map =
- Map::Copy(isolate(), source_map, "JSRegExpResult with indices");
- initial_map->set_instance_size(JSRegExpResultWithIndices::kSize);
- DCHECK_EQ(initial_map->GetInObjectProperties(),
- JSRegExpResultWithIndices::kInObjectPropertyCount);
-
- // indices descriptor
- {
- Descriptor d =
- Descriptor::DataField(isolate(), factory()->indices_string(),
- JSRegExpResultWithIndices::kIndicesIndex, NONE,
- Representation::Tagged());
- Map::EnsureDescriptorSlack(isolate(), initial_map, 1);
- initial_map->AppendDescriptor(isolate(), &d);
- }
-
- native_context()->set_regexp_result_with_indices_map(*initial_map);
-
- Handle<JSObject> prototype(native_context()->regexp_prototype(), isolate());
- SimpleInstallGetter(isolate(), prototype, factory()->has_indices_string(),
- Builtin::kRegExpPrototypeHasIndicesGetter, true);
-
- // Store regexp prototype map again after change.
- native_context()->set_regexp_prototype_map(prototype->map());
-}
-
void Genesis::InitializeGlobal_regexp_linear_flag() {
if (!FLAG_enable_experimental_regexp_engine) return;
@@ -4557,6 +4541,35 @@ void Genesis::InitializeGlobal_harmony_relative_indexing_methods() {
}
}
+void Genesis::InitializeGlobal_harmony_rab_gsab() {
+ if (!FLAG_harmony_rab_gsab) return;
+ Handle<JSObject> array_buffer_prototype(
+ JSObject::cast(native_context()->array_buffer_fun().instance_prototype()),
+ isolate());
+ SimpleInstallGetter(isolate(), array_buffer_prototype,
+ factory()->max_byte_length_string(),
+ Builtin::kArrayBufferPrototypeGetMaxByteLength, false);
+ SimpleInstallGetter(isolate(), array_buffer_prototype,
+ factory()->resizable_string(),
+ Builtin::kArrayBufferPrototypeGetResizable, false);
+ SimpleInstallFunction(isolate(), array_buffer_prototype, "resize",
+ Builtin::kArrayBufferPrototypeResize, 1, true);
+
+ Handle<JSObject> shared_array_buffer_prototype(
+ JSObject::cast(
+ native_context()->shared_array_buffer_fun().instance_prototype()),
+ isolate());
+ SimpleInstallGetter(isolate(), shared_array_buffer_prototype,
+ factory()->max_byte_length_string(),
+ Builtin::kSharedArrayBufferPrototypeGetMaxByteLength,
+ false);
+ SimpleInstallGetter(isolate(), shared_array_buffer_prototype,
+ factory()->growable_string(),
+ Builtin::kSharedArrayBufferPrototypeGetGrowable, false);
+ SimpleInstallFunction(isolate(), shared_array_buffer_prototype, "grow",
+ Builtin::kSharedArrayBufferPrototypeGrow, 1, true);
+}
+
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_intl_locale_info() {
@@ -4583,19 +4596,6 @@ void Genesis::InitializeGlobal_harmony_intl_locale_info() {
#endif // V8_INTL_SUPPORT
-void Genesis::InitializeGlobal_harmony_rab_gsab() {
- if (!FLAG_harmony_rab_gsab) return;
-
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
-
- JSObject::AddProperty(isolate_, global, "ResizableArrayBuffer",
- isolate()->resizable_array_buffer_fun(), DONT_ENUM);
-
- JSObject::AddProperty(isolate_, global, "GrowableSharedArrayBuffer",
- isolate()->growable_shared_array_buffer_fun(),
- DONT_ENUM);
-}
-
Handle<JSFunction> Genesis::CreateArrayBuffer(
Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
@@ -4624,7 +4624,6 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
// Install the "byteLength" getter on the {prototype}.
SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
Builtin::kArrayBufferPrototypeGetByteLength, false);
-
SimpleInstallFunction(isolate(), prototype, "slice",
Builtin::kArrayBufferPrototypeSlice, 2, true);
break;
@@ -4634,32 +4633,9 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
Builtin::kSharedArrayBufferPrototypeGetByteLength,
false);
-
SimpleInstallFunction(isolate(), prototype, "slice",
Builtin::kSharedArrayBufferPrototypeSlice, 2, true);
break;
- case RESIZABLE_ARRAY_BUFFER:
- SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
- Builtin::kResizableArrayBufferPrototypeGetByteLength,
- false);
- SimpleInstallGetter(
- isolate(), prototype, factory()->max_byte_length_string(),
- Builtin::kResizableArrayBufferPrototypeGetMaxByteLength, false);
- SimpleInstallFunction(isolate(), prototype, "resize",
- Builtin::kResizableArrayBufferPrototypeResize, 1,
- true);
- break;
- case GROWABLE_SHARED_ARRAY_BUFFER:
- SimpleInstallGetter(
- isolate(), prototype, factory()->byte_length_string(),
- Builtin::kGrowableSharedArrayBufferPrototypeGetByteLength, true);
- SimpleInstallGetter(
- isolate(), prototype, factory()->max_byte_length_string(),
- Builtin::kGrowableSharedArrayBufferPrototypeGetMaxByteLength, false);
- SimpleInstallFunction(isolate(), prototype, "grow",
- Builtin::kGrowableSharedArrayBufferPrototypeGrow, 1,
- true);
- break;
}
return array_buffer_fun;
@@ -4922,7 +4898,27 @@ bool Genesis::InstallABunchOfRandomThings() {
}
}
+ // Set up the map for RegExp results objects for regexps with the /d flag.
+ Handle<Map> initial_with_indices_map =
+ Map::Copy(isolate(), initial_map, "JSRegExpResult with indices");
+ initial_with_indices_map->set_instance_size(
+ JSRegExpResultWithIndices::kSize);
+ DCHECK_EQ(initial_with_indices_map->GetInObjectProperties(),
+ JSRegExpResultWithIndices::kInObjectPropertyCount);
+
+ // indices descriptor
+ {
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory()->indices_string(),
+ JSRegExpResultWithIndices::kIndicesIndex, NONE,
+ Representation::Tagged());
+ Map::EnsureDescriptorSlack(isolate(), initial_with_indices_map, 1);
+ initial_with_indices_map->AppendDescriptor(isolate(), &d);
+ }
+
native_context()->set_regexp_result_map(*initial_map);
+ native_context()->set_regexp_result_with_indices_map(
+ *initial_with_indices_map);
}
// Create a constructor for JSRegExpResultIndices (a variant of Array that
@@ -5009,9 +5005,23 @@ bool Genesis::InstallExtrasBindings() {
return true;
}
-void Genesis::InitializeNormalizedMapCaches() {
- Handle<NormalizedMapCache> cache = NormalizedMapCache::New(isolate());
- native_context()->set_normalized_map_cache(*cache);
+void Genesis::InitializeMapCaches() {
+ {
+ Handle<NormalizedMapCache> cache = NormalizedMapCache::New(isolate());
+ native_context()->set_normalized_map_cache(*cache);
+ }
+
+ {
+ Handle<WeakFixedArray> cache = factory()->NewWeakFixedArray(
+ JSObject::kMapCacheSize, AllocationType::kOld);
+
+ DisallowGarbageCollection no_gc;
+ native_context()->set_map_cache(*cache);
+ Map initial = native_context()->object_function().initial_map();
+ cache->Set(0, HeapObjectReference::Weak(initial), SKIP_WRITE_BARRIER);
+ cache->Set(initial.GetInObjectProperties(),
+ HeapObjectReference::Weak(initial), SKIP_WRITE_BARRIER);
+ }
}
bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
@@ -5489,8 +5499,8 @@ Genesis::Genesis(
CreateAsyncFunctionMaps(empty_function);
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
+ InitializeMapCaches();
InitializeGlobal(global_object, empty_function);
- InitializeNormalizedMapCaches();
InitializeIteratorFunctions();
InitializeCallSiteBuiltins();
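
The bootstrapper hunks above install Array.prototype.findLast and findLastIndex (and the resizable/growable buffer getters) only when the corresponding harmony flags are set. A rough embedder-side sketch of exercising the new method follows; the hello-world boilerplate and the flag spelling --harmony-array-find-last are assumed from the public V8 samples, not taken from this patch.

#include <cstdio>
#include <memory>

#include <libplatform/libplatform.h>
#include <v8.h>

int main() {
  // FLAG_harmony_array_find_last gates the installation above; the
  // command-line spelling here is an assumption, not part of this patch.
  v8::V8::SetFlagsFromString("--harmony-array-find-last");

  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  v8::Isolate::CreateParams params;
  params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);

    // findLast scans from the end: the last element below 4 in [3,1,4,1,5]
    // is 1, so this script should evaluate to 1.
    v8::Local<v8::String> source = v8::String::NewFromUtf8Literal(
        isolate, "[3, 1, 4, 1, 5].findLast(x => x < 4)");
    v8::Local<v8::Script> script =
        v8::Script::Compile(context, source).ToLocalChecked();
    v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
    std::printf("findLast -> %d\n", result->Int32Value(context).FromJust());
  }
  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete params.array_buffer_allocator;
  return 0;
}
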
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 6033c49122..d4737bf331 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -133,226 +133,227 @@
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _)
#endif // V8_INTL_SUPPORT
-#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
- INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
- V(_, add_string, "add") \
- V(_, AggregateError_string, "AggregateError") \
- V(_, always_string, "always") \
- V(_, anonymous_function_string, "(anonymous function)") \
- V(_, anonymous_string, "anonymous") \
- V(_, apply_string, "apply") \
- V(_, Arguments_string, "Arguments") \
- V(_, arguments_string, "arguments") \
- V(_, arguments_to_string, "[object Arguments]") \
- V(_, Array_string, "Array") \
- V(_, array_to_string, "[object Array]") \
- V(_, ArrayBuffer_string, "ArrayBuffer") \
- V(_, ArrayIterator_string, "Array Iterator") \
- V(_, as_string, "as") \
- V(_, assert_string, "assert") \
- V(_, async_string, "async") \
- V(_, auto_string, "auto") \
- V(_, await_string, "await") \
- V(_, BigInt_string, "BigInt") \
- V(_, bigint_string, "bigint") \
- V(_, BigInt64Array_string, "BigInt64Array") \
- V(_, BigUint64Array_string, "BigUint64Array") \
- V(_, bind_string, "bind") \
- V(_, Boolean_string, "Boolean") \
- V(_, boolean_string, "boolean") \
- V(_, boolean_to_string, "[object Boolean]") \
- V(_, bound__string, "bound ") \
- V(_, buffer_string, "buffer") \
- V(_, byte_length_string, "byteLength") \
- V(_, byte_offset_string, "byteOffset") \
- V(_, CompileError_string, "CompileError") \
- V(_, callee_string, "callee") \
- V(_, caller_string, "caller") \
- V(_, cause_string, "cause") \
- V(_, character_string, "character") \
- V(_, closure_string, "(closure)") \
- V(_, code_string, "code") \
- V(_, column_string, "column") \
- V(_, computed_string, "<computed>") \
- V(_, configurable_string, "configurable") \
- V(_, conjunction_string, "conjunction") \
- V(_, construct_string, "construct") \
- V(_, constructor_string, "constructor") \
- V(_, current_string, "current") \
- V(_, Date_string, "Date") \
- V(_, date_to_string, "[object Date]") \
- V(_, default_string, "default") \
- V(_, defineProperty_string, "defineProperty") \
- V(_, deleteProperty_string, "deleteProperty") \
- V(_, disjunction_string, "disjunction") \
- V(_, done_string, "done") \
- V(_, dot_brand_string, ".brand") \
- V(_, dot_catch_string, ".catch") \
- V(_, dot_default_string, ".default") \
- V(_, dot_for_string, ".for") \
- V(_, dot_generator_object_string, ".generator_object") \
- V(_, dot_home_object_string, ".home_object") \
- V(_, dot_result_string, ".result") \
- V(_, dot_repl_result_string, ".repl_result") \
- V(_, dot_static_home_object_string, "._static_home_object") \
- V(_, dot_string, ".") \
- V(_, dot_switch_tag_string, ".switch_tag") \
- V(_, dotAll_string, "dotAll") \
- V(_, enumerable_string, "enumerable") \
- V(_, element_string, "element") \
- V(_, Error_string, "Error") \
- V(_, errors_string, "errors") \
- V(_, error_to_string, "[object Error]") \
- V(_, eval_string, "eval") \
- V(_, EvalError_string, "EvalError") \
- V(_, exec_string, "exec") \
- V(_, false_string, "false") \
- V(_, FinalizationRegistry_string, "FinalizationRegistry") \
- V(_, flags_string, "flags") \
- V(_, Float32Array_string, "Float32Array") \
- V(_, Float64Array_string, "Float64Array") \
- V(_, from_string, "from") \
- V(_, Function_string, "Function") \
- V(_, function_native_code_string, "function () { [native code] }") \
- V(_, function_string, "function") \
- V(_, function_to_string, "[object Function]") \
- V(_, Generator_string, "Generator") \
- V(_, get_space_string, "get ") \
- V(_, get_string, "get") \
- V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(_, getPrototypeOf_string, "getPrototypeOf") \
- V(_, global_string, "global") \
- V(_, globalThis_string, "globalThis") \
- V(_, groups_string, "groups") \
- V(_, GrowableSharedArrayBuffer_string, "GrowableSharedArrayBuffer") \
- V(_, has_string, "has") \
- V(_, has_indices_string, "hasIndices") \
- V(_, ignoreCase_string, "ignoreCase") \
- V(_, illegal_access_string, "illegal access") \
- V(_, illegal_argument_string, "illegal argument") \
- V(_, index_string, "index") \
- V(_, indices_string, "indices") \
- V(_, Infinity_string, "Infinity") \
- V(_, infinity_string, "infinity") \
- V(_, input_string, "input") \
- V(_, Int16Array_string, "Int16Array") \
- V(_, Int32Array_string, "Int32Array") \
- V(_, Int8Array_string, "Int8Array") \
- V(_, isExtensible_string, "isExtensible") \
- V(_, jsMemoryEstimate_string, "jsMemoryEstimate") \
- V(_, jsMemoryRange_string, "jsMemoryRange") \
- V(_, keys_string, "keys") \
- V(_, lastIndex_string, "lastIndex") \
- V(_, length_string, "length") \
- V(_, let_string, "let") \
- V(_, line_string, "line") \
- V(_, linear_string, "linear") \
- V(_, LinkError_string, "LinkError") \
- V(_, long_string, "long") \
- V(_, Map_string, "Map") \
- V(_, MapIterator_string, "Map Iterator") \
- V(_, max_byte_length_string, "maxByteLength") \
- V(_, medium_string, "medium") \
- V(_, message_string, "message") \
- V(_, meta_string, "meta") \
- V(_, minus_Infinity_string, "-Infinity") \
- V(_, Module_string, "Module") \
- V(_, multiline_string, "multiline") \
- V(_, name_string, "name") \
- V(_, NaN_string, "NaN") \
- V(_, narrow_string, "narrow") \
- V(_, native_string, "native") \
- V(_, new_target_string, ".new.target") \
- V(_, next_string, "next") \
- V(_, NFC_string, "NFC") \
- V(_, NFD_string, "NFD") \
- V(_, NFKC_string, "NFKC") \
- V(_, NFKD_string, "NFKD") \
- V(_, not_equal_string, "not-equal") \
- V(_, null_string, "null") \
- V(_, null_to_string, "[object Null]") \
- V(_, Number_string, "Number") \
- V(_, number_string, "number") \
- V(_, number_to_string, "[object Number]") \
- V(_, Object_string, "Object") \
- V(_, object_string, "object") \
- V(_, object_to_string, "[object Object]") \
- V(_, of_string, "of") \
- V(_, ok_string, "ok") \
- V(_, one_string, "1") \
- V(_, other_string, "other") \
- V(_, ownKeys_string, "ownKeys") \
- V(_, percent_string, "percent") \
- V(_, position_string, "position") \
- V(_, preventExtensions_string, "preventExtensions") \
- V(_, private_constructor_string, "#constructor") \
- V(_, Promise_string, "Promise") \
- V(_, proto_string, "__proto__") \
- V(_, prototype_string, "prototype") \
- V(_, proxy_string, "proxy") \
- V(_, Proxy_string, "Proxy") \
- V(_, query_colon_string, "(?:)") \
- V(_, RangeError_string, "RangeError") \
- V(_, raw_string, "raw") \
- V(_, ReferenceError_string, "ReferenceError") \
- V(_, ReflectGet_string, "Reflect.get") \
- V(_, ReflectHas_string, "Reflect.has") \
- V(_, RegExp_string, "RegExp") \
- V(_, regexp_to_string, "[object RegExp]") \
- V(_, ResizableArrayBuffer_string, "ResizableArrayBuffer") \
- V(_, resolve_string, "resolve") \
- V(_, return_string, "return") \
- V(_, revoke_string, "revoke") \
- V(_, RuntimeError_string, "RuntimeError") \
- V(_, Script_string, "Script") \
- V(_, script_string, "script") \
- V(_, short_string, "short") \
- V(_, Set_string, "Set") \
- V(_, sentence_string, "sentence") \
- V(_, set_space_string, "set ") \
- V(_, set_string, "set") \
- V(_, SetIterator_string, "Set Iterator") \
- V(_, setPrototypeOf_string, "setPrototypeOf") \
- V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
- V(_, source_string, "source") \
- V(_, sourceText_string, "sourceText") \
- V(_, stack_string, "stack") \
- V(_, stackTraceLimit_string, "stackTraceLimit") \
- V(_, sticky_string, "sticky") \
- V(_, String_string, "String") \
- V(_, string_string, "string") \
- V(_, string_to_string, "[object String]") \
- V(_, symbol_species_string, "[Symbol.species]") \
- V(_, Symbol_string, "Symbol") \
- V(_, symbol_string, "symbol") \
- V(_, SyntaxError_string, "SyntaxError") \
- V(_, target_string, "target") \
- V(_, then_string, "then") \
- V(_, this_function_string, ".this_function") \
- V(_, this_string, "this") \
- V(_, throw_string, "throw") \
- V(_, timed_out_string, "timed-out") \
- V(_, toJSON_string, "toJSON") \
- V(_, toString_string, "toString") \
- V(_, true_string, "true") \
- V(_, total_string, "total") \
- V(_, TypeError_string, "TypeError") \
- V(_, Uint16Array_string, "Uint16Array") \
- V(_, Uint32Array_string, "Uint32Array") \
- V(_, Uint8Array_string, "Uint8Array") \
- V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
- V(_, undefined_string, "undefined") \
- V(_, undefined_to_string, "[object Undefined]") \
- V(_, unicode_string, "unicode") \
- V(_, URIError_string, "URIError") \
- V(_, value_string, "value") \
- V(_, valueOf_string, "valueOf") \
- V(_, WeakMap_string, "WeakMap") \
- V(_, WeakRef_string, "WeakRef") \
- V(_, WeakSet_string, "WeakSet") \
- V(_, week_string, "week") \
- V(_, word_string, "word") \
- V(_, writable_string, "writable") \
+#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
+ V(_, add_string, "add") \
+ V(_, AggregateError_string, "AggregateError") \
+ V(_, always_string, "always") \
+ V(_, anonymous_function_string, "(anonymous function)") \
+ V(_, anonymous_string, "anonymous") \
+ V(_, apply_string, "apply") \
+ V(_, Arguments_string, "Arguments") \
+ V(_, arguments_string, "arguments") \
+ V(_, arguments_to_string, "[object Arguments]") \
+ V(_, Array_string, "Array") \
+ V(_, array_to_string, "[object Array]") \
+ V(_, ArrayBuffer_string, "ArrayBuffer") \
+ V(_, ArrayIterator_string, "Array Iterator") \
+ V(_, as_string, "as") \
+ V(_, assert_string, "assert") \
+ V(_, async_string, "async") \
+ V(_, auto_string, "auto") \
+ V(_, await_string, "await") \
+ V(_, BigInt_string, "BigInt") \
+ V(_, bigint_string, "bigint") \
+ V(_, BigInt64Array_string, "BigInt64Array") \
+ V(_, BigUint64Array_string, "BigUint64Array") \
+ V(_, bind_string, "bind") \
+ V(_, Boolean_string, "Boolean") \
+ V(_, boolean_string, "boolean") \
+ V(_, boolean_to_string, "[object Boolean]") \
+ V(_, bound__string, "bound ") \
+ V(_, buffer_string, "buffer") \
+ V(_, byte_length_string, "byteLength") \
+ V(_, byte_offset_string, "byteOffset") \
+ V(_, CompileError_string, "CompileError") \
+ V(_, callee_string, "callee") \
+ V(_, caller_string, "caller") \
+ V(_, cause_string, "cause") \
+ V(_, character_string, "character") \
+ V(_, closure_string, "(closure)") \
+ V(_, code_string, "code") \
+ V(_, column_string, "column") \
+ V(_, computed_string, "<computed>") \
+ V(_, configurable_string, "configurable") \
+ V(_, conjunction_string, "conjunction") \
+ V(_, construct_string, "construct") \
+ V(_, constructor_string, "constructor") \
+ V(_, current_string, "current") \
+ V(_, Date_string, "Date") \
+ V(_, date_to_string, "[object Date]") \
+ V(_, default_string, "default") \
+ V(_, defineProperty_string, "defineProperty") \
+ V(_, deleteProperty_string, "deleteProperty") \
+ V(_, disjunction_string, "disjunction") \
+ V(_, done_string, "done") \
+ V(_, dot_brand_string, ".brand") \
+ V(_, dot_catch_string, ".catch") \
+ V(_, dot_default_string, ".default") \
+ V(_, dot_for_string, ".for") \
+ V(_, dot_generator_object_string, ".generator_object") \
+ V(_, dot_home_object_string, ".home_object") \
+ V(_, dot_result_string, ".result") \
+ V(_, dot_repl_result_string, ".repl_result") \
+ V(_, dot_static_home_object_string, "._static_home_object") \
+ V(_, dot_string, ".") \
+ V(_, dot_switch_tag_string, ".switch_tag") \
+ V(_, dotAll_string, "dotAll") \
+ V(_, enumerable_string, "enumerable") \
+ V(_, element_string, "element") \
+ V(_, Error_string, "Error") \
+ V(_, errors_string, "errors") \
+ V(_, error_to_string, "[object Error]") \
+ V(_, eval_string, "eval") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, exec_string, "exec") \
+ V(_, false_string, "false") \
+ V(_, FinalizationRegistry_string, "FinalizationRegistry") \
+ V(_, flags_string, "flags") \
+ V(_, Float32Array_string, "Float32Array") \
+ V(_, Float64Array_string, "Float64Array") \
+ V(_, from_string, "from") \
+ V(_, Function_string, "Function") \
+ V(_, function_native_code_string, "function () { [native code] }") \
+ V(_, function_string, "function") \
+ V(_, function_to_string, "[object Function]") \
+ V(_, Generator_string, "Generator") \
+ V(_, get_space_string, "get ") \
+ V(_, get_string, "get") \
+ V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(_, getPrototypeOf_string, "getPrototypeOf") \
+ V(_, global_string, "global") \
+ V(_, globalThis_string, "globalThis") \
+ V(_, groups_string, "groups") \
+ V(_, growable_string, "growable") \
+ V(_, has_string, "has") \
+ V(_, has_indices_string, "hasIndices") \
+ V(_, ignoreCase_string, "ignoreCase") \
+ V(_, illegal_access_string, "illegal access") \
+ V(_, illegal_argument_string, "illegal argument") \
+ V(_, index_string, "index") \
+ V(_, indices_string, "indices") \
+ V(_, Infinity_string, "Infinity") \
+ V(_, infinity_string, "infinity") \
+ V(_, input_string, "input") \
+ V(_, Int16Array_string, "Int16Array") \
+ V(_, Int32Array_string, "Int32Array") \
+ V(_, Int8Array_string, "Int8Array") \
+ V(_, isExtensible_string, "isExtensible") \
+ V(_, jsMemoryEstimate_string, "jsMemoryEstimate") \
+ V(_, jsMemoryRange_string, "jsMemoryRange") \
+ V(_, keys_string, "keys") \
+ V(_, lastIndex_string, "lastIndex") \
+ V(_, length_string, "length") \
+ V(_, let_string, "let") \
+ V(_, line_string, "line") \
+ V(_, linear_string, "linear") \
+ V(_, LinkError_string, "LinkError") \
+ V(_, long_string, "long") \
+ V(_, Map_string, "Map") \
+ V(_, MapIterator_string, "Map Iterator") \
+ V(_, max_byte_length_string, "maxByteLength") \
+ V(_, medium_string, "medium") \
+ V(_, message_string, "message") \
+ V(_, meta_string, "meta") \
+ V(_, minus_Infinity_string, "-Infinity") \
+ V(_, Module_string, "Module") \
+ V(_, multiline_string, "multiline") \
+ V(_, name_string, "name") \
+ V(_, NaN_string, "NaN") \
+ V(_, narrow_string, "narrow") \
+ V(_, native_string, "native") \
+ V(_, new_target_string, ".new.target") \
+ V(_, next_string, "next") \
+ V(_, NFC_string, "NFC") \
+ V(_, NFD_string, "NFD") \
+ V(_, NFKC_string, "NFKC") \
+ V(_, NFKD_string, "NFKD") \
+ V(_, not_equal_string, "not-equal") \
+ V(_, null_string, "null") \
+ V(_, null_to_string, "[object Null]") \
+ V(_, Number_string, "Number") \
+ V(_, number_string, "number") \
+ V(_, number_to_string, "[object Number]") \
+ V(_, Object_string, "Object") \
+ V(_, object_string, "object") \
+ V(_, object_to_string, "[object Object]") \
+ V(_, of_string, "of") \
+ V(_, ok_string, "ok") \
+ V(_, one_string, "1") \
+ V(_, other_string, "other") \
+ V(_, ownKeys_string, "ownKeys") \
+ V(_, percent_string, "percent") \
+ V(_, position_string, "position") \
+ V(_, preventExtensions_string, "preventExtensions") \
+ V(_, private_constructor_string, "#constructor") \
+ V(_, Promise_string, "Promise") \
+ V(_, proto_string, "__proto__") \
+ V(_, prototype_string, "prototype") \
+ V(_, proxy_string, "proxy") \
+ V(_, Proxy_string, "Proxy") \
+ V(_, query_colon_string, "(?:)") \
+ V(_, RangeError_string, "RangeError") \
+ V(_, raw_string, "raw") \
+ V(_, ReferenceError_string, "ReferenceError") \
+ V(_, ReflectGet_string, "Reflect.get") \
+ V(_, ReflectHas_string, "Reflect.has") \
+ V(_, RegExp_string, "RegExp") \
+ V(_, regexp_to_string, "[object RegExp]") \
+ V(_, resizable_string, "resizable") \
+ V(_, resolve_string, "resolve") \
+ V(_, return_string, "return") \
+ V(_, revoke_string, "revoke") \
+ V(_, RuntimeError_string, "RuntimeError") \
+ V(_, WebAssemblyException_string, "WebAssembly.Exception") \
+ V(_, Script_string, "Script") \
+ V(_, script_string, "script") \
+ V(_, short_string, "short") \
+ V(_, Set_string, "Set") \
+ V(_, sentence_string, "sentence") \
+ V(_, set_space_string, "set ") \
+ V(_, set_string, "set") \
+ V(_, SetIterator_string, "Set Iterator") \
+ V(_, setPrototypeOf_string, "setPrototypeOf") \
+ V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
+ V(_, source_string, "source") \
+ V(_, sourceText_string, "sourceText") \
+ V(_, stack_string, "stack") \
+ V(_, stackTraceLimit_string, "stackTraceLimit") \
+ V(_, sticky_string, "sticky") \
+ V(_, String_string, "String") \
+ V(_, string_string, "string") \
+ V(_, string_to_string, "[object String]") \
+ V(_, symbol_species_string, "[Symbol.species]") \
+ V(_, Symbol_string, "Symbol") \
+ V(_, symbol_string, "symbol") \
+ V(_, SyntaxError_string, "SyntaxError") \
+ V(_, target_string, "target") \
+ V(_, then_string, "then") \
+ V(_, this_function_string, ".this_function") \
+ V(_, this_string, "this") \
+ V(_, throw_string, "throw") \
+ V(_, timed_out_string, "timed-out") \
+ V(_, toJSON_string, "toJSON") \
+ V(_, toString_string, "toString") \
+ V(_, true_string, "true") \
+ V(_, total_string, "total") \
+ V(_, TypeError_string, "TypeError") \
+ V(_, Uint16Array_string, "Uint16Array") \
+ V(_, Uint32Array_string, "Uint32Array") \
+ V(_, Uint8Array_string, "Uint8Array") \
+ V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
+ V(_, undefined_string, "undefined") \
+ V(_, undefined_to_string, "[object Undefined]") \
+ V(_, unicode_string, "unicode") \
+ V(_, URIError_string, "URIError") \
+ V(_, value_string, "value") \
+ V(_, valueOf_string, "valueOf") \
+ V(_, WeakMap_string, "WeakMap") \
+ V(_, WeakRef_string, "WeakRef") \
+ V(_, WeakSet_string, "WeakSet") \
+ V(_, week_string, "week") \
+ V(_, word_string, "word") \
+ V(_, writable_string, "writable") \
V(_, zero_string, "0")
#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 5f7a83d5d3..70367d0697 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -160,12 +160,6 @@ void V8::InitializeOncePerProcessImpl() {
DISABLE_FLAG(trace_turbo_stack_accesses);
}
- if (FLAG_regexp_interpret_all && FLAG_regexp_tier_up) {
- // Turning off the tier-up strategy, because the --regexp-interpret-all and
- // --regexp-tier-up flags are incompatible.
- DISABLE_FLAG(regexp_tier_up);
- }
-
// The --jitless and --interpreted-frames-native-stack flags are incompatible
// since the latter requires code generation while the former prohibits code
// generation.
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index afe790fb62..93a73f2580 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -276,7 +276,7 @@ static String16 identifierFromTitleOrStackTrace(
String16 identifier;
if (title.isEmpty()) {
std::unique_ptr<V8StackTraceImpl> stackTrace =
- V8StackTraceImpl::capture(inspector->debugger(), helper.groupId(), 1);
+ V8StackTraceImpl::capture(inspector->debugger(), 1);
if (stackTrace && !stackTrace->isEmpty()) {
identifier = toString16(stackTrace->topSourceURL()) + ":" +
String16::fromInteger(stackTrace->topLineNumber());
@@ -591,8 +591,8 @@ static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
hints->setBoolean("queryObjects", true);
}
if (V8InspectorSessionImpl* session = helper.session(sessionId)) {
- session->runtimeAgent()->inspect(std::move(wrappedObject),
- std::move(hints));
+ session->runtimeAgent()->inspect(std::move(wrappedObject), std::move(hints),
+ helper.contextId());
}
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index f3666f749d..c49903f8c3 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -1618,7 +1618,7 @@ void V8DebuggerAgentImpl::didParseSource(
hasSourceURLComment ? &hasSourceURLComment : nullptr;
const bool* isModuleParam = isModule ? &isModule : nullptr;
std::unique_ptr<V8StackTraceImpl> stack =
- V8StackTraceImpl::capture(m_inspector->debugger(), contextGroupId, 1);
+ V8StackTraceImpl::capture(m_inspector->debugger(), 1);
std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
stack && !stack->isEmpty()
? stack->buildInspectorObjectImpl(m_debugger, 0)
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index d8f4f7ed1f..0ac934a4d3 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -859,7 +859,7 @@ v8::Local<v8::Array> V8Debugger::queryObjects(v8::Local<v8::Context> context,
std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
v8::Local<v8::StackTrace> v8StackTrace) {
- return V8StackTraceImpl::create(this, currentContextGroupId(), v8StackTrace,
+ return V8StackTraceImpl::create(this, v8StackTrace,
V8StackTraceImpl::maxCallStackSizeToCapture);
}
@@ -902,7 +902,7 @@ V8StackTraceId V8Debugger::storeCurrentStackTrace(
if (!contextGroupId) return V8StackTraceId();
std::shared_ptr<AsyncStackTrace> asyncStack =
- AsyncStackTrace::capture(this, contextGroupId, toString16(description),
+ AsyncStackTrace::capture(this, toString16(description),
V8StackTraceImpl::maxCallStackSizeToCapture);
if (!asyncStack) return V8StackTraceId();
@@ -980,9 +980,8 @@ void V8Debugger::asyncTaskScheduledForStack(const String16& taskName,
void* task, bool recurring) {
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
- std::shared_ptr<AsyncStackTrace> asyncStack =
- AsyncStackTrace::capture(this, currentContextGroupId(), taskName,
- V8StackTraceImpl::maxCallStackSizeToCapture);
+ std::shared_ptr<AsyncStackTrace> asyncStack = AsyncStackTrace::capture(
+ this, taskName, V8StackTraceImpl::maxCallStackSizeToCapture);
if (asyncStack) {
m_asyncTaskStacks[task] = asyncStack;
if (recurring) m_recurringTasks.insert(task);
@@ -1104,7 +1103,7 @@ std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
});
}
- return V8StackTraceImpl::capture(this, contextGroupId, stackSize);
+ return V8StackTraceImpl::capture(this, stackSize);
}
int V8Debugger::currentContextGroupId() {
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 778993436a..b78b641edf 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -915,9 +915,10 @@ void V8RuntimeAgentImpl::reportExecutionContextDestroyed(
void V8RuntimeAgentImpl::inspect(
std::unique_ptr<protocol::Runtime::RemoteObject> objectToInspect,
- std::unique_ptr<protocol::DictionaryValue> hints) {
+ std::unique_ptr<protocol::DictionaryValue> hints, int executionContextId) {
if (m_enabled)
- m_frontend.inspectRequested(std::move(objectToInspect), std::move(hints));
+ m_frontend.inspectRequested(std::move(objectToInspect), std::move(hints),
+ executionContextId);
}
void V8RuntimeAgentImpl::messageAdded(V8ConsoleMessage* message) {
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index 61ffca1ac6..eadc596ca3 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -129,7 +129,8 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
void reportExecutionContextCreated(InspectedContext*);
void reportExecutionContextDestroyed(InspectedContext*);
void inspect(std::unique_ptr<protocol::Runtime::RemoteObject> objectToInspect,
- std::unique_ptr<protocol::DictionaryValue> hints);
+ std::unique_ptr<protocol::DictionaryValue> hints,
+ int executionContextId);
void messageAdded(V8ConsoleMessage*);
bool enabled() const { return m_enabled; }
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 81895d3943..6400506610 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -53,7 +53,7 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
return frames;
}
-void calculateAsyncChain(V8Debugger* debugger, int contextGroupId,
+void calculateAsyncChain(V8Debugger* debugger,
std::shared_ptr<AsyncStackTrace>* asyncParent,
V8StackTraceId* externalParent, int* maxAsyncDepth) {
*asyncParent = debugger->currentAsyncParent();
@@ -61,18 +61,6 @@ void calculateAsyncChain(V8Debugger* debugger, int contextGroupId,
DCHECK(externalParent->IsInvalid() || !*asyncParent);
if (maxAsyncDepth) *maxAsyncDepth = debugger->maxAsyncCallChainDepth();
- // Do not accidentally append async call chain from another group. This should
- // not happen if we have proper instrumentation, but let's double-check to be
- // safe.
- if (contextGroupId && *asyncParent &&
- (*asyncParent)->externalParent().IsInvalid() &&
- (*asyncParent)->contextGroupId() != contextGroupId) {
- asyncParent->reset();
- *externalParent = V8StackTraceId();
- if (maxAsyncDepth) *maxAsyncDepth = 0;
- return;
- }
-
// Only the top stack in the chain may be empty, so ensure that second stack
// is non-empty (it's the top of appended chain).
if (*asyncParent && (*asyncParent)->isEmpty()) {
@@ -243,8 +231,8 @@ void V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(
// static
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
- V8Debugger* debugger, int contextGroupId,
- v8::Local<v8::StackTrace> v8StackTrace, int maxStackSize) {
+ V8Debugger* debugger, v8::Local<v8::StackTrace> v8StackTrace,
+ int maxStackSize) {
DCHECK(debugger);
v8::Isolate* isolate = debugger->isolate();
@@ -258,8 +246,7 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
int maxAsyncDepth = 0;
std::shared_ptr<AsyncStackTrace> asyncParent;
V8StackTraceId externalParent;
- calculateAsyncChain(debugger, contextGroupId, &asyncParent, &externalParent,
- &maxAsyncDepth);
+ calculateAsyncChain(debugger, &asyncParent, &externalParent, &maxAsyncDepth);
if (frames.empty() && !asyncParent && externalParent.IsInvalid())
return nullptr;
return std::unique_ptr<V8StackTraceImpl>(new V8StackTraceImpl(
@@ -268,7 +255,7 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
// static
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
- V8Debugger* debugger, int contextGroupId, int maxStackSize) {
+ V8Debugger* debugger, int maxStackSize) {
DCHECK(debugger);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
@@ -281,8 +268,7 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
v8StackTrace = v8::StackTrace::CurrentStackTrace(isolate, maxStackSize,
stackTraceOptions);
}
- return V8StackTraceImpl::create(debugger, contextGroupId, v8StackTrace,
- maxStackSize);
+ return V8StackTraceImpl::create(debugger, v8StackTrace, maxStackSize);
}
V8StackTraceImpl::V8StackTraceImpl(
@@ -419,8 +405,7 @@ StackFrame* V8StackTraceImpl::StackFrameIterator::frame() {
// static
std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
- V8Debugger* debugger, int contextGroupId, const String16& description,
- int maxStackSize) {
+ V8Debugger* debugger, const String16& description, int maxStackSize) {
DCHECK(debugger);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
@@ -438,8 +423,7 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
std::shared_ptr<AsyncStackTrace> asyncParent;
V8StackTraceId externalParent;
- calculateAsyncChain(debugger, contextGroupId, &asyncParent, &externalParent,
- nullptr);
+ calculateAsyncChain(debugger, &asyncParent, &externalParent, nullptr);
if (frames.empty() && !asyncParent && externalParent.IsInvalid())
return nullptr;
@@ -452,30 +436,21 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
return asyncParent;
}
- DCHECK(contextGroupId || asyncParent || !externalParent.IsInvalid());
- if (!contextGroupId && asyncParent) {
- contextGroupId = asyncParent->m_contextGroupId;
- }
-
- return std::shared_ptr<AsyncStackTrace>(
- new AsyncStackTrace(contextGroupId, description, std::move(frames),
- asyncParent, externalParent));
+ return std::shared_ptr<AsyncStackTrace>(new AsyncStackTrace(
+ description, std::move(frames), asyncParent, externalParent));
}
AsyncStackTrace::AsyncStackTrace(
- int contextGroupId, const String16& description,
+ const String16& description,
std::vector<std::shared_ptr<StackFrame>> frames,
std::shared_ptr<AsyncStackTrace> asyncParent,
const V8StackTraceId& externalParent)
- : m_contextGroupId(contextGroupId),
- m_id(0),
+ : m_id(0),
m_suspendedTaskId(nullptr),
m_description(description),
m_frames(std::move(frames)),
m_asyncParent(std::move(asyncParent)),
- m_externalParent(externalParent) {
- DCHECK(m_contextGroupId || (!externalParent.IsInvalid() && m_frames.empty()));
-}
+ m_externalParent(externalParent) {}
std::unique_ptr<protocol::Runtime::StackTrace>
AsyncStackTrace::buildInspectorObject(V8Debugger* debugger,
@@ -485,8 +460,6 @@ AsyncStackTrace::buildInspectorObject(V8Debugger* debugger,
maxAsyncDepth);
}
-int AsyncStackTrace::contextGroupId() const { return m_contextGroupId; }
-
void AsyncStackTrace::setSuspendedTaskId(void* task) {
m_suspendedTaskId = task;
}
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 065d5ce47c..cd86659fdb 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -49,11 +49,9 @@ class V8StackTraceImpl : public V8StackTrace {
bool capture);
static int maxCallStackSizeToCapture;
static std::unique_ptr<V8StackTraceImpl> create(V8Debugger*,
- int contextGroupId,
v8::Local<v8::StackTrace>,
int maxStackSize);
static std::unique_ptr<V8StackTraceImpl> capture(V8Debugger*,
- int contextGroupId,
int maxStackSize);
~V8StackTraceImpl() override;
@@ -114,7 +112,6 @@ class AsyncStackTrace {
AsyncStackTrace(const AsyncStackTrace&) = delete;
AsyncStackTrace& operator=(const AsyncStackTrace&) = delete;
static std::shared_ptr<AsyncStackTrace> capture(V8Debugger*,
- int contextGroupId,
const String16& description,
int maxStackSize);
static uintptr_t store(V8Debugger* debugger,
@@ -133,7 +130,6 @@ class AsyncStackTrace {
void setSuspendedTaskId(void* task);
void* suspendedTaskId() const;
- int contextGroupId() const;
const String16& description() const;
std::weak_ptr<AsyncStackTrace> parent() const;
bool isEmpty() const;
@@ -144,12 +140,11 @@ class AsyncStackTrace {
}
private:
- AsyncStackTrace(int contextGroupId, const String16& description,
+ AsyncStackTrace(const String16& description,
std::vector<std::shared_ptr<StackFrame>> frames,
std::shared_ptr<AsyncStackTrace> asyncParent,
const V8StackTraceId& externalParent);
- int m_contextGroupId;
uintptr_t m_id;
void* m_suspendedTaskId;
String16 m_description;
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index e23d8bd97f..78078f4c17 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -1170,43 +1170,6 @@ std::unique_ptr<ValueMirror> createNativeSetter(v8::Local<v8::Context> context,
return ValueMirror::create(context, function);
}
-bool doesAttributeHaveObservableSideEffectOnGet(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object,
- v8::Local<v8::Name> name) {
- // TODO(dgozman): we should remove this, annotate more embedder properties as
- // side-effect free, and call all getters which do not produce side effects.
- if (!name->IsString()) return false;
- v8::Isolate* isolate = context->GetIsolate();
- if (!name.As<v8::String>()->StringEquals(toV8String(isolate, "body"))) {
- return false;
- }
-
- v8::TryCatch tryCatch(isolate);
- v8::Local<v8::Value> request;
- if (context->Global()
- ->GetRealNamedProperty(context, toV8String(isolate, "Request"))
- .ToLocal(&request)) {
- if (request->IsObject() &&
- object->InstanceOf(context, request.As<v8::Object>())
- .FromMaybe(false)) {
- return true;
- }
- }
- if (tryCatch.HasCaught()) tryCatch.Reset();
-
- v8::Local<v8::Value> response;
- if (context->Global()
- ->GetRealNamedProperty(context, toV8String(isolate, "Response"))
- .ToLocal(&response)) {
- if (response->IsObject() &&
- object->InstanceOf(context, response.As<v8::Object>())
- .FromMaybe(false)) {
- return true;
- }
- }
- return false;
-}
-
} // anonymous namespace
ValueMirror::~ValueMirror() = default;
@@ -1238,8 +1201,6 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
}
}
- bool formatAccessorsAsProperties =
- clientFor(context)->formatAccessorsAsProperties(object);
auto iterator = v8::debug::PropertyIterator::Create(context, object);
if (!iterator) {
CHECK(tryCatch.HasCaught());
@@ -1309,25 +1270,23 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
if (!descriptor.value.IsEmpty()) {
valueMirror = ValueMirror::create(context, descriptor.value);
}
- bool getterIsNativeFunction = false;
+ v8::Local<v8::Function> getterFunction;
if (!descriptor.get.IsEmpty()) {
v8::Local<v8::Value> get = descriptor.get;
getterMirror = ValueMirror::create(context, get);
- getterIsNativeFunction =
- get->IsFunction() && get.As<v8::Function>()->ScriptId() ==
- v8::UnboundScript::kNoScriptId;
+ if (get->IsFunction()) getterFunction = get.As<v8::Function>();
}
if (!descriptor.set.IsEmpty()) {
setterMirror = ValueMirror::create(context, descriptor.set);
}
isAccessorProperty = getterMirror || setterMirror;
- if (name != "__proto__" && getterIsNativeFunction &&
- formatAccessorsAsProperties &&
- !doesAttributeHaveObservableSideEffectOnGet(context, object,
- v8Name)) {
+ if (name != "__proto__" && !getterFunction.IsEmpty() &&
+ getterFunction->ScriptId() == v8::UnboundScript::kNoScriptId) {
v8::TryCatch tryCatch(isolate);
v8::Local<v8::Value> value;
- if (object->Get(context, v8Name).ToLocal(&value)) {
+ if (v8::debug::CallFunctionOn(context, getterFunction, object, 0,
+ nullptr, true)
+ .ToLocal(&value)) {
valueMirror = ValueMirror::create(context, value);
isOwn = true;
setterMirror = nullptr;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 99d351f778..f78330bea1 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -1246,7 +1246,7 @@ Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
LOG_CODE_EVENT(isolate,
CodeLinePosInfoRecordEvent(
info_->bytecode_array()->GetFirstBytecodeAddress(),
- *source_position_table));
+ *source_position_table, JitCodeEvent::BYTE_CODE));
return source_position_table;
}
@@ -4197,7 +4197,7 @@ void BytecodeGenerator::BuildDestructuringArrayAssignment(
// var rest_runtime_callargs = new Array(3);
// rest_runtime_callargs[0] = value;
//
-// rest_runtime_callargs[1] = value;
+// rest_runtime_callargs[1] = "y";
// y = value.y;
//
// var temp1 = %ToName(x++);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 843acfd08c..c6d6e44a2f 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -1050,8 +1050,12 @@ void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
Branch(condition, &ok, &interrupt_check);
BIND(&interrupt_check);
- CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, GetContext(),
- function);
+ // JumpLoop should do a stack check as part of the interrupt.
+ CallRuntime(
+ bytecode() == Bytecode::kJumpLoop
+ ? Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode
+ : Runtime::kBytecodeBudgetInterruptFromBytecode,
+ GetContext(), function);
Goto(&done);
BIND(&ok);
@@ -1291,7 +1295,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOsrNestingLevelOffset);
+ BytecodeArray::kOsrLoopNestingLevelOffset);
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index bbc7ac6dae..e010ab2f64 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -2158,8 +2158,6 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<Int8T> osr_level = LoadOsrNestingLevel();
TNode<Context> context = GetContext();
- PerformStackCheck(context);
-
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
Label ok(this), osr_armed(this, Label::kDeferred);
@@ -2167,6 +2165,8 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
Branch(condition, &ok, &osr_armed);
BIND(&ok);
+ // The backward jump can trigger a budget interrupt, which can handle stack
+ // interrupts, so we don't need to explicitly handle them here.
JumpBackward(relative_jump);
BIND(&osr_armed);
@@ -2608,7 +2608,7 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
// previous pending message in the accumulator.
IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
TNode<ExternalReference> pending_message = ExternalConstant(
- ExternalReference::address_of_pending_message_obj(isolate()));
+ ExternalReference::address_of_pending_message(isolate()));
TNode<HeapObject> previous_message =
UncheckedCast<HeapObject>(LoadFullTagged(pending_message));
TNode<Object> new_message = GetAccumulator();
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 4d77622f24..49c8406533 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -28,6 +28,8 @@
#elif V8_OS_WIN || V8_OS_CYGWIN
+#include <windows.h>
+
#include "src/base/win32-headers.h"
#elif V8_OS_FUCHSIA
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index a939746cbc..0fcb2e15af 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -59,6 +59,8 @@ namespace internal {
51) \
HR(wasm_compile_function_peak_memory_bytes, \
V8.WasmCompileFunctionPeakMemoryBytes, 1, GB, 51) \
+ HR(wasm_compile_huge_function_peak_memory_bytes, \
+ V8.WasmCompileHugeFunctionPeakMemoryBytes, 1, GB, 51) \
HR(asm_module_size_bytes, V8.AsmModuleSizeBytes, 1, GB, 51) \
HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 20, \
21) \
@@ -102,34 +104,36 @@ namespace internal {
/* The maximum of 100M backtracks takes roughly 2 seconds on my machine. */ \
HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50)
-#define HISTOGRAM_TIMER_LIST(HT) \
- /* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
- /* Garbage collection timers. */ \
- HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
- HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
- HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
- MILLISECOND) \
- HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
- MILLISECOND) \
- HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
- MILLISECOND) \
- /* Compilation times. */ \
- HT(collect_source_positions, V8.CollectSourcePositions, 1000000, \
- MICROSECOND) \
- HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
- HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
- /* Serialization as part of compilation (code caching) */ \
- HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
- HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000, \
- MICROSECOND) \
- /* Total compilation time incl. caching/parsing */ \
- HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND) \
- /* Total JavaScript execution time (including callbacks and runtime calls */ \
- HT(execute, V8.Execute, 1000000, MICROSECOND) \
- /* Time for lazily compiling Wasm functions. */ \
- HT(wasm_lazy_compile_time, V8.WasmLazyCompileTimeMicroSeconds, 100000000, \
+#define NESTED_TIMED_HISTOGRAM_LIST(HT) \
+ /* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
+ /* Garbage collection timers. */ \
+ HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
+ HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
+ HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
+ MILLISECOND) \
+ HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
+ MILLISECOND) \
+ HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
+ MILLISECOND) \
+ /* Compilation times. */ \
+ HT(collect_source_positions, V8.CollectSourcePositions, 1000000, \
+ MICROSECOND) \
+ HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
+ HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
+ /* Serialization as part of compilation (code caching) */ \
+ HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
+ HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000, \
+ MICROSECOND) \
+ /* Total compilation time incl. caching/parsing */ \
+ HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND) \
+ /* Time for lazily compiling Wasm functions. */ \
+ HT(wasm_lazy_compile_time, V8.WasmLazyCompileTimeMicroSeconds, 100000000, \
MICROSECOND)
+#define NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT) \
+  /* Total V8 time (including JS and runtime calls, excluding callbacks) */  \
+ HT(execute_precise, V8.ExecuteMicroSeconds, 1000000, MICROSECOND)
+
#define TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, thread safe: HT(name, caption, max, unit) */ \
/* Garbage collection timers. */ \
@@ -199,6 +203,8 @@ namespace internal {
1000000, MICROSECOND) \
HT(wasm_compile_wasm_function_time, V8.WasmCompileFunctionMicroSeconds.wasm, \
1000000, MICROSECOND) \
+ HT(wasm_compile_huge_function_time, V8.WasmCompileHugeFunctionMilliSeconds, \
+ 100000, MILLISECOND) \
HT(wasm_instantiate_wasm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
diff --git a/deps/v8/src/logging/counters-scopes.h b/deps/v8/src/logging/counters-scopes.h
new file mode 100644
index 0000000000..4f5c74b5ea
--- /dev/null
+++ b/deps/v8/src/logging/counters-scopes.h
@@ -0,0 +1,191 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOGGING_COUNTERS_SCOPES_H_
+#define V8_LOGGING_COUNTERS_SCOPES_H_
+
+#include "src/execution/isolate.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+
+namespace v8 {
+namespace internal {
+
+class BaseTimedHistogramScope {
+ protected:
+ explicit BaseTimedHistogramScope(TimedHistogram* histogram)
+ : histogram_(histogram) {}
+
+ void Start() {
+ if (!histogram_->Enabled()) return;
+ DCHECK(histogram_->ToggleRunningState(true));
+ timer_.Start();
+ }
+
+ void Stop() {
+ if (!histogram_->Enabled()) return;
+ DCHECK(histogram_->ToggleRunningState(false));
+ histogram_->AddTimedSample(timer_.Elapsed());
+ timer_.Stop();
+ }
+
+ void LogStart(Isolate* isolate) {
+ Logger::CallEventLogger(isolate, histogram_->name(),
+ v8::LogEventStatus::kStart, true);
+ }
+
+ void LogEnd(Isolate* isolate) {
+ Logger::CallEventLogger(isolate, histogram_->name(),
+ v8::LogEventStatus::kEnd, true);
+ }
+
+ base::ElapsedTimer timer_;
+ TimedHistogram* histogram_;
+};
+
+// Helper class for scoping a TimedHistogram.
+class V8_NODISCARD TimedHistogramScope : public BaseTimedHistogramScope {
+ public:
+ explicit TimedHistogramScope(TimedHistogram* histogram,
+ Isolate* isolate = nullptr)
+ : BaseTimedHistogramScope(histogram), isolate_(isolate) {
+ Start();
+ if (isolate_) LogStart(isolate_);
+ }
+
+ ~TimedHistogramScope() {
+ Stop();
+ if (isolate_) LogEnd(isolate_);
+ }
+
+ private:
+ Isolate* const isolate_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
+};
+
+enum class OptionalTimedHistogramScopeMode { TAKE_TIME, DONT_TAKE_TIME };
+
+// Helper class for scoping a TimedHistogram.
+// No time is recorded when mode == DONT_TAKE_TIME.
+class V8_NODISCARD OptionalTimedHistogramScope
+ : public BaseTimedHistogramScope {
+ public:
+ OptionalTimedHistogramScope(TimedHistogram* histogram, Isolate* isolate,
+ OptionalTimedHistogramScopeMode mode)
+ : BaseTimedHistogramScope(histogram), isolate_(isolate), mode_(mode) {
+ if (mode != OptionalTimedHistogramScopeMode::TAKE_TIME) return;
+ Start();
+ LogStart(isolate_);
+ }
+
+ ~OptionalTimedHistogramScope() {
+ if (mode_ != OptionalTimedHistogramScopeMode::TAKE_TIME) return;
+ Stop();
+ LogEnd(isolate_);
+ }
+
+ private:
+ Isolate* const isolate_;
+ const OptionalTimedHistogramScopeMode mode_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(OptionalTimedHistogramScope);
+};
+
+// Helper class for scoping a TimedHistogram, where the histogram is selected at
+// stop time rather than start time.
+class V8_NODISCARD LazyTimedHistogramScope : public BaseTimedHistogramScope {
+ public:
+ LazyTimedHistogramScope() : BaseTimedHistogramScope(nullptr) {
+ timer_.Start();
+ }
+ ~LazyTimedHistogramScope() {
+ // We should set the histogram before this scope exits.
+ Stop();
+ }
+
+ void set_histogram(TimedHistogram* histogram) {
+ DCHECK_IMPLIES(histogram->Enabled(), histogram->ToggleRunningState(true));
+ histogram_ = histogram;
+ }
+};
+
+// Helper class for scoping a NestedTimedHistogram.
+class V8_NODISCARD NestedTimedHistogramScope : public BaseTimedHistogramScope {
+ public:
+ explicit NestedTimedHistogramScope(NestedTimedHistogram* histogram)
+ : BaseTimedHistogramScope(histogram) {
+ Start();
+ }
+ ~NestedTimedHistogramScope() { Stop(); }
+
+ private:
+ friend NestedTimedHistogram;
+ friend PauseNestedTimedHistogramScope;
+
+ void Start() {
+ previous_scope_ = timed_histogram()->Enter(this);
+ if (histogram_->Enabled()) {
+ base::TimeTicks now = base::TimeTicks::HighResolutionNow();
+ if (previous_scope_) previous_scope_->Pause(now);
+ timer_.Start(now);
+ }
+ LogStart(timed_histogram()->counters()->isolate());
+ }
+
+ void Stop() {
+ timed_histogram()->Leave(previous_scope_);
+ if (histogram_->Enabled()) {
+ base::TimeTicks now = base::TimeTicks::HighResolutionNow();
+ histogram_->AddTimedSample(timer_.Elapsed(now));
+ timer_.Stop();
+ if (previous_scope_) previous_scope_->Resume(now);
+ }
+ LogEnd(timed_histogram()->counters()->isolate());
+ }
+
+ void Pause(base::TimeTicks now) {
+ DCHECK(histogram_->Enabled());
+ timer_.Pause(now);
+ }
+
+ void Resume(base::TimeTicks now) {
+ DCHECK(histogram_->Enabled());
+ timer_.Resume(now);
+ }
+
+ NestedTimedHistogram* timed_histogram() {
+ return static_cast<NestedTimedHistogram*>(histogram_);
+ }
+
+ NestedTimedHistogramScope* previous_scope_;
+};
+
+// Temporarily pause a NestedTimedHistogram when, for instance, leaving V8
+// for external callbacks.
+class V8_NODISCARD PauseNestedTimedHistogramScope {
+ public:
+ explicit PauseNestedTimedHistogramScope(NestedTimedHistogram* histogram)
+ : histogram_(histogram) {
+ previous_scope_ = histogram_->Enter(nullptr);
+ if (isEnabled()) {
+ previous_scope_->Pause(base::TimeTicks::HighResolutionNow());
+ }
+ }
+ ~PauseNestedTimedHistogramScope() {
+ histogram_->Leave(previous_scope_);
+ if (isEnabled()) {
+ previous_scope_->Resume(base::TimeTicks::HighResolutionNow());
+ }
+ }
+
+ private:
+ bool isEnabled() const { return previous_scope_ && histogram_->Enabled(); }
+ NestedTimedHistogram* histogram_;
+ NestedTimedHistogramScope* previous_scope_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOGGING_COUNTERS_SCOPES_H_
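
The new counters-scopes.h above keeps the scope helpers as RAII objects and makes each nested scope pause its enclosing scope, so every histogram records only its own self time. A standalone analogue of that pause/resume bookkeeping follows; ScopedSelfTimer and its printf reporting are invented for this sketch, while the real classes report to TimedHistogram and live inside V8.

#include <chrono>
#include <cstdio>

// Illustrative analogue of NestedTimedHistogramScope (names invented for
// this sketch): an RAII timer whose nested instances pause the enclosing
// one, so every scope accumulates only its own "self" time.
class ScopedSelfTimer {
 public:
  using Clock = std::chrono::steady_clock;

  explicit ScopedSelfTimer(const char* name)
      : name_(name),
        parent_(current_),
        start_(Clock::now()),
        paused_(Clock::duration::zero()) {
    if (parent_ != nullptr) parent_->Pause();
    current_ = this;
  }

  ~ScopedSelfTimer() {
    Clock::duration self = (Clock::now() - start_) - paused_;
    std::printf(
        "%s: %lld us\n", name_,
        static_cast<long long>(
            std::chrono::duration_cast<std::chrono::microseconds>(self)
                .count()));
    current_ = parent_;
    if (parent_ != nullptr) parent_->Resume();
  }

 private:
  void Pause() { pause_start_ = Clock::now(); }
  void Resume() { paused_ += Clock::now() - pause_start_; }

  static thread_local ScopedSelfTimer* current_;
  const char* name_;
  ScopedSelfTimer* parent_;
  Clock::time_point start_;
  Clock::time_point pause_start_{};
  Clock::duration paused_;
};

thread_local ScopedSelfTimer* ScopedSelfTimer::current_ = nullptr;

int main() {
  ScopedSelfTimer outer("execute");
  {
    ScopedSelfTimer inner("compile");  // pauses "execute" while it runs
    for (volatile int i = 0; i < 1000000; i = i + 1) {
    }
  }
  return 0;
}

The same shape is why LazyTimedHistogramScope can defer choosing its histogram until destruction: only the stop path needs to know where the sample goes.
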
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index 3ffced0832..a333327e93 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -77,46 +77,48 @@ void* Histogram::CreateHistogram() const {
return counters_->CreateHistogram(name_, min_, max_, num_buckets_);
}
+void TimedHistogram::Stop(base::ElapsedTimer* timer) {
+ DCHECK(Enabled());
+ AddTimedSample(timer->Elapsed());
+ timer->Stop();
+}
+
void TimedHistogram::AddTimedSample(base::TimeDelta sample) {
if (Enabled()) {
- int64_t sample_int = resolution_ == HistogramTimerResolution::MICROSECOND
+ int64_t sample_int = resolution_ == TimedHistogramResolution::MICROSECOND
? sample.InMicroseconds()
: sample.InMilliseconds();
AddSample(static_cast<int>(sample_int));
}
}
-void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
- if (Enabled()) timer->Start();
- if (isolate) Logger::CallEventLogger(isolate, name(), Logger::START, true);
-}
-
-void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
- if (Enabled()) {
- base::TimeDelta delta = timer->Elapsed();
- timer->Stop();
- AddTimedSample(delta);
- }
- if (isolate != nullptr) {
- Logger::CallEventLogger(isolate, name(), Logger::END, true);
- }
-}
-
void TimedHistogram::RecordAbandon(base::ElapsedTimer* timer,
Isolate* isolate) {
if (Enabled()) {
DCHECK(timer->IsStarted());
timer->Stop();
- int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
+ int64_t sample = resolution_ == TimedHistogramResolution::MICROSECOND
? base::TimeDelta::Max().InMicroseconds()
: base::TimeDelta::Max().InMilliseconds();
AddSample(static_cast<int>(sample));
}
if (isolate != nullptr) {
- Logger::CallEventLogger(isolate, name(), Logger::END, true);
+ Logger::CallEventLogger(isolate, name(), v8::LogEventStatus::kEnd, true);
}
}
+#ifdef DEBUG
+bool TimedHistogram::ToggleRunningState(bool expect_to_run) const {
+ static thread_local base::LazyInstance<
+ std::unordered_map<const TimedHistogram*, bool>>::type active_timer =
+ LAZY_INSTANCE_INITIALIZER;
+ bool is_running = (*active_timer.Pointer())[this];
+ DCHECK_NE(is_running, expect_to_run);
+ (*active_timer.Pointer())[this] = !is_running;
+ return true;
+}
+#endif
+
Counters::Counters(Isolate* isolate)
:
#define SC(name, caption) name##_(this, "c:" #caption),
@@ -149,29 +151,30 @@ Counters::Counters(Isolate* isolate)
const int DefaultTimedHistogramNumBuckets = 50;
static const struct {
- HistogramTimer Counters::*member;
+ NestedTimedHistogram Counters::*member;
const char* caption;
int max;
- HistogramTimerResolution res;
- } kHistogramTimers[] = {
+ TimedHistogramResolution res;
+ } kNestedTimedHistograms[] = {
#define HT(name, caption, max, res) \
- {&Counters::name##_, #caption, max, HistogramTimerResolution::res},
- HISTOGRAM_TIMER_LIST(HT)
+ {&Counters::name##_, #caption, max, TimedHistogramResolution::res},
+ NESTED_TIMED_HISTOGRAM_LIST(HT) NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT)
#undef HT
};
- for (const auto& timer : kHistogramTimers) {
- this->*timer.member = HistogramTimer(timer.caption, 0, timer.max, timer.res,
- DefaultTimedHistogramNumBuckets, this);
+ for (const auto& timer : kNestedTimedHistograms) {
+ this->*timer.member =
+ NestedTimedHistogram(timer.caption, 0, timer.max, timer.res,
+ DefaultTimedHistogramNumBuckets, this);
}
static const struct {
TimedHistogram Counters::*member;
const char* caption;
int max;
- HistogramTimerResolution res;
+ TimedHistogramResolution res;
} kTimedHistograms[] = {
#define HT(name, caption, max, res) \
- {&Counters::name##_, #caption, max, HistogramTimerResolution::res},
+ {&Counters::name##_, #caption, max, TimedHistogramResolution::res},
TIMED_HISTOGRAM_LIST(HT)
#undef HT
};
@@ -297,7 +300,11 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
#undef HR
#define HT(name, caption, max, res) name##_.Reset();
- HISTOGRAM_TIMER_LIST(HT)
+ NESTED_TIMED_HISTOGRAM_LIST(HT)
+#undef HT
+
+#define HT(name, caption, max, res) name##_.Reset(FLAG_slow_histograms);
+ NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT)
#undef HT
#define HT(name, caption, max, res) name##_.Reset();
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 740fd2679a..3a2527f49c 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -214,7 +214,7 @@ class Histogram {
// Returns true if this histogram is enabled.
bool Enabled() { return histogram_ != nullptr; }
- const char* name() { return name_; }
+ const char* name() const { return name_; }
int min() const { return min_; }
int max() const { return max_; }
@@ -242,7 +242,9 @@ class Histogram {
Counters* counters() const { return counters_; }
// Reset the cached internal pointer.
- void Reset() { histogram_ = CreateHistogram(); }
+ void Reset(bool create_new = true) {
+ histogram_ = create_new ? CreateHistogram() : nullptr;
+ }
private:
friend class Counters;
@@ -257,204 +259,74 @@ class Histogram {
Counters* counters_;
};
-enum class HistogramTimerResolution { MILLISECOND, MICROSECOND };
+enum class TimedHistogramResolution { MILLISECOND, MICROSECOND };
// A thread safe histogram timer. It also allows distributions of
// nested timed results.
class TimedHistogram : public Histogram {
public:
- // Start the timer. Log if isolate non-null.
- V8_EXPORT_PRIVATE void Start(base::ElapsedTimer* timer, Isolate* isolate);
-
- // Stop the timer and record the results. Log if isolate non-null.
- V8_EXPORT_PRIVATE void Stop(base::ElapsedTimer* timer, Isolate* isolate);
-
// Records a TimeDelta::Max() result. Useful to record percentage of tasks
// that never got to run in a given scenario. Log if isolate non-null.
void RecordAbandon(base::ElapsedTimer* timer, Isolate* isolate);
// Add a single sample to this histogram.
- void AddTimedSample(base::TimeDelta sample);
+ V8_EXPORT_PRIVATE void AddTimedSample(base::TimeDelta sample);
+
+#ifdef DEBUG
+ // Ensures that we don't have nested timers for TimedHistogram per thread;
+ // use NestedTimedHistogram, which correctly pauses and resumes timers.
+ // This method assumes that each timer is alternating between stopped and
+ // started on a single thread. Multiple timers can be active on different
+ // threads.
+ bool ToggleRunningState(bool expected_is_running) const;
+#endif // DEBUG
protected:
+ void Stop(base::ElapsedTimer* timer);
+ void LogStart(Isolate* isolate);
+ void LogEnd(Isolate* isolate);
+
friend class Counters;
- HistogramTimerResolution resolution_;
+ TimedHistogramResolution resolution_;
TimedHistogram() = default;
TimedHistogram(const char* name, int min, int max,
- HistogramTimerResolution resolution, int num_buckets,
+ TimedHistogramResolution resolution, int num_buckets,
Counters* counters)
: Histogram(name, min, max, num_buckets, counters),
resolution_(resolution) {}
- void AddTimeSample();
-};
-
-// Helper class for scoping a TimedHistogram.
-class V8_NODISCARD TimedHistogramScope {
- public:
- explicit TimedHistogramScope(TimedHistogram* histogram,
- Isolate* isolate = nullptr)
- : histogram_(histogram), isolate_(isolate) {
- histogram_->Start(&timer_, isolate);
- }
-
- ~TimedHistogramScope() { histogram_->Stop(&timer_, isolate_); }
-
- private:
- base::ElapsedTimer timer_;
- TimedHistogram* histogram_;
- Isolate* isolate_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
-};
-
-enum class OptionalTimedHistogramScopeMode { TAKE_TIME, DONT_TAKE_TIME };
-
-// Helper class for scoping a TimedHistogram.
-// It will not take time for mode = DONT_TAKE_TIME.
-class V8_NODISCARD OptionalTimedHistogramScope {
- public:
- OptionalTimedHistogramScope(TimedHistogram* histogram, Isolate* isolate,
- OptionalTimedHistogramScopeMode mode)
- : histogram_(histogram), isolate_(isolate), mode_(mode) {
- if (mode == OptionalTimedHistogramScopeMode::TAKE_TIME) {
- histogram_->Start(&timer_, isolate);
- }
- }
-
- ~OptionalTimedHistogramScope() {
- if (mode_ == OptionalTimedHistogramScopeMode::TAKE_TIME) {
- histogram_->Stop(&timer_, isolate_);
- }
- }
-
- private:
- base::ElapsedTimer timer_;
- TimedHistogram* const histogram_;
- Isolate* const isolate_;
- const OptionalTimedHistogramScopeMode mode_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(OptionalTimedHistogramScope);
};
-// Helper class for recording a TimedHistogram asynchronously with manual
-// controls (it will not generate a report if destroyed without explicitly
-// triggering a report). |async_counters| should be a shared_ptr to
-// |histogram->counters()|, making it is safe to report to an
-// AsyncTimedHistogram after the associated isolate has been destroyed.
-// AsyncTimedHistogram can be moved/copied to avoid computing Now() multiple
-// times when the times of multiple tasks are identical; each copy will generate
-// its own report.
-class AsyncTimedHistogram {
- public:
- explicit AsyncTimedHistogram(TimedHistogram* histogram,
- std::shared_ptr<Counters> async_counters)
- : histogram_(histogram), async_counters_(std::move(async_counters)) {
- histogram_->AssertReportsToCounters(async_counters_.get());
- histogram_->Start(&timer_, nullptr);
- }
-
- // Records the time elapsed to |histogram_| and stops |timer_|.
- void RecordDone() { histogram_->Stop(&timer_, nullptr); }
-
- // Records TimeDelta::Max() to |histogram_| and stops |timer_|.
- void RecordAbandon() { histogram_->RecordAbandon(&timer_, nullptr); }
-
- private:
- base::ElapsedTimer timer_;
- TimedHistogram* histogram_;
- std::shared_ptr<Counters> async_counters_;
-};
+class NestedTimedHistogramScope;
+class PauseNestedTimedHistogramScope;
-// Helper class for scoping a TimedHistogram, where the histogram is selected at
-// stop time rather than start time.
-// TODO(leszeks): This is heavily reliant on TimedHistogram::Start() doing
-// nothing but starting the timer, and TimedHistogram::Stop() logging the sample
-// correctly even if Start() was not called. This happens to be true iff Stop()
-// is passed a null isolate, but that's an implementation detail of
-// TimedHistogram, and we shouldn't rely on it.
-class V8_NODISCARD LazyTimedHistogramScope {
- public:
- LazyTimedHistogramScope() : histogram_(nullptr) { timer_.Start(); }
- ~LazyTimedHistogramScope() {
- // We should set the histogram before this scope exits.
- DCHECK_NOT_NULL(histogram_);
- histogram_->Stop(&timer_, nullptr);
- }
-
- void set_histogram(TimedHistogram* histogram) { histogram_ = histogram; }
-
- private:
- base::ElapsedTimer timer_;
- TimedHistogram* histogram_;
-};
-
-// A HistogramTimer allows distributions of non-nested timed results
-// to be created. WARNING: This class is not thread safe and can only
-// be run on the foreground thread.
-class HistogramTimer : public TimedHistogram {
+// A NestedTimedHistogram allows distributions of nested timed results.
+class NestedTimedHistogram : public TimedHistogram {
public:
// Note: public for testing purposes only.
- HistogramTimer(const char* name, int min, int max,
- HistogramTimerResolution resolution, int num_buckets,
- Counters* counters)
+ NestedTimedHistogram(const char* name, int min, int max,
+ TimedHistogramResolution resolution, int num_buckets,
+ Counters* counters)
: TimedHistogram(name, min, max, resolution, num_buckets, counters) {}
- inline void Start();
- inline void Stop();
-
- // Returns true if the timer is running.
- bool Running() { return Enabled() && timer_.IsStarted(); }
-
- // TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
-#ifdef DEBUG
- base::ElapsedTimer* timer() { return &timer_; }
-#endif
-
private:
friend class Counters;
+ friend class NestedTimedHistogramScope;
+ friend class PauseNestedTimedHistogramScope;
- base::ElapsedTimer timer_;
-
- HistogramTimer() = default;
-};
-
-// Helper class for scoping a HistogramTimer.
-// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the
-// Parser is currently reentrant (when it throws an error, we call back
-// into JavaScript and all bets are off), but ElapsedTimer is not
-// reentry-safe. Fix this properly and remove |allow_nesting|.
-class V8_NODISCARD HistogramTimerScope {
- public:
- explicit HistogramTimerScope(HistogramTimer* timer,
- bool allow_nesting = false)
-#ifdef DEBUG
- : timer_(timer), skipped_timer_start_(false) {
- if (timer_->timer()->IsStarted() && allow_nesting) {
- skipped_timer_start_ = true;
- } else {
- timer_->Start();
- }
- }
-#else
- : timer_(timer) {
- timer_->Start();
+ inline NestedTimedHistogramScope* Enter(NestedTimedHistogramScope* next) {
+ NestedTimedHistogramScope* previous = current_;
+ current_ = next;
+ return previous;
}
-#endif
- ~HistogramTimerScope() {
-#ifdef DEBUG
- if (!skipped_timer_start_) {
- timer_->Stop();
- }
-#else
- timer_->Stop();
-#endif
+
+ inline void Leave(NestedTimedHistogramScope* previous) {
+ current_ = previous;
}
- private:
- HistogramTimer* timer_;
-#ifdef DEBUG
- bool skipped_timer_start_;
-#endif
+ NestedTimedHistogramScope* current_ = nullptr;
+
+ NestedTimedHistogram() = default;
};
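
Enter() and Leave() swap the histogram's current_ pointer, so scopes naturally form a per-histogram stack. A rough sketch of the RAII shape a scope class can build on top of this (illustrative types only; the real NestedTimedHistogramScope also pauses and resumes the enclosing scope's timer, which is omitted here):

    class NestedHistogram;

    // RAII scope that links itself into the histogram's scope stack on
    // construction and restores the previous innermost scope on destruction.
    class Scope {
     public:
      explicit Scope(NestedHistogram* histogram);
      ~Scope();

     private:
      NestedHistogram* histogram_;
      Scope* previous_;
    };

    class NestedHistogram {
     public:
      // Installs |next| as the innermost scope and returns the one it replaced.
      Scope* Enter(Scope* next) {
        Scope* previous = current_;
        current_ = next;
        return previous;
      }
      // Restores the scope that was innermost before the matching Enter().
      void Leave(Scope* previous) { current_ = previous; }

     private:
      Scope* current_ = nullptr;
    };

    Scope::Scope(NestedHistogram* histogram)
        : histogram_(histogram), previous_(histogram->Enter(this)) {}
    Scope::~Scope() { histogram_->Leave(previous_); }

    int main() {
      NestedHistogram histogram;
      Scope outer(&histogram);
      {
        Scope inner(&histogram);  // replaces |outer| as the current scope
      }                           // destructor restores |outer|
    }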
// A histogram timer that can aggregate events within a larger scope.
@@ -672,8 +544,9 @@ class Counters : public std::enable_shared_from_this<Counters> {
#undef HR
#define HT(name, caption, max, res) \
- HistogramTimer* name() { return &name##_; }
- HISTOGRAM_TIMER_LIST(HT)
+ NestedTimedHistogram* name() { return &name##_; }
+ NESTED_TIMED_HISTOGRAM_LIST(HT)
+ NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT)
#undef HT
#define HT(name, caption, max, res) \
@@ -711,7 +584,8 @@ class Counters : public std::enable_shared_from_this<Counters> {
// clang-format off
enum Id {
#define RATE_ID(name, caption, max, res) k_##name,
- HISTOGRAM_TIMER_LIST(RATE_ID)
+ NESTED_TIMED_HISTOGRAM_LIST(RATE_ID)
+ NESTED_TIMED_HISTOGRAM_LIST_SLOW(RATE_ID)
TIMED_HISTOGRAM_LIST(RATE_ID)
#undef RATE_ID
#define AGGREGATABLE_ID(name, caption) k_##name,
@@ -762,7 +636,7 @@ class Counters : public std::enable_shared_from_this<Counters> {
friend class StatsTable;
friend class StatsCounterBase;
friend class Histogram;
- friend class HistogramTimer;
+ friend class NestedTimedHistogramScope;
int* FindLocation(const char* name) {
return stats_table_.FindLocation(name);
@@ -782,8 +656,9 @@ class Counters : public std::enable_shared_from_this<Counters> {
HISTOGRAM_RANGE_LIST(HR)
#undef HR
-#define HT(name, caption, max, res) HistogramTimer name##_;
- HISTOGRAM_TIMER_LIST(HT)
+#define HT(name, caption, max, res) NestedTimedHistogram name##_;
+ NESTED_TIMED_HISTOGRAM_LIST(HT)
+ NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT)
#undef HT
#define HT(name, caption, max, res) TimedHistogram name##_;
@@ -840,13 +715,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
-void HistogramTimer::Start() {
- TimedHistogram::Start(&timer_, counters()->isolate());
-}
-
-void HistogramTimer::Stop() {
- TimedHistogram::Stop(&timer_, counters()->isolate());
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/logging/local-logger.cc b/deps/v8/src/logging/local-logger.cc
index bd0a1598e3..e49bbfd3bd 100644
--- a/deps/v8/src/logging/local-logger.cc
+++ b/deps/v8/src/logging/local-logger.cc
@@ -5,6 +5,7 @@
#include "src/logging/local-logger.h"
#include "src/execution/isolate.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -23,9 +24,15 @@ void LocalLogger::ScriptEvent(Logger::ScriptEventType type, int script_id) {
logger_->ScriptEvent(type, script_id);
}
void LocalLogger::CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray source_position_table) {
- logger_->CodeLinePosInfoRecordEvent(code_start, source_position_table);
+ ByteArray source_position_table,
+ JitCodeEvent::CodeType code_type) {
+ logger_->CodeLinePosInfoRecordEvent(code_start, source_position_table,
+ code_type);
}
+void LocalLogger::MapCreate(Map map) { logger_->MapCreate(map); }
+
+void LocalLogger::MapDetails(Map map) { logger_->MapDetails(map); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/logging/local-logger.h b/deps/v8/src/logging/local-logger.h
index a727013e92..a827980908 100644
--- a/deps/v8/src/logging/local-logger.h
+++ b/deps/v8/src/logging/local-logger.h
@@ -23,7 +23,11 @@ class LocalLogger {
void ScriptDetails(Script script);
void ScriptEvent(Logger::ScriptEventType type, int script_id);
void CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray source_position_table);
+ ByteArray source_position_table,
+ JitCodeEvent::CodeType code_type);
+
+ void MapCreate(Map map);
+ void MapDetails(Map map);
private:
Logger* logger_;
diff --git a/deps/v8/src/logging/log-inl.h b/deps/v8/src/logging/log-inl.h
index 83677f5f64..0929854dcb 100644
--- a/deps/v8/src/logging/log-inl.h
+++ b/deps/v8/src/logging/log-inl.h
@@ -28,19 +28,8 @@ CodeEventListener::LogEventsAndTags Logger::ToNativeByScript(
}
}
-void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
- bool expose_to_api) {
- if (isolate->event_logger()) {
- if (isolate->event_logger() == DefaultEventLoggerSentinel) {
- LOG(isolate, TimerEvent(se, name));
- } else if (expose_to_api) {
- isolate->event_logger()(name, se);
- }
- }
-}
-
template <class TimerEvent>
-void TimerEventScope<TimerEvent>::LogTimerEvent(Logger::StartEnd se) {
+void TimerEventScope<TimerEvent>::LogTimerEvent(v8::LogEventStatus se) {
Logger::CallEventLogger(isolate_, TimerEvent::name(), se,
TimerEvent::expose_to_api());
}
diff --git a/deps/v8/src/logging/log-utils.cc b/deps/v8/src/logging/log-utils.cc
index 5e27880560..67a52a5873 100644
--- a/deps/v8/src/logging/log-utils.cc
+++ b/deps/v8/src/logging/log-utils.cc
@@ -66,6 +66,8 @@ void Log::WriteLogHeader() {
}
msg << kNext << Version::IsCandidate();
msg.WriteToLogFile();
+ msg << "v8-platform" << kNext << V8_OS_STRING << kNext << V8_TARGET_OS_STRING;
+ msg.WriteToLogFile();
}
std::unique_ptr<Log::MessageBuilder> Log::NewMessageBuilder() {
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 3fc145604b..4f6aa856d7 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -681,10 +681,12 @@ class JitLogger : public CodeEventLogger {
Handle<SharedFunctionInfo> shared) override {}
void AddCodeLinePosInfoEvent(void* jit_handler_data, int pc_offset,
int position,
- JitCodeEvent::PositionType position_type);
+ JitCodeEvent::PositionType position_type,
+ JitCodeEvent::CodeType code_type);
- void* StartCodePosInfoEvent();
- void EndCodePosInfoEvent(Address start_address, void* jit_handler_data);
+ void* StartCodePosInfoEvent(JitCodeEvent::CodeType code_type);
+ void EndCodePosInfoEvent(Address start_address, void* jit_handler_data,
+ JitCodeEvent::CodeType code_type);
private:
void LogRecordedBuffer(Handle<AbstractCode> code,
@@ -705,8 +707,7 @@ JitLogger::JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler)
void JitLogger::LogRecordedBuffer(Handle<AbstractCode> code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) {
- JitCodeEvent event;
- memset(static_cast<void*>(&event), 0, sizeof(event));
+ JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_ADDED;
event.code_start = reinterpret_cast<void*>(code->InstructionStart());
event.code_type =
@@ -727,8 +728,7 @@ void JitLogger::LogRecordedBuffer(Handle<AbstractCode> code,
#if V8_ENABLE_WEBASSEMBLY
void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
- JitCodeEvent event;
- memset(static_cast<void*>(&event), 0, sizeof(event));
+ JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_ADDED;
event.code_type = JitCodeEvent::JIT_CODE;
event.code_start = code->instructions().begin();
@@ -793,10 +793,11 @@ void JitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
void JitLogger::AddCodeLinePosInfoEvent(
void* jit_handler_data, int pc_offset, int position,
- JitCodeEvent::PositionType position_type) {
- JitCodeEvent event;
- memset(static_cast<void*>(&event), 0, sizeof(event));
+ JitCodeEvent::PositionType position_type,
+ JitCodeEvent::CodeType code_type) {
+ JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
+ event.code_type = code_type;
event.user_data = jit_handler_data;
event.line_info.offset = pc_offset;
event.line_info.pos = position;
@@ -806,10 +807,10 @@ void JitLogger::AddCodeLinePosInfoEvent(
code_event_handler_(&event);
}
-void* JitLogger::StartCodePosInfoEvent() {
- JitCodeEvent event;
- memset(static_cast<void*>(&event), 0, sizeof(event));
+void* JitLogger::StartCodePosInfoEvent(JitCodeEvent::CodeType code_type) {
+ JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
+ event.code_type = code_type;
event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
@@ -817,10 +818,11 @@ void* JitLogger::StartCodePosInfoEvent() {
}
void JitLogger::EndCodePosInfoEvent(Address start_address,
- void* jit_handler_data) {
- JitCodeEvent event;
- memset(static_cast<void*>(&event), 0, sizeof(event));
+ void* jit_handler_data,
+ JitCodeEvent::CodeType code_type) {
+ JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
+ event.code_type = code_type;
event.code_start = reinterpret_cast<void*>(start_address);
event.user_data = jit_handler_data;
event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
@@ -974,6 +976,7 @@ void Profiler::Engage() {
LOG(isolate_, SharedLibraryEvent(address.library_path, address.start,
address.end, address.aslr_slide));
}
+ LOG(isolate_, SharedLibraryEnd());
// Start thread processing the profiler buffer.
base::Relaxed_Store(&running_, 1);
@@ -983,7 +986,7 @@ void Profiler::Engage() {
Logger* logger = isolate_->logger();
logger->ticker_->SetProfiler(this);
- logger->ProfilerBeginEvent();
+ LOG(isolate_, ProfilerBeginEvent());
}
void Profiler::Disengage() {
@@ -1075,8 +1078,8 @@ void Logger::HandleEvent(const char* name, Address* location) {
msg.WriteToLogFile();
}
-void Logger::ApiSecurityCheck() {
- if (!FLAG_log_api) return;
+void Logger::WriteApiSecurityCheck() {
+ DCHECK(FLAG_log_api);
MSG_BUILDER();
msg << "api" << kNext << "check-security";
msg.WriteToLogFile();
@@ -1093,6 +1096,13 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
msg.WriteToLogFile();
}
+void Logger::SharedLibraryEnd() {
+ if (!FLAG_prof_cpp) return;
+ MSG_BUILDER();
+ msg << "shared-library-end";
+ msg.WriteToLogFile();
+}
+
void Logger::CurrentTimeEvent() {
DCHECK(FLAG_log_internal_timer_events);
MSG_BUILDER();
@@ -1100,16 +1110,16 @@ void Logger::CurrentTimeEvent() {
msg.WriteToLogFile();
}
-void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
+void Logger::TimerEvent(v8::LogEventStatus se, const char* name) {
MSG_BUILDER();
switch (se) {
- case START:
+ case kStart:
msg << "timer-event-start";
break;
- case END:
+ case kEnd:
msg << "timer-event-end";
break;
- case STAMP:
+ case kStamp:
msg << "timer-event";
}
msg << kNext << name << kNext << Time();
@@ -1142,38 +1152,38 @@ bool Logger::is_logging() {
// Instantiate template methods.
#define V(TimerName, expose) \
template void TimerEventScope<TimerEvent##TimerName>::LogTimerEvent( \
- Logger::StartEnd se);
+ v8::LogEventStatus se);
TIMER_EVENTS_LIST(V)
#undef V
-void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder,
- Object property_name) {
+void Logger::WriteApiNamedPropertyAccess(const char* tag, JSObject holder,
+ Object property_name) {
+ DCHECK(FLAG_log_api);
DCHECK(property_name.IsName());
- if (!FLAG_log_api) return;
MSG_BUILDER();
msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
<< Name::cast(property_name);
msg.WriteToLogFile();
}
-void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder,
- uint32_t index) {
- if (!FLAG_log_api) return;
+void Logger::WriteApiIndexedPropertyAccess(const char* tag, JSObject holder,
+ uint32_t index) {
+ DCHECK(FLAG_log_api);
MSG_BUILDER();
msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
<< index;
msg.WriteToLogFile();
}
-void Logger::ApiObjectAccess(const char* tag, JSReceiver object) {
- if (!FLAG_log_api) return;
+void Logger::WriteApiObjectAccess(const char* tag, JSReceiver object) {
+ DCHECK(FLAG_log_api);
MSG_BUILDER();
msg << "api" << kNext << tag << kNext << object.class_name();
msg.WriteToLogFile();
}
-void Logger::ApiEntryCall(const char* name) {
- if (!FLAG_log_api) return;
+void Logger::WriteApiEntryCall(const char* name) {
+ DCHECK(FLAG_log_api);
MSG_BUILDER();
msg << "api" << kNext << name;
msg.WriteToLogFile();
@@ -1521,35 +1531,38 @@ void Logger::CodeDependencyChangeEvent(Handle<Code> code,
namespace {
void CodeLinePosEvent(JitLogger& jit_logger, Address code_start,
- SourcePositionTableIterator& iter) {
- void* jit_handler_data = jit_logger.StartCodePosInfoEvent();
+ SourcePositionTableIterator& iter,
+ JitCodeEvent::CodeType code_type) {
+ void* jit_handler_data = jit_logger.StartCodePosInfoEvent(code_type);
for (; !iter.done(); iter.Advance()) {
if (iter.is_statement()) {
jit_logger.AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
iter.source_position().ScriptOffset(),
- JitCodeEvent::STATEMENT_POSITION);
+ JitCodeEvent::STATEMENT_POSITION,
+ code_type);
}
jit_logger.AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
iter.source_position().ScriptOffset(),
- JitCodeEvent::POSITION);
+ JitCodeEvent::POSITION, code_type);
}
- jit_logger.EndCodePosInfoEvent(code_start, jit_handler_data);
+ jit_logger.EndCodePosInfoEvent(code_start, jit_handler_data, code_type);
}
} // namespace
void Logger::CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray source_position_table) {
+ ByteArray source_position_table,
+ JitCodeEvent::CodeType code_type) {
if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(*jit_logger_, code_start, iter);
+ CodeLinePosEvent(*jit_logger_, code_start, iter, code_type);
}
void Logger::CodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table) {
if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(*jit_logger_, code_start, iter);
+ CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::JIT_CODE);
}
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
@@ -2109,6 +2122,9 @@ FILE* Logger::TearDownAndGetLogFile() {
void Logger::UpdateIsLogging(bool value) {
base::MutexGuard guard(log_->mutex());
+ if (value) {
+ isolate_->CollectSourcePositionsForAllBytecodeArrays();
+ }
// Relaxed atomic to avoid locking the mutex for the most common case: when
// logging is disabled.
is_logging_.store(value, std::memory_order_relaxed);
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 45aadd7163..612c2a2df7 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -12,6 +12,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/execution/isolate.h"
#include "src/logging/code-events.h"
#include "src/objects/objects.h"
@@ -103,8 +104,6 @@ enum class LogSeparator;
class Logger : public CodeEventListener {
public:
- enum StartEnd { START = 0, END = 1, STAMP = 2 };
-
enum class ScriptEventType {
kReserveId,
kCreate,
@@ -173,12 +172,28 @@ class Logger : public CodeEventListener {
void ScriptDetails(Script script);
// ==== Events logged by --log-api. ====
- void ApiSecurityCheck();
- void ApiNamedPropertyAccess(const char* tag, JSObject holder, Object name);
+ void ApiSecurityCheck() {
+ if (!FLAG_log_api) return;
+ WriteApiSecurityCheck();
+ }
+ void ApiNamedPropertyAccess(const char* tag, JSObject holder, Object name) {
+ if (!FLAG_log_api) return;
+ WriteApiNamedPropertyAccess(tag, holder, name);
+ }
void ApiIndexedPropertyAccess(const char* tag, JSObject holder,
- uint32_t index);
- void ApiObjectAccess(const char* tag, JSReceiver obj);
- void ApiEntryCall(const char* name);
+ uint32_t index) {
+ if (!FLAG_log_api) return;
+ WriteApiIndexedPropertyAccess(tag, holder, index);
+ }
+
+ void ApiObjectAccess(const char* tag, JSReceiver obj) {
+ if (!FLAG_log_api) return;
+ WriteApiObjectAccess(tag, obj);
+ }
+ void ApiEntryCall(const char* name) {
+ if (!FLAG_log_api) return;
+ WriteApiEntryCall(name);
+ }
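
These wrappers move the FLAG_log_api test into the header, so callers pay only an inlined flag check when API logging is off, while the out-of-line Write* helpers can DCHECK the flag instead of re-testing it. A generic sketch of that fast-path/slow-path split (the names and the printf stand-in are illustrative, not the patch's code):

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for the runtime logging flag.
    static bool g_log_api = false;

    class Logger {
     public:
      // Inlined fast path: a single flag test when logging is disabled.
      void ApiEntryCall(const char* name) {
        if (!g_log_api) return;
        WriteApiEntryCall(name);
      }

     private:
      // Out-of-line slow path: only reachable when the flag is set.
      void WriteApiEntryCall(const char* name) {
        assert(g_log_api);
        std::printf("api,%s\n", name);
      }
    };

    int main() {
      Logger logger;
      logger.ApiEntryCall("ignored");                // flag off: no output
      g_log_api = true;
      logger.ApiEntryCall("Object.defineProperty");  // flag on: writes a log line
    }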
// ==== Events logged by --log-code. ====
V8_EXPORT_PRIVATE void AddCodeEventListener(CodeEventListener* listener);
@@ -224,7 +239,8 @@ class Logger : public CodeEventListener {
// Emits a code line info record event.
void CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray source_position_table);
+ ByteArray source_position_table,
+ JitCodeEvent::CodeType code_type);
void CodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table);
@@ -242,10 +258,11 @@ class Logger : public CodeEventListener {
void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
uintptr_t end, intptr_t aslr_slide);
+ void SharedLibraryEnd();
void CurrentTimeEvent();
- V8_EXPORT_PRIVATE void TimerEvent(StartEnd se, const char* name);
+ V8_EXPORT_PRIVATE void TimerEvent(v8::LogEventStatus se, const char* name);
void BasicBlockCounterEvent(const char* name, int block_id, uint32_t count);
@@ -256,8 +273,15 @@ class Logger : public CodeEventListener {
static void DefaultEventLoggerSentinel(const char* name, int event) {}
- V8_INLINE static void CallEventLogger(Isolate* isolate, const char* name,
- StartEnd se, bool expose_to_api);
+ static void CallEventLogger(Isolate* isolate, const char* name,
+ v8::LogEventStatus se, bool expose_to_api) {
+ if (!isolate->event_logger()) return;
+ if (isolate->event_logger() == DefaultEventLoggerSentinel) {
+ LOG(isolate, TimerEvent(se, name));
+ } else if (expose_to_api) {
+ isolate->event_logger()(name, static_cast<v8::LogEventStatus>(se));
+ }
+ }
V8_EXPORT_PRIVATE bool is_logging();
@@ -312,6 +336,14 @@ class Logger : public CodeEventListener {
Handle<SharedFunctionInfo> shared);
void LogCodeDisassemble(Handle<AbstractCode> code);
+ void WriteApiSecurityCheck();
+ void WriteApiNamedPropertyAccess(const char* tag, JSObject holder,
+ Object name);
+ void WriteApiIndexedPropertyAccess(const char* tag, JSObject holder,
+ uint32_t index);
+ void WriteApiObjectAccess(const char* tag, JSReceiver obj);
+ void WriteApiEntryCall(const char* name);
+
int64_t Time();
Isolate* isolate_;
@@ -373,13 +405,13 @@ template <class TimerEvent>
class V8_NODISCARD TimerEventScope {
public:
explicit TimerEventScope(Isolate* isolate) : isolate_(isolate) {
- LogTimerEvent(Logger::START);
+ LogTimerEvent(v8::LogEventStatus::kStart);
}
- ~TimerEventScope() { LogTimerEvent(Logger::END); }
+ ~TimerEventScope() { LogTimerEvent(v8::LogEventStatus::kEnd); }
private:
- void LogTimerEvent(Logger::StartEnd se);
+ void LogTimerEvent(v8::LogEventStatus se);
Isolate* isolate_;
};
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index d0687c2f62..5b3284a0c9 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -477,6 +477,7 @@ class RuntimeCallTimer final {
V(WebSnapshotDeserialize_Contexts) \
V(WebSnapshotDeserialize_Exports) \
V(WebSnapshotDeserialize_Functions) \
+ V(WebSnapshotDeserialize_Classes) \
V(WebSnapshotDeserialize_Maps) \
V(WebSnapshotDeserialize_Objects) \
V(WebSnapshotDeserialize_Strings)
diff --git a/deps/v8/src/logging/tracing-flags.h b/deps/v8/src/logging/tracing-flags.h
index b23ed03a20..b3ccb896aa 100644
--- a/deps/v8/src/logging/tracing-flags.h
+++ b/deps/v8/src/logging/tracing-flags.h
@@ -23,9 +23,11 @@ struct TracingFlags {
static V8_EXPORT_PRIVATE std::atomic_uint ic_stats;
static V8_EXPORT_PRIVATE std::atomic_uint zone_stats;
+#ifdef V8_RUNTIME_CALL_STATS
static bool is_runtime_stats_enabled() {
return runtime_stats.load(std::memory_order_relaxed) != 0;
}
+#endif
static bool is_gc_enabled() {
return gc.load(std::memory_order_relaxed) != 0;
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index cbbfb3fae0..79497a791b 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -12,6 +12,7 @@
#include "src/base/numbers/dtoa.h"
#include "src/base/numbers/strtod.h"
#include "src/base/platform/wrappers.h"
+#include "src/bigint/bigint.h"
#include "src/common/assert-scope.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
@@ -310,38 +311,35 @@ class StringToIntHelper {
protected:
// Subclasses must implement these:
- virtual void AllocateResult() = 0;
- virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) = 0;
+ virtual void ParseOneByte(const uint8_t* start) = 0;
+ virtual void ParseTwoByte(const base::uc16* start) = 0;
// Subclasses must call this to do all the work.
void ParseInt();
- // Subclasses may override this.
- virtual bool CheckTermination() { return false; }
- virtual void HandleSpecialCases() {}
-
// Subclass constructors should call these for configuration before calling
// ParseInt().
void set_allow_binary_and_octal_prefixes() {
allow_binary_and_octal_prefixes_ = true;
}
void set_disallow_trailing_junk() { allow_trailing_junk_ = false; }
+ bool allow_trailing_junk() { return allow_trailing_junk_; }
bool IsOneByte() const {
return raw_one_byte_subject_ != nullptr ||
String::IsOneByteRepresentationUnderneath(*subject_);
}
- base::Vector<const uint8_t> GetOneByteVector() {
+ base::Vector<const uint8_t> GetOneByteVector(
+ const DisallowGarbageCollection& no_gc) {
if (raw_one_byte_subject_ != nullptr) {
return base::Vector<const uint8_t>(raw_one_byte_subject_, length_);
}
- DisallowGarbageCollection no_gc;
return subject_->GetFlatContent(no_gc).ToOneByteVector();
}
- base::Vector<const base::uc16> GetTwoByteVector() {
- DisallowGarbageCollection no_gc;
+ base::Vector<const base::uc16> GetTwoByteVector(
+ const DisallowGarbageCollection& no_gc) {
return subject_->GetFlatContent(no_gc).ToUC16Vector();
}
@@ -357,8 +355,6 @@ class StringToIntHelper {
private:
template <class Char>
void DetectRadixInternal(Char current, int length);
- template <class Char>
- bool ParseChunkInternal(Char start);
IsolateT* isolate_;
Handle<String> subject_;
@@ -375,46 +371,18 @@ class StringToIntHelper {
template <typename IsolateT>
void StringToIntHelper<IsolateT>::ParseInt() {
- {
- DisallowGarbageCollection no_gc;
- if (IsOneByte()) {
- base::Vector<const uint8_t> vector = GetOneByteVector();
- DetectRadixInternal(vector.begin(), vector.length());
- } else {
- base::Vector<const base::uc16> vector = GetTwoByteVector();
- DetectRadixInternal(vector.begin(), vector.length());
- }
+ DisallowGarbageCollection no_gc;
+ if (IsOneByte()) {
+ base::Vector<const uint8_t> vector = GetOneByteVector(no_gc);
+ DetectRadixInternal(vector.begin(), vector.length());
+ if (state_ != State::kRunning) return;
+ ParseOneByte(vector.begin());
+ } else {
+ base::Vector<const base::uc16> vector = GetTwoByteVector(no_gc);
+ DetectRadixInternal(vector.begin(), vector.length());
+ if (state_ != State::kRunning) return;
+ ParseTwoByte(vector.begin());
}
- if (state_ != State::kRunning) return;
- AllocateResult();
- HandleSpecialCases();
- if (state_ != State::kRunning) return;
- do {
- {
- DisallowGarbageCollection no_gc;
- if (IsOneByte()) {
- base::Vector<const uint8_t> vector = GetOneByteVector();
- DCHECK_EQ(length_, vector.length());
- if (ParseChunkInternal(vector.begin())) {
- break;
- }
- } else {
- base::Vector<const base::uc16> vector = GetTwoByteVector();
- DCHECK_EQ(length_, vector.length());
- if (ParseChunkInternal(vector.begin())) {
- break;
- }
- }
- }
-
- // The flat vector handle is temporarily released after parsing 10kb
- // in order to invoke interrupts which may in turn invoke GC.
- if (CheckTermination()) {
- set_state(State::kError);
- break;
- }
- } while (true);
- DCHECK_NE(state_, State::kRunning);
}
template <typename IsolateT>
@@ -497,87 +465,29 @@ void StringToIntHelper<IsolateT>::DetectRadixInternal(Char current,
cursor_ = static_cast<int>(current - start);
}
-template <typename IsolateT>
-template <class Char>
-bool StringToIntHelper<IsolateT>::ParseChunkInternal(Char start) {
- const int kChunkSize = 10240;
- Char current = start + cursor_;
- Char end = start + length_;
- Char break_pos = current + kChunkSize;
-
- // The following code causes accumulating rounding error for numbers greater
- // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
- // 16, or 32, then mathInt may be an implementation-dependent approximation to
- // the mathematical integer value" (15.1.2.2).
-
- int lim_0 = '0' + (radix_ < 10 ? radix_ : 10);
- int lim_a = 'a' + (radix_ - 10);
- int lim_A = 'A' + (radix_ - 10);
-
- // NOTE: The code for computing the value may seem a bit complex at
- // first glance. It is structured to use 32-bit multiply-and-add
- // loops as long as possible to avoid losing precision.
-
- bool done = false;
- do {
- // Parse the longest part of the string starting at {current}
- // possible while keeping the multiplier, and thus the part
- // itself, within 32 bits.
- uint32_t part = 0, multiplier = 1;
- while (true) {
- uint32_t d;
- if (*current >= '0' && *current < lim_0) {
- d = *current - '0';
- } else if (*current >= 'a' && *current < lim_a) {
- d = *current - 'a' + 10;
- } else if (*current >= 'A' && *current < lim_A) {
- d = *current - 'A' + 10;
- } else {
- done = true;
- break;
- }
-
- // Update the value of the part as long as the multiplier fits
- // in 32 bits. When we can't guarantee that the next iteration
- // will not overflow the multiplier, we stop parsing the part
- // by leaving the loop.
- const uint32_t kMaximumMultiplier = 0xFFFFFFFFU / 36;
- uint32_t m = multiplier * static_cast<uint32_t>(radix_);
- if (m > kMaximumMultiplier) break;
- part = part * radix_ + d;
- multiplier = m;
- DCHECK(multiplier > part);
-
- ++current;
- if (current == end) {
- done = true;
- break;
- }
- }
-
- // Update the value and skip the part in the string.
- ResultMultiplyAdd(multiplier, part);
-
- // Set final state
- if (done) {
- if (!allow_trailing_junk_ && AdvanceToNonspace(&current, end)) {
- set_state(State::kJunk);
- } else {
- set_state(State::kDone);
- }
- return true;
- }
- } while (current < break_pos);
-
- cursor_ = static_cast<int>(current - start);
- return false;
-}
-
class NumberParseIntHelper : public StringToIntHelper<Isolate> {
public:
NumberParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
: StringToIntHelper(isolate, string, radix) {}
+ template <class Char>
+ void ParseInternal(Char start) {
+ Char current = start + cursor();
+ Char end = start + length();
+
+ if (radix() == 10) return HandleBaseTenCase(current, end);
+ if (base::bits::IsPowerOfTwo(radix())) {
+ result_ = HandlePowerOfTwoCase(current, end);
+ set_state(State::kDone);
+ return;
+ }
+ return HandleGenericCase(current, end);
+ }
+ void ParseOneByte(const uint8_t* start) final { return ParseInternal(start); }
+ void ParseTwoByte(const base::uc16* start) final {
+ return ParseInternal(start);
+ }
+
double GetResult() {
ParseInt();
switch (state()) {
@@ -595,35 +505,12 @@ class NumberParseIntHelper : public StringToIntHelper<Isolate> {
UNREACHABLE();
}
- protected:
- void AllocateResult() override {}
- void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) override {
- result_ = result_ * multiplier + part;
- }
-
private:
- void HandleSpecialCases() override {
- bool is_power_of_two = base::bits::IsPowerOfTwo(radix());
- if (!is_power_of_two && radix() != 10) return;
- DisallowGarbageCollection no_gc;
- if (IsOneByte()) {
- base::Vector<const uint8_t> vector = GetOneByteVector();
- DCHECK_EQ(length(), vector.length());
- result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.begin())
- : HandleBaseTenCase(vector.begin());
- } else {
- base::Vector<const base::uc16> vector = GetTwoByteVector();
- DCHECK_EQ(length(), vector.length());
- result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.begin())
- : HandleBaseTenCase(vector.begin());
- }
- set_state(State::kDone);
- }
+ template <class Char>
+ void HandleGenericCase(Char current, Char end);
template <class Char>
- double HandlePowerOfTwoCase(Char start) {
- Char current = start + cursor();
- Char end = start + length();
+ double HandlePowerOfTwoCase(Char current, Char end) {
const bool allow_trailing_junk = true;
// GetResult() will take care of the sign bit, so ignore it for now.
const bool negative = false;
@@ -651,10 +538,8 @@ class NumberParseIntHelper : public StringToIntHelper<Isolate> {
}
template <class Char>
- double HandleBaseTenCase(Char start) {
+ void HandleBaseTenCase(Char current, Char end) {
// Parsing with strtod.
- Char current = start + cursor();
- Char end = start + length();
const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
// The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
// end.
@@ -675,12 +560,73 @@ class NumberParseIntHelper : public StringToIntHelper<Isolate> {
SLOW_DCHECK(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
base::Vector<const char> buffer_vector(buffer, buffer_pos);
- return Strtod(buffer_vector, 0);
+ result_ = Strtod(buffer_vector, 0);
+ set_state(State::kDone);
}
double result_ = 0;
};
+template <class Char>
+void NumberParseIntHelper::HandleGenericCase(Char current, Char end) {
+ // The following code causes accumulating rounding error for numbers greater
+ // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+ // 16, or 32, then mathInt may be an implementation-dependent approximation to
+ // the mathematical integer value" (15.1.2.2).
+
+ int lim_0 = '0' + (radix() < 10 ? radix() : 10);
+ int lim_a = 'a' + (radix() - 10);
+ int lim_A = 'A' + (radix() - 10);
+
+ // NOTE: The code for computing the value may seem a bit complex at
+ // first glance. It is structured to use 32-bit multiply-and-add
+ // loops as long as possible to avoid losing precision.
+
+ bool done = false;
+ do {
+ // Parse the longest part of the string starting at {current}
+ // possible while keeping the multiplier, and thus the part
+ // itself, within 32 bits.
+ uint32_t part = 0, multiplier = 1;
+ while (true) {
+ uint32_t d;
+ if (*current >= '0' && *current < lim_0) {
+ d = *current - '0';
+ } else if (*current >= 'a' && *current < lim_a) {
+ d = *current - 'a' + 10;
+ } else if (*current >= 'A' && *current < lim_A) {
+ d = *current - 'A' + 10;
+ } else {
+ done = true;
+ break;
+ }
+
+ // Update the value of the part as long as the multiplier fits
+ // in 32 bits. When we can't guarantee that the next iteration
+ // will not overflow the multiplier, we stop parsing the part
+ // by leaving the loop.
+ const uint32_t kMaximumMultiplier = 0xFFFFFFFFU / 36;
+ uint32_t m = multiplier * static_cast<uint32_t>(radix());
+ if (m > kMaximumMultiplier) break;
+ part = part * radix() + d;
+ multiplier = m;
+ DCHECK(multiplier > part);
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+ result_ = result_ * multiplier + part;
+ } while (!done);
+
+ if (!allow_trailing_junk() && AdvanceToNonspace(&current, end)) {
+ return set_state(State::kJunk);
+ }
+ return set_state(State::kDone);
+}
+
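
HandleGenericCase folds digits into a 32-bit (multiplier, part) pair and touches the double accumulator only once per chunk, which limits rounding error for non-power-of-two radixes. A self-contained sketch of that chunking trick (digits 0-9 only; the real code also accepts a-z/A-Z and applies the trailing-junk rules):

    #include <cstdint>
    #include <cstdio>

    // Accumulates base-`radix` digits of `s` into a double, folding as many
    // digits as possible into 32-bit arithmetic before each double update.
    double ParseUnsigned(const char* s, int radix) {
      const uint32_t kMaximumMultiplier = 0xFFFFFFFFu / 36;
      double result = 0;
      bool done = false;
      while (!done) {
        // Fold digits while the running multiplier stays within 32 bits.
        uint32_t part = 0, multiplier = 1;
        while (true) {
          int d = *s - '0';
          if (*s == '\0' || d < 0 || d >= radix) {
            done = true;  // end of digits: fold what we have and stop
            break;
          }
          uint32_t m = multiplier * static_cast<uint32_t>(radix);
          if (m > kMaximumMultiplier) break;  // next digit would overflow the chunk
          part = part * radix + d;
          multiplier = m;
          ++s;
        }
        result = result * multiplier + part;
      }
      return result;
    }

    int main() {
      std::printf("%.0f\n", ParseUnsigned("123456789012345", 10));  // 123456789012345
    }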
// Converts a string to a double value. Assumes the Iterator supports
// the following operations:
// 1. current == end (other ops are not allowed), current != end.
@@ -989,6 +935,11 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
this->set_allow_binary_and_octal_prefixes();
}
+ void ParseOneByte(const uint8_t* start) final { return ParseInternal(start); }
+ void ParseTwoByte(const base::uc16* start) final {
+ return ParseInternal(start);
+ }
+
MaybeHandle<BigInt> GetResult() {
this->ParseInt();
if (behavior_ == Behavior::kStringToBigInt && this->sign() != Sign::kNone &&
@@ -1009,7 +960,8 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
case State::kZero:
return BigInt::Zero(this->isolate(), allocation_type());
case State::kDone:
- return BigInt::Finalize<Isolate>(result_, this->negative());
+ return BigInt::Allocate(this->isolate(), &accumulator_,
+ this->negative(), allocation_type());
case State::kEmpty:
case State::kRunning:
break;
@@ -1017,28 +969,24 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
UNREACHABLE();
}
- protected:
- void AllocateResult() override {
- // We have to allocate a BigInt that's big enough to fit the result.
- // Conseratively assume that all remaining digits are significant.
- // Optimization opportunity: Would it makes sense to scan for trailing
- // junk before allocating the result?
- int charcount = this->length() - this->cursor();
- MaybeHandle<FreshlyAllocatedBigInt> maybe =
- BigInt::AllocateFor(this->isolate(), this->radix(), charcount,
- kDontThrow, allocation_type());
- if (!maybe.ToHandle(&result_)) {
- this->set_state(State::kError);
+ private:
+ template <class Char>
+ void ParseInternal(Char start) {
+ using Result = bigint::FromStringAccumulator::Result;
+ Char current = start + this->cursor();
+ Char end = start + this->length();
+ current = accumulator_.Parse(current, end, this->radix());
+
+ Result result = accumulator_.result();
+ if (result == Result::kMaxSizeExceeded) {
+ return this->set_state(State::kError);
}
+ if (!this->allow_trailing_junk() && AdvanceToNonspace(&current, end)) {
+ return this->set_state(State::kJunk);
+ }
+ return this->set_state(State::kDone);
}
- void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) override {
- BigInt::InplaceMultiplyAdd(*result_, static_cast<uintptr_t>(multiplier),
- static_cast<uintptr_t>(part));
- }
-
- bool CheckTermination() override;
-
AllocationType allocation_type() {
// For literals, we pretenure the allocated BigInt, since it's about
// to be stored in the interpreter's constants array.
@@ -1046,23 +994,10 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
: AllocationType::kYoung;
}
- private:
- Handle<FreshlyAllocatedBigInt> result_;
+ bigint::FromStringAccumulator accumulator_{BigInt::kMaxLength};
Behavior behavior_;
};
-template <typename IsolateT>
-bool StringToBigIntHelper<IsolateT>::CheckTermination() {
- return false;
-}
-
-template <>
-bool StringToBigIntHelper<Isolate>::CheckTermination() {
- StackLimitCheck interrupt_check(isolate());
- return interrupt_check.InterruptRequested() &&
- isolate()->stack_guard()->HandleInterrupts().IsException(isolate());
-}
-
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
string = String::Flatten(isolate, string);
StringToBigIntHelper<Isolate> helper(isolate, string);
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index e4fc40249d..9d17048958 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -30,7 +30,8 @@ ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
RELEASE_ACQUIRE_ACCESSORS(AllocationSite, transition_info_or_boilerplate,
Object, kTransitionInfoOrBoilerplateOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
+IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data,
+ kPretenureDataOffset)
INT32_ACCESSORS(AllocationSite, pretenure_create_count,
kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode, kDependentCodeOffset)
diff --git a/deps/v8/src/objects/allocation-site.tq b/deps/v8/src/objects/allocation-site.tq
index 3710268539..f38270e053 100644
--- a/deps/v8/src/objects/allocation-site.tq
+++ b/deps/v8/src/objects/allocation-site.tq
@@ -4,7 +4,6 @@
extern class AllocationSite extends Struct;
-@generateCppClass
extern class AllocationMemento extends Struct {
allocation_site: AllocationSite;
}
diff --git a/deps/v8/src/objects/api-callbacks.tq b/deps/v8/src/objects/api-callbacks.tq
index 102ffd7ab2..cf94f743c4 100644
--- a/deps/v8/src/objects/api-callbacks.tq
+++ b/deps/v8/src/objects/api-callbacks.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class CallHandlerInfo extends Struct {
callback: NonNullForeign|Undefined|Zero;
js_callback: NonNullForeign|Undefined|Zero;
@@ -17,7 +16,6 @@ bitfield struct InterceptorInfoFlags extends uint31 {
has_no_side_effect: bool: 1 bit;
}
-@generateCppClass
@generatePrint
extern class InterceptorInfo extends Struct {
getter: NonNullForeign|Zero|Undefined;
@@ -31,7 +29,6 @@ extern class InterceptorInfo extends Struct {
flags: SmiTagged<InterceptorInfoFlags>;
}
-@generateCppClass
@generatePrint
extern class AccessCheckInfo extends Struct {
callback: Foreign|Zero|Undefined;
@@ -53,7 +50,6 @@ bitfield struct AccessorInfoFlags extends uint31 {
initial_attributes: PropertyAttributes: 3 bit;
}
-@generateCppClass
@generatePrint
extern class AccessorInfo extends Struct {
name: Name;
diff --git a/deps/v8/src/objects/arguments.tq b/deps/v8/src/objects/arguments.tq
index 6f8e02fdbc..cc60e62f70 100644
--- a/deps/v8/src/objects/arguments.tq
+++ b/deps/v8/src/objects/arguments.tq
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
-extern class JSArgumentsObject extends JSObject {
-}
+extern class JSArgumentsObject extends JSObject {}
type JSArgumentsObjectWithLength =
JSSloppyArgumentsObject|JSStrictArgumentsObject;
@@ -16,12 +14,14 @@ macro IsJSArgumentsObjectWithLength(implicit context: Context)(o: Object):
}
// Just a starting shape for JSObject; properties can move after initialization.
+@doNotGenerateCppClass
extern shape JSSloppyArgumentsObject extends JSArgumentsObject {
length: JSAny;
callee: JSAny;
}
// Just a starting shape for JSObject; properties can move after initialization.
+@doNotGenerateCppClass
extern shape JSStrictArgumentsObject extends JSArgumentsObject {
length: JSAny;
}
@@ -90,7 +90,6 @@ macro NewSloppyArgumentsElements<Iterator: type>(
SloppyArgumentsElements{length, context, arguments, mapped_entries: ...it};
}
-@generateCppClass
@generatePrint
extern class AliasedArgumentsEntry extends Struct {
aliased_context_slot: Smi;
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 89bb17b03a..e72698858a 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -267,6 +267,7 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
auto result = new BackingStore(buffer_start, // start
byte_length, // length
+ byte_length, // max length
byte_length, // capacity
shared, // shared
ResizableFlag::kNotResizable, // resizable
@@ -305,8 +306,9 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
maximum_pages = std::min(engine_max_pages, maximum_pages);
auto result = TryAllocateAndPartiallyCommitMemory(
- isolate, initial_pages * wasm::kWasmPageSize, wasm::kWasmPageSize,
- initial_pages, maximum_pages, true, shared);
+ isolate, initial_pages * wasm::kWasmPageSize,
+ maximum_pages * wasm::kWasmPageSize, wasm::kWasmPageSize, initial_pages,
+ maximum_pages, true, shared);
// Shared Wasm memories need an anchor for the memory object list.
if (result && shared == SharedFlag::kShared) {
result->type_specific_data_.shared_wasm_memory_data =
@@ -336,9 +338,9 @@ void BackingStore::ReleaseReservation(uint64_t num_bytes) {
}
std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
- Isolate* isolate, size_t byte_length, size_t page_size,
- size_t initial_pages, size_t maximum_pages, bool is_wasm_memory,
- SharedFlag shared) {
+ Isolate* isolate, size_t byte_length, size_t max_byte_length,
+ size_t page_size, size_t initial_pages, size_t maximum_pages,
+ bool is_wasm_memory, SharedFlag shared) {
// Enforce engine limitation on the maximum number of pages.
if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
return nullptr;
@@ -445,16 +447,17 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
ResizableFlag resizable =
is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
- auto result = new BackingStore(buffer_start, // start
- byte_length, // length
- byte_capacity, // capacity
- shared, // shared
- resizable, // resizable
- is_wasm_memory, // is_wasm_memory
- true, // free_on_destruct
- guards, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ max_byte_length, // max_byte_length
+ byte_capacity, // capacity
+ shared, // shared
+ resizable, // resizable
+ is_wasm_memory, // is_wasm_memory
+ true, // free_on_destruct
+ guards, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS(
"BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
@@ -707,6 +710,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
SharedFlag shared, bool free_on_destruct) {
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
+ allocation_length, // max length
allocation_length, // capacity
shared, // shared
ResizableFlag::kNotResizable, // resizable
@@ -728,6 +732,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
+ allocation_length, // max length
allocation_length, // capacity
shared, // shared
ResizableFlag::kNotResizable, // resizable
@@ -746,6 +751,7 @@ std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
SharedFlag shared) {
auto result = new BackingStore(nullptr, // start
0, // length
+ 0, // max length
0, // capacity
shared, // shared
ResizableFlag::kNotResizable, // resizable
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 5598388d3b..013a97a526 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -61,9 +61,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// Tries to allocate `maximum_pages` of memory and commit `initial_pages`.
static std::unique_ptr<BackingStore> TryAllocateAndPartiallyCommitMemory(
- Isolate* isolate, size_t byte_length, size_t page_size,
- size_t initial_pages, size_t maximum_pages, bool is_wasm_memory,
- SharedFlag shared);
+ Isolate* isolate, size_t byte_length, size_t max_byte_length,
+ size_t page_size, size_t initial_pages, size_t maximum_pages,
+ bool is_wasm_memory, SharedFlag shared);
// Create a backing store that wraps existing allocated memory.
// If {free_on_destruct} is {true}, the memory will be freed using the
@@ -90,6 +90,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
std::memory_order memory_order = std::memory_order_relaxed) const {
return byte_length_.load(memory_order);
}
+ size_t max_byte_length() const { return max_byte_length_; }
size_t byte_capacity() const { return byte_capacity_; }
bool is_shared() const { return is_shared_; }
bool is_resizable() const { return is_resizable_; }
@@ -165,12 +166,13 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
private:
friend class GlobalBackingStoreRegistry;
- BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity,
- SharedFlag shared, ResizableFlag resizable, bool is_wasm_memory,
- bool free_on_destruct, bool has_guard_regions,
- bool custom_deleter, bool empty_deleter)
+ BackingStore(void* buffer_start, size_t byte_length, size_t max_byte_length,
+ size_t byte_capacity, SharedFlag shared, ResizableFlag resizable,
+ bool is_wasm_memory, bool free_on_destruct,
+ bool has_guard_regions, bool custom_deleter, bool empty_deleter)
: buffer_start_(buffer_start),
byte_length_(byte_length),
+ max_byte_length_(max_byte_length),
byte_capacity_(byte_capacity),
is_shared_(shared == SharedFlag::kShared),
is_resizable_(resizable == ResizableFlag::kResizable),
@@ -185,6 +187,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
+ DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
+ byte_length_ == max_byte_length_);
}
BackingStore(const BackingStore&) = delete;
BackingStore& operator=(const BackingStore&) = delete;
@@ -192,6 +196,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
void* buffer_start_ = nullptr;
std::atomic<size_t> byte_length_{0};
+ // Max byte length of the corresponding JSArrayBuffer(s).
+ size_t max_byte_length_ = 0;
+ // Amount of memory allocated.
size_t byte_capacity_ = 0;
struct DeleterInfo {
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 7e716f0118..5d21adfb89 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -126,10 +126,6 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt result_storage = MutableBigInt());
- static void InternalMultiplyAdd(BigIntBase source, digit_t factor,
- digit_t summand, int n, MutableBigInt result);
- void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
-
// Specialized helpers for shift operations.
static MaybeHandle<BigInt> LeftShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> x,
@@ -152,7 +148,6 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
// Digit arithmetic helpers.
static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
- static inline digit_t digit_mul(digit_t a, digit_t b, digit_t* high);
static inline bool digit_ismax(digit_t x) {
return static_cast<digit_t>(~x) == 0;
}
@@ -1411,50 +1406,6 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
[](digit_t a, digit_t b) { return a ^ b; });
}
-// Multiplies {source} with {factor} and adds {summand} to the result.
-// {result} and {source} may be the same BigInt for inplace modification.
-void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
- digit_t summand, int n,
- MutableBigInt result) {
- DCHECK(source.length() >= n);
- DCHECK(result.length() >= n);
- digit_t carry = summand;
- digit_t high = 0;
- for (int i = 0; i < n; i++) {
- digit_t current = source.digit(i);
- digit_t new_carry = 0;
- // Compute this round's multiplication.
- digit_t new_high = 0;
- current = digit_mul(current, factor, &new_high);
- // Add last round's carryovers.
- current = digit_add(current, high, &new_carry);
- current = digit_add(current, carry, &new_carry);
- // Store result and prepare for next round.
- result.set_digit(i, current);
- carry = new_carry;
- high = new_high;
- }
- if (result.length() > n) {
- result.set_digit(n++, carry + high);
- // Current callers don't pass in such large results, but let's be robust.
- while (n < result.length()) {
- result.set_digit(n++, 0);
- }
- } else {
- CHECK_EQ(carry + high, 0);
- }
-}
-
-// Multiplies {x} with {factor} and then adds {summand} to it.
-void BigInt::InplaceMultiplyAdd(FreshlyAllocatedBigInt x, uintptr_t factor,
- uintptr_t summand) {
- STATIC_ASSERT(sizeof(factor) == sizeof(digit_t));
- STATIC_ASSERT(sizeof(summand) == sizeof(digit_t));
- MutableBigInt bigint = MutableBigInt::cast(x);
- MutableBigInt::InternalMultiplyAdd(bigint, factor, summand, bigint.length(),
- bigint);
-}
-
MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> x,
Handle<BigIntBase> y) {
@@ -1591,71 +1542,33 @@ Maybe<BigInt::digit_t> MutableBigInt::ToShiftAmount(Handle<BigIntBase> x) {
return Just(value);
}
-// Lookup table for the maximum number of bits required per character of a
-// base-N string representation of a number. To increase accuracy, the array
-// value is the actual value multiplied by 32. To generate this table:
-// for (var i = 0; i <= 36; i++) { print(Math.ceil(Math.log2(i) * 32) + ","); }
-constexpr uint8_t kMaxBitsPerChar[] = {
- 0, 0, 32, 51, 64, 75, 83, 90, 96, // 0..8
- 102, 107, 111, 115, 119, 122, 126, 128, // 9..16
- 131, 134, 136, 139, 141, 143, 145, 147, // 17..24
- 149, 151, 153, 154, 156, 158, 159, 160, // 25..32
- 162, 163, 165, 166, // 33..36
-};
-
-static const int kBitsPerCharTableShift = 5;
-static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
+void Terminate(Isolate* isolate) { isolate->TerminateExecution(); }
+// {LocalIsolate} doesn't support interruption or termination.
+void Terminate(LocalIsolate* isolate) { UNREACHABLE(); }
template <typename IsolateT>
-MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- IsolateT* isolate, int radix, int charcount, ShouldThrow should_throw,
- AllocationType allocation) {
- DCHECK(2 <= radix && radix <= 36);
- DCHECK_GE(charcount, 0);
- size_t bits_per_char = kMaxBitsPerChar[radix];
- uint64_t chars = static_cast<uint64_t>(charcount);
- const int roundup = kBitsPerCharTableMultiplier - 1;
- if (chars <=
- (std::numeric_limits<uint64_t>::max() - roundup) / bits_per_char) {
- uint64_t bits_min = bits_per_char * chars;
- // Divide by 32 (see table), rounding up.
- bits_min = (bits_min + roundup) >> kBitsPerCharTableShift;
- if (bits_min <= static_cast<uint64_t>(kMaxInt)) {
- // Divide by kDigitsBits, rounding up.
- int length = static_cast<int>((bits_min + kDigitBits - 1) / kDigitBits);
- if (length <= kMaxLength) {
- Handle<MutableBigInt> result =
- MutableBigInt::New(isolate, length, allocation).ToHandleChecked();
- result->InitializeDigits(length);
- return result;
- }
- }
- }
- // All the overflow/maximum checks above fall through to here.
- if (should_throw == kThrowOnError) {
- return ThrowBigIntTooBig<FreshlyAllocatedBigInt>(isolate);
- } else {
- return MaybeHandle<FreshlyAllocatedBigInt>();
+MaybeHandle<BigInt> BigInt::Allocate(IsolateT* isolate,
+ bigint::FromStringAccumulator* accumulator,
+ bool negative, AllocationType allocation) {
+ int digits = accumulator->ResultLength();
+ DCHECK_LE(digits, kMaxLength);
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, digits, allocation).ToHandleChecked();
+ bigint::Status status =
+ isolate->bigint_processor()->FromString(GetRWDigits(result), accumulator);
+ if (status == bigint::Status::kInterrupted) {
+ Terminate(isolate);
+ return {};
}
+ if (digits > 0) result->set_sign(negative);
+ return MutableBigInt::MakeImmutable(result);
}
-template MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
- AllocationType allocation);
-template MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- LocalIsolate* isolate, int radix, int charcount, ShouldThrow should_throw,
- AllocationType allocation);
-
-template <typename IsolateT>
-Handle<BigInt> BigInt::Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign) {
- Handle<MutableBigInt> bigint = Handle<MutableBigInt>::cast(x);
- bigint->set_sign(sign);
- return MutableBigInt::MakeImmutable<Isolate>(bigint);
-}
-
-template Handle<BigInt> BigInt::Finalize<Isolate>(
- Handle<FreshlyAllocatedBigInt>, bool);
-template Handle<BigInt> BigInt::Finalize<LocalIsolate>(
- Handle<FreshlyAllocatedBigInt>, bool);
+template MaybeHandle<BigInt> BigInt::Allocate(Isolate*,
+ bigint::FromStringAccumulator*,
+ bool, AllocationType);
+template MaybeHandle<BigInt> BigInt::Allocate(LocalIsolate*,
+ bigint::FromStringAccumulator*,
+ bool, AllocationType);
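
For context, the removed AllocateFor sized the result from the kMaxBitsPerChar table (bits per character times 32), while the replacement above delegates sizing to bigint::FromStringAccumulator::ResultLength(). A minimal sketch of the removed estimate, assuming 64-bit digits; helper names are illustrative, not V8's:

#include <cstdint>

constexpr int kTableShift = 5;  // kMaxBitsPerChar stores bits-per-char * 32
constexpr uint64_t kRoundup = (uint64_t{1} << kTableShift) - 1;
constexpr int kDigitBits = 64;  // assumption: 64-bit digit_t

// bits_per_char comes from kMaxBitsPerChar[radix], e.g. 128 for radix 16.
constexpr int EstimateDigits(uint64_t bits_per_char, uint64_t chars) {
  uint64_t bits_min = (bits_per_char * chars + kRoundup) >> kTableShift;
  return static_cast<int>((bits_min + kDigitBits - 1) / kDigitBits);
}

// 100 hex characters -> 400 bits -> 7 digits of 64 bits each.
static_assert(EstimateDigits(128, 100) == 7, "sanity check");
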
// The serialization format MUST NOT CHANGE without updating the format
// version in value-serializer.cc!
@@ -2056,43 +1969,6 @@ inline BigInt::digit_t MutableBigInt::digit_sub(digit_t a, digit_t b,
#endif
}
-// Returns the low half of the result. High half is in {high}.
-inline BigInt::digit_t MutableBigInt::digit_mul(digit_t a, digit_t b,
- digit_t* high) {
-#if HAVE_TWODIGIT_T
- twodigit_t result = static_cast<twodigit_t>(a) * static_cast<twodigit_t>(b);
- *high = result >> kDigitBits;
- return static_cast<digit_t>(result);
-#else
- // Multiply in half-pointer-sized chunks.
- // For inputs [AH AL]*[BH BL], the result is:
- //
- // [AL*BL] // r_low
- // + [AL*BH] // r_mid1
- // + [AH*BL] // r_mid2
- // + [AH*BH] // r_high
- // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1]
- //
- // Where of course we must be careful with carries between the columns.
- digit_t a_low = a & kHalfDigitMask;
- digit_t a_high = a >> kHalfDigitBits;
- digit_t b_low = b & kHalfDigitMask;
- digit_t b_high = b >> kHalfDigitBits;
-
- digit_t r_low = a_low * b_low;
- digit_t r_mid1 = a_low * b_high;
- digit_t r_mid2 = a_high * b_low;
- digit_t r_high = a_high * b_high;
-
- digit_t carry = 0;
- digit_t low = digit_add(r_low, r_mid1 << kHalfDigitBits, &carry);
- low = digit_add(low, r_mid2 << kHalfDigitBits, &carry);
- *high =
- (r_mid1 >> kHalfDigitBits) + (r_mid2 >> kHalfDigitBits) + r_high + carry;
- return low;
-#endif
-}
-
#undef HAVE_TWODIGIT_T
void MutableBigInt::set_64_bits(uint64_t bits) {
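
The removed digit_mul fallback multiplies two digits via half-width chunks when no double-width integer type is available. A self-contained sketch of the same technique for 64-bit digits (illustrative, not V8's code):

#include <cstdint>

// Returns the low half of a * b and writes the high half to *high.
uint64_t Mul64(uint64_t a, uint64_t b, uint64_t* high) {
  const uint64_t kHalfMask = 0xFFFFFFFFu;
  uint64_t a_lo = a & kHalfMask, a_hi = a >> 32;
  uint64_t b_lo = b & kHalfMask, b_hi = b >> 32;

  uint64_t r_low = a_lo * b_lo;   // bits 0..63
  uint64_t r_mid1 = a_lo * b_hi;  // bits 32..95
  uint64_t r_mid2 = a_hi * b_lo;  // bits 32..95
  uint64_t r_high = a_hi * b_hi;  // bits 64..127

  // Assemble the low word, tracking carries out of bit 63.
  uint64_t carry = 0;
  uint64_t low = r_low + (r_mid1 << 32);
  carry += low < r_low;
  uint64_t low2 = low + (r_mid2 << 32);
  carry += low2 < low;

  *high = (r_mid1 >> 32) + (r_mid2 >> 32) + r_high + carry;
  return low2;
}
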
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 31241a0158..58856d737e 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -14,6 +14,11 @@
#include "src/objects/object-macros.h"
namespace v8 {
+
+namespace bigint {
+class FromStringAccumulator;
+} // namespace bigint
+
namespace internal {
void MutableBigInt_AbsoluteAddAndCanonicalize(Address result_addr,
@@ -252,13 +257,9 @@ class BigInt : public BigIntBase {
static Handle<BigInt> Zero(
IsolateT* isolate, AllocationType allocation = AllocationType::kYoung);
template <typename IsolateT>
- static MaybeHandle<FreshlyAllocatedBigInt> AllocateFor(
- IsolateT* isolate, int radix, int charcount, ShouldThrow should_throw,
- AllocationType allocation);
- static void InplaceMultiplyAdd(FreshlyAllocatedBigInt x, uintptr_t factor,
- uintptr_t summand);
- template <typename IsolateT>
- static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
+ static MaybeHandle<BigInt> Allocate(
+ IsolateT* isolate, bigint::FromStringAccumulator* accumulator,
+ bool negative, AllocationType allocation);
// Special functions for ValueSerializer/ValueDeserializer:
uint32_t GetBitfieldForSerialization() const;
diff --git a/deps/v8/src/objects/bigint.tq b/deps/v8/src/objects/bigint.tq
index 60be844cc6..2d8275b2d5 100644
--- a/deps/v8/src/objects/bigint.tq
+++ b/deps/v8/src/objects/bigint.tq
@@ -5,15 +5,14 @@
// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
// in global namespace
extern class BigIntBase extends PrimitiveHeapObject
- generates 'TNode<BigInt>' {}
+ generates 'TNode<BigInt>';
type BigInt extends BigIntBase;
@noVerifier
@hasSameInstanceTypeAsParent
@doNotGenerateCast
-extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
-}
+extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>';
Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
assert(bigint::IsCanonicalized(i));
diff --git a/deps/v8/src/objects/cell.tq b/deps/v8/src/objects/cell.tq
index ef9b281104..c318d40065 100644
--- a/deps/v8/src/objects/cell.tq
+++ b/deps/v8/src/objects/cell.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
@generatePrint
extern class Cell extends HeapObject {
value: Object;
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 7436f3982d..cae02edc23 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -31,8 +31,10 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/code-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(DeoptimizationData, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase)
+TQ_OBJECT_CONSTRUCTORS_IMPL(BytecodeArray)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
@@ -40,7 +42,6 @@ OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
CAST_ACCESSOR(AbstractCode)
-CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
@@ -524,8 +525,8 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
inline bool Code::is_baseline_trampoline_builtin() const {
return builtin_id() != Builtin::kNoBuiltinId &&
(builtin_id() == Builtin::kBaselineOutOfLinePrologue ||
- builtin_id() == Builtin::kBaselineEnterAtBytecode ||
- builtin_id() == Builtin::kBaselineEnterAtNextBytecode);
+ builtin_id() == Builtin::kBaselineOrInterpreterEnterAtBytecode ||
+ builtin_id() == Builtin::kBaselineOrInterpreterEnterAtNextBytecode);
}
inline bool Code::is_baseline_leave_frame_builtin() const {
@@ -799,8 +800,8 @@ bool Code::IsExecutable() {
// concurrent marker.
STATIC_ASSERT(FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) ==
kInt32Size);
-RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
- kKindSpecificFlagsOffset)
+IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
+ kKindSpecificFlagsOffset)
ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
V8_EXTERNAL_CODE_SPACE_BOOL)
RELAXED_ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
@@ -915,13 +916,13 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
}
int BytecodeArray::osr_loop_nesting_level() const {
- return ReadField<int8_t>(kOsrNestingLevelOffset);
+ return ReadField<int8_t>(kOsrLoopNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WriteField<int8_t>(kOsrNestingLevelOffset, depth);
+ WriteField<int8_t>(kOsrLoopNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
@@ -943,11 +944,6 @@ int32_t BytecodeArray::parameter_count() const {
return ReadField<int32_t>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
-ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
-ACCESSORS(BytecodeArray, handler_table, ByteArray, kHandlerTableOffset)
-RELEASE_ACQUIRE_ACCESSORS(BytecodeArray, source_position_table, Object,
- kSourcePositionTableOffset)
-
void BytecodeArray::clear_padding() {
int data_size = kHeaderSize + length();
memset(reinterpret_cast<void*>(address() + data_size), 0,
@@ -1008,6 +1004,9 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(LazyDeoptCount, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
+#ifdef DEBUG
+DEFINE_DEOPT_ENTRY_ACCESSORS(NodeId, Smi)
+#endif // DEBUG
BytecodeOffset DeoptimizationData::GetBytecodeOffset(int i) {
return BytecodeOffset(BytecodeOffsetRaw(i).value());
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 671b95efcc..e2a4528d0d 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -460,13 +460,21 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) {
int deopt_count = DeoptCount();
os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
if (0 != deopt_count) {
+#ifdef DEBUG
+ os << " index bytecode-offset node-id pc";
+#else // DEBUG
os << " index bytecode-offset pc";
+#endif // DEBUG
if (FLAG_print_code_verbose) os << " commands";
os << "\n";
}
for (int i = 0; i < deopt_count; i++) {
os << std::setw(6) << i << " " << std::setw(15)
- << GetBytecodeOffset(i).ToInt() << " " << std::setw(4);
+ << GetBytecodeOffset(i).ToInt() << " "
+#ifdef DEBUG
+ << std::setw(7) << NodeId(i).value() << " "
+#endif // DEBUG
+ << std::setw(4);
print_pc(os, Pc(i).value());
os << std::setw(2);
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 3bcff9fa95..2d6fc3e983 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -32,6 +32,8 @@ namespace interpreter {
class Register;
} // namespace interpreter
+#include "torque-generated/src/objects/code-tq.inc"
+
// CodeDataContainer is a container for all mutable fields associated with its
// referencing {Code} object. Since {Code} objects reside on write-protected
// pages within the heap, its header fields need to be immutable. There always
@@ -77,11 +79,13 @@ class CodeDataContainer : public HeapObject {
// Layout description.
#define CODE_DATA_FIELDS(V) \
/* Strong pointer fields. */ \
- V(kCodeOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
V(kPointerFieldsStrongEndOffset, 0) \
/* Weak pointer fields. */ \
V(kNextCodeLinkOffset, kTaggedSize) \
V(kPointerFieldsWeakEndOffset, 0) \
+ /* Strong Code pointer fields. */ \
+ V(kCodeOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
+ V(kCodePointerFieldsStrongEndOffset, 0) \
/* Raw data fields. */ \
V(kCodeEntryPointOffset, \
V8_EXTERNAL_CODE_SPACE_BOOL ? kExternalPointerSize : 0) \
@@ -826,7 +830,8 @@ class DependentCode : public WeakFixedArray {
};
// BytecodeArray represents a sequence of interpreter bytecodes.
-class BytecodeArray : public FixedArrayBase {
+class BytecodeArray
+ : public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> {
public:
enum Age {
kNoAgeBytecodeAge = 0,
@@ -879,21 +884,6 @@ class BytecodeArray : public FixedArrayBase {
inline Age bytecode_age() const;
inline void set_bytecode_age(Age age);
- // Accessors for the constant pool.
- DECL_ACCESSORS(constant_pool, FixedArray)
-
- // Accessors for handler table containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, ByteArray)
-
- // Accessors for source position table. Can contain:
- // * undefined (initial value)
- // * empty_byte_array (for bytecode generated for functions that will never
- // have source positions, e.g. native functions).
- // * ByteArray (when source positions have been collected for the bytecode)
- // * exception (when an error occurred while explicitly collecting source
- // positions for pre-existing bytecode).
- DECL_RELEASE_ACQUIRE_ACCESSORS(source_position_table, Object)
-
inline bool HasSourcePositionTable() const;
inline bool DidSourcePositionGenerationFail() const;
@@ -907,8 +897,6 @@ class BytecodeArray : public FixedArrayBase {
// as it would if no attempt was ever made to collect source positions.
inline void SetSourcePositionsFailedToCollect();
- DECL_CAST(BytecodeArray)
-
// Dispatched behavior.
inline int BytecodeArraySize();
@@ -933,14 +921,10 @@ class BytecodeArray : public FixedArrayBase {
// is deterministic.
inline void clear_padding();
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- TORQUE_GENERATED_BYTECODE_ARRAY_FIELDS)
-
// InterpreterEntryTrampoline expects these fields to be next to each other
// and writes a 16-bit value to reset them.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- kOsrNestingLevelOffset + kCharSize);
+ kOsrLoopNestingLevelOffset + kCharSize);
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
@@ -949,7 +933,11 @@ class BytecodeArray : public FixedArrayBase {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(BytecodeArray, FixedArrayBase);
+ private:
+ // Hide accessors inherited from generated class. Use parameter_count instead.
+ DECL_INT_ACCESSORS(parameter_size)
+
+ TQ_OBJECT_CONSTRUCTORS(BytecodeArray)
};
// DeoptimizationData is a fixed array used to hold the deoptimization data for
@@ -978,7 +966,12 @@ class DeoptimizationData : public FixedArray {
static const int kBytecodeOffsetRawOffset = 0;
static const int kTranslationIndexOffset = 1;
static const int kPcOffset = 2;
+#ifdef DEBUG
+ static const int kNodeIdOffset = 3;
+ static const int kDeoptEntrySize = 4;
+#else // DEBUG
static const int kDeoptEntrySize = 3;
+#endif // DEBUG
// Simple element accessors.
#define DECL_ELEMENT_ACCESSORS(name, type) \
@@ -1007,6 +1000,9 @@ class DeoptimizationData : public FixedArray {
DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
DECL_ENTRY_ACCESSORS(Pc, Smi)
+#ifdef DEBUG
+ DECL_ENTRY_ACCESSORS(NodeId, Smi)
+#endif // DEBUG
#undef DECL_ENTRY_ACCESSORS
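
The DEBUG-only NodeId column changes the per-entry stride of the flat deoptimization table from 3 to 4 slots. A rough sketch of how such fixed-stride entries are indexed, assuming a simple zero-based layout (the real class has its own header slots and accessor macros):

#include <cstddef>
#include <vector>

struct DeoptTableSketch {
  // Per-entry slot indices, matching the constants in the diff.
  static constexpr int kBytecodeOffsetRaw = 0;
  static constexpr int kTranslationIndex = 1;
  static constexpr int kPc = 2;
#ifdef DEBUG
  static constexpr int kNodeId = 3;
  static constexpr int kEntrySize = 4;  // one extra debug-only column
#else
  static constexpr int kEntrySize = 3;
#endif

  std::vector<int> slots;  // stand-in for the underlying FixedArray payload

  // Slot index of `field` within entry `i` (no header slots assumed here).
  static size_t SlotOf(int i, int field) {
    return static_cast<size_t>(i) * kEntrySize + field;
  }
  int Pc(int i) const { return slots[SlotOf(i, kPc)]; }
};
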
diff --git a/deps/v8/src/objects/code.tq b/deps/v8/src/objects/code.tq
index ccf7f130c8..c51b187107 100644
--- a/deps/v8/src/objects/code.tq
+++ b/deps/v8/src/objects/code.tq
@@ -7,12 +7,22 @@ type DependentCode extends WeakFixedArray;
extern class BytecodeArray extends FixedArrayBase {
// TODO(v8:8983): bytecode array object sizes vary based on their contents.
constant_pool: FixedArray;
+ // The handler table contains offsets of exception handlers.
handler_table: ByteArray;
+ // Source position table. Can contain:
+ // * undefined (initial value)
+ // * empty_byte_array (for bytecode generated for functions that will never
+ // have source positions, e.g. native functions).
+ // * ByteArray (when source positions have been collected for the bytecode)
+ // * exception (when an error occurred while explicitly collecting source
+ // positions for pre-existing bytecode).
+ @cppAcquireLoad
+ @cppReleaseStore
source_position_table: Undefined|ByteArray|Exception;
frame_size: int32;
parameter_size: int32;
incoming_new_target_or_generator_register: int32;
- osr_nesting_level: int8;
+ osr_loop_nesting_level: int8;
bytecode_age: int8;
}
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 99c2356bce..0d20c0b502 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -48,7 +48,12 @@ Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
Context ScriptContextTable::get_context(int i) const {
DCHECK_LT(i, used(kAcquireLoad));
- return Context::cast(this->get(i + kFirstContextSlotIndex));
+ return Context::cast(get(i + kFirstContextSlotIndex));
+}
+
+Context ScriptContextTable::get_context(int i, AcquireLoadTag tag) const {
+ DCHECK_LT(i, used(kAcquireLoad));
+ return Context::cast(get(i + kFirstContextSlotIndex, tag));
}
TQ_OBJECT_CONSTRUCTORS_IMPL(Context)
@@ -56,6 +61,8 @@ NEVER_READ_ONLY_SPACE_IMPL(Context)
CAST_ACCESSOR(NativeContext)
+RELAXED_SMI_ACCESSORS(Context, length, kLengthOffset)
+
Object Context::get(int index) const {
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return get(cage_base, index);
@@ -63,14 +70,14 @@ Object Context::get(int index) const {
Object Context::get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned int>(index),
- static_cast<unsigned int>(length()));
+ static_cast<unsigned int>(length(kRelaxedLoad)));
return TaggedField<Object>::Relaxed_Load(cage_base, *this,
OffsetOfElementAt(index));
}
void Context::set(int index, Object value, WriteBarrierMode mode) {
DCHECK_LT(static_cast<unsigned int>(index),
- static_cast<unsigned int>(length()));
+ static_cast<unsigned int>(length(kRelaxedLoad)));
const int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
@@ -84,14 +91,14 @@ Object Context::get(int index, AcquireLoadTag tag) const {
Object Context::get(PtrComprCageBase cage_base, int index,
AcquireLoadTag) const {
DCHECK_LT(static_cast<unsigned int>(index),
- static_cast<unsigned int>(length()));
+ static_cast<unsigned int>(length(kRelaxedLoad)));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
}
void Context::set(int index, Object value, WriteBarrierMode mode,
ReleaseStoreTag) {
DCHECK_LT(static_cast<unsigned int>(index),
- static_cast<unsigned int>(length()));
+ static_cast<unsigned int>(length(kRelaxedLoad)));
const int offset = OffsetOfElementAt(index);
RELEASE_WRITE_FIELD(*this, offset, value);
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index 899f795ddf..4d09f69978 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -31,8 +31,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
result = table;
}
DCHECK(script_context->IsScriptContext());
- result->set(used + kFirstContextSlotIndex, *script_context);
-
+ result->set(used + kFirstContextSlotIndex, *script_context, kReleaseStore);
result->set_used(used + 1, kReleaseStore);
return result;
}
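
Extend now pairs a release store of the new slot (and of `used`) with the acquire-loading get_context overload added in contexts-inl.h. The general publication pattern, sketched with std::atomic and a single writer assumed:

#include <array>
#include <atomic>

struct PublishedTable {
  std::array<std::atomic<int>, 16> slots;
  std::atomic<int> used{0};

  // Single writer assumed, as for ScriptContextTable::Extend.
  void Append(int value) {
    int n = used.load(std::memory_order_relaxed);
    slots[n].store(value, std::memory_order_release);  // publish the slot
    used.store(n + 1, std::memory_order_release);      // then the new size
  }

  // A reader that observes the larger `used` also sees the slot contents,
  // thanks to the acquire/release pairing.
  bool TryGet(int i, int* out) const {
    if (i >= used.load(std::memory_order_acquire)) return false;
    *out = slots[i].load(std::memory_order_acquire);
    return true;
  }
};
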
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 604d81184e..7fae0c9e0d 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -132,8 +132,6 @@ enum ContextLookupFlags {
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(ASYNC_GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
async_generator_object_prototype_map) \
- V(GROWABLE_SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, \
- growable_shared_array_buffer_fun) \
V(INITIAL_ARRAY_ITERATOR_MAP_INDEX, Map, initial_array_iterator_map) \
V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject, \
initial_array_iterator_prototype) \
@@ -245,7 +243,6 @@ enum ContextLookupFlags {
V(REGEXP_SPLIT_FUNCTION_INDEX, JSFunction, regexp_split_function) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_regexp_string_iterator_prototype_map) \
- V(RESIZABLE_ARRAY_BUFFER_FUN_INDEX, JSFunction, resizable_array_buffer_fun) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_EXECUTION_CALLBACK_INDEX, Object, script_execution_callback) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
@@ -294,6 +291,7 @@ enum ContextLookupFlags {
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(WASM_EXPORTED_FUNCTION_MAP_INDEX, Map, wasm_exported_function_map) \
+ V(WASM_TAG_CONSTRUCTOR_INDEX, JSFunction, wasm_tag_constructor) \
V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \
V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
@@ -339,6 +337,8 @@ enum ContextLookupFlags {
V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
wasm_runtime_error_function) \
+ V(WASM_EXCEPTION_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_exception_error_function) \
V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKMAP_DELETE_INDEX, JSFunction, weakmap_delete) \
@@ -365,6 +365,7 @@ class ScriptContextTable : public FixedArray {
Handle<ScriptContextTable> table,
int i);
inline Context get_context(int i) const;
+ inline Context get_context(int i, AcquireLoadTag tag) const;
// Lookup a variable `name` in a ScriptContextTable.
// If it returns true, the variable is found and `result` contains
@@ -436,6 +437,10 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
+ using TorqueGeneratedContext::length; // Non-atomic.
+ using TorqueGeneratedContext::set_length; // Non-atomic.
+ DECL_RELAXED_SMI_ACCESSORS(length)
+
// Setter and getter for elements.
// Note the plain accessors use relaxed semantics.
// TODO(jgruber): Make that explicit through tags.
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index f18f499294..7a00bcb262 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -16,12 +16,7 @@ namespace internal {
#include "torque-generated/src/objects/data-handler-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(DataHandler, Struct)
-
-CAST_ACCESSOR(DataHandler)
-
-ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
-ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(DataHandler)
int DataHandler::data_field_count() const {
return (map().instance_size() - kSizeWithData0) / kTaggedSize;
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index e27b5be83f..9310824af0 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -18,17 +18,8 @@ namespace internal {
// DataHandler is a base class for load and store handlers that can't be
// encoded in one Smi. Kind of a handler can be deduced from instance type.
-class DataHandler : public Struct {
+class DataHandler : public TorqueGeneratedDataHandler<DataHandler, Struct> {
public:
- // [smi_handler]: A Smi which encodes a handler or Code object (we still
- // use code handlers for accessing lexical environment variables, but soon
- // only smi handlers will remain). See LoadHandler and StoreHandler for
- // details about encoding.
- DECL_ACCESSORS(smi_handler, Object)
-
- // [validity_cell]: A validity Cell that guards prototype chain modifications.
- DECL_ACCESSORS(validity_cell, Object)
-
// Returns number of optional data fields available in the object.
inline int data_field_count() const;
@@ -38,21 +29,16 @@ class DataHandler : public Struct {
DECL_ACCESSORS(data2, MaybeObject)
DECL_ACCESSORS(data3, MaybeObject)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_DATA_HANDLER_FIELDS)
-
static const int kSizeWithData0 = kData1Offset;
static const int kSizeWithData1 = kData2Offset;
static const int kSizeWithData2 = kData3Offset;
static const int kSizeWithData3 = kHeaderSize;
- DECL_CAST(DataHandler)
-
DECL_VERIFIER(DataHandler)
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(DataHandler, Struct);
+ TQ_OBJECT_CONSTRUCTORS(DataHandler)
};
} // namespace internal
diff --git a/deps/v8/src/objects/data-handler.tq b/deps/v8/src/objects/data-handler.tq
index 79deb3a9a6..78bd31e536 100644
--- a/deps/v8/src/objects/data-handler.tq
+++ b/deps/v8/src/objects/data-handler.tq
@@ -4,12 +4,18 @@
@abstract
extern class DataHandler extends Struct {
+ // [smi_handler]: A Smi which encodes a handler or Code object (we still
+ // use code handlers for accessing lexical environment variables, but soon
+ // only smi handlers will remain). See LoadHandler and StoreHandler for
+ // details about encoding.
@if(V8_EXTERNAL_CODE_SPACE) smi_handler: Smi|CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) smi_handler: Smi|Code;
+
+ // [validity_cell]: A validity Cell that guards prototype chain modifications.
validity_cell: Smi|Cell;
// Space for the following fields may or may not be allocated.
- @noVerifier data_1: MaybeObject;
- @noVerifier data_2: MaybeObject;
- @noVerifier data_3: MaybeObject;
+ @noVerifier data1: MaybeObject;
+ @noVerifier data2: MaybeObject;
+ @noVerifier data3: MaybeObject;
}
diff --git a/deps/v8/src/objects/debug-objects.tq b/deps/v8/src/objects/debug-objects.tq
index 1df2e7a484..16e5cb43c6 100644
--- a/deps/v8/src/objects/debug-objects.tq
+++ b/deps/v8/src/objects/debug-objects.tq
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
@generatePrint
extern class BreakPoint extends Struct {
id: Smi;
condition: String;
}
-@generateCppClass
@generatePrint
extern class BreakPointInfo extends Struct {
// The position in the source for the break position.
@@ -34,7 +32,6 @@ bitfield struct DebuggerHints extends uint31 {
debugging_id: int32: 20 bit;
}
-@generateCppClass
@generatePrint
extern class DebugInfo extends Struct {
shared: SharedFunctionInfo;
@@ -69,7 +66,6 @@ struct CoverageInfoSlot {
// CoverageInfo's visitor is included in DATA_ONLY_VISITOR_ID_LIST, so it must
// not contain any HeapObject fields.
-@generateCppClass
extern class CoverageInfo extends HeapObject {
const slot_count: int32;
slots[slot_count]: CoverageInfoSlot;
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index eb86a3343e..a97722d4b9 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -3,7 +3,6 @@
// found in the LICENSE file.
@generatePrint
-@generateCppClass
extern class EnumCache extends Struct {
keys: FixedArray;
indices: FixedArray;
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index f7da82caf7..0d546aba8d 100644
--- a/deps/v8/src/objects/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -197,6 +197,12 @@ inline bool IsTypedArrayOrRabGsabTypedArrayElementsKind(ElementsKind kind) {
LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
}
+inline bool IsBigIntTypedArrayElementsKind(ElementsKind kind) {
+ return kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
+ kind == RAB_GSAB_BIGINT64_ELEMENTS ||
+ kind == RAB_GSAB_BIGUINT64_ELEMENTS;
+}
+
inline bool IsWasmArrayElementsKind(ElementsKind kind) {
return kind == WASM_ARRAY_ELEMENTS;
}
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index dd137aab55..4eedf3d6c0 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -3296,7 +3296,7 @@ class TypedElementsAccessor
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
DCHECK(!typed_array->WasDetached());
DCHECK_LE(start, end);
- DCHECK_LE(end, typed_array->length());
+ DCHECK_LE(end, typed_array->GetLength());
DisallowGarbageCollection no_gc;
ElementType scalar = FromHandle(value);
ElementType* data = static_cast<ElementType*>(typed_array->DataPtr());
diff --git a/deps/v8/src/objects/embedder-data-array.tq b/deps/v8/src/objects/embedder-data-array.tq
index a3c4b6868e..5d934384db 100644
--- a/deps/v8/src/objects/embedder-data-array.tq
+++ b/deps/v8/src/objects/embedder-data-array.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class EmbedderDataArray extends HeapObject {
// length of the array in an embedder data slots.
  // length of the array in embedder data slots.
length: Smi;
diff --git a/deps/v8/src/objects/feedback-cell.tq b/deps/v8/src/objects/feedback-cell.tq
index 426bd1cb4e..c3bebd1316 100644
--- a/deps/v8/src/objects/feedback-cell.tq
+++ b/deps/v8/src/objects/feedback-cell.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class FeedbackCell extends Struct {
value: Undefined|FeedbackVector|FixedArray;
interrupt_budget: int32;
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 90deada6f4..6318998385 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -112,7 +112,16 @@ FeedbackMetadata FeedbackVector::metadata() const {
return shared_function_info().feedback_metadata();
}
-void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
+FeedbackMetadata FeedbackVector::metadata(AcquireLoadTag tag) const {
+ return shared_function_info().feedback_metadata(tag);
+}
+
+RELAXED_INT32_ACCESSORS(FeedbackVector, invocation_count,
+ kInvocationCountOffset)
+
+void FeedbackVector::clear_invocation_count(RelaxedStoreTag tag) {
+ set_invocation_count(0, tag);
+}
Code FeedbackVector::optimized_code() const {
MaybeObject slot = maybe_optimized_code(kAcquireLoad);
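
invocation_count becomes a relaxed int32 accessor because only its value matters, not its ordering relative to other fields. A std::atomic analog of that accessor style (names are illustrative):

#include <atomic>
#include <cstdint>

class ProfileCounter {
 public:
  void Increment() { count_.fetch_add(1, std::memory_order_relaxed); }
  int32_t Value() const { return count_.load(std::memory_order_relaxed); }
  void Clear() { count_.store(0, std::memory_order_relaxed); }

 private:
  std::atomic<int32_t> count_{0};
};
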
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 5d4ddafda8..f50121aa61 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -4,6 +4,8 @@
#include "src/objects/feedback-vector.h"
+#include "src/common/globals.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
@@ -206,6 +208,12 @@ FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const {
return metadata().GetKind(slot);
}
+FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot,
+ AcquireLoadTag tag) const {
+ DCHECK(!is_empty());
+ return metadata(tag).GetKind(slot);
+}
+
FeedbackSlot FeedbackVector::GetTypeProfileSlot() const {
DCHECK(metadata().HasTypeProfileSlot());
FeedbackSlot slot =
@@ -561,7 +569,7 @@ FeedbackNexus::FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot,
const NexusConfig& config)
: vector_handle_(vector),
slot_(slot),
- kind_(vector->GetKind(slot)),
+ kind_(vector->GetKind(slot, kAcquireLoad)),
config_(config) {}
Handle<WeakFixedArray> FeedbackNexus::CreateArrayOfSize(int length) {
@@ -1019,7 +1027,7 @@ CallFeedbackContent FeedbackNexus::GetCallFeedbackContent() {
float FeedbackNexus::ComputeCallFrequency() {
DCHECK(IsCallICKind(kind()));
- double const invocation_count = vector().invocation_count();
+ double const invocation_count = vector().invocation_count(kRelaxedLoad);
double const call_count = GetCallCount();
if (invocation_count == 0.0) { // Prevent division by 0.
return 0.0f;
@@ -1243,14 +1251,14 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
continue;
} else {
Code code = FromCodeT(CodeT::cast(data_handler->smi_handler()));
- handler = handle(code, vector().GetIsolate());
+ handler = config()->NewHandle(code);
}
} else if (maybe_code_handler.object()->IsSmi()) {
// Skip for Proxy Handlers.
- if (*(maybe_code_handler.object()) ==
- *StoreHandler::StoreProxy(GetIsolate()))
+ if (*maybe_code_handler.object() == StoreHandler::StoreProxy()) {
continue;
+ }
// Decode the KeyedAccessStoreMode information from the Handler.
mode = StoreHandler::GetKeyedAccessStoreMode(*maybe_code_handler);
if (mode != STANDARD_STORE) return mode;
@@ -1259,7 +1267,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
// Element store without prototype chain check.
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
Code code = FromCodeT(CodeT::cast(*maybe_code_handler.object()));
- handler = handle(code, vector().GetIsolate());
+ handler = config()->NewHandle(code);
} else {
handler = Handle<Code>::cast(maybe_code_handler.object());
}
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 247f8ec3c6..9c9e44071a 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -215,11 +215,16 @@ class FeedbackVector
inline bool is_empty() const;
inline FeedbackMetadata metadata() const;
+ inline FeedbackMetadata metadata(AcquireLoadTag tag) const;
// Increment profiler ticks, saturating at the maximal value.
void SaturatingIncrementProfilerTicks();
- inline void clear_invocation_count();
+ // Forward declare the non-atomic accessors.
+ using TorqueGeneratedFeedbackVector::invocation_count;
+ using TorqueGeneratedFeedbackVector::set_invocation_count;
+ DECL_RELAXED_INT32_ACCESSORS(invocation_count)
+ inline void clear_invocation_count(RelaxedStoreTag tag);
inline Code optimized_code() const;
inline bool has_optimized_code() const;
@@ -269,6 +274,8 @@ class FeedbackVector
// Returns slot kind for given slot.
V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const;
+ V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot,
+ AcquireLoadTag tag) const;
FeedbackSlot GetTypeProfileSlot() const;
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index 38c88d403f..5c1fbd4e4e 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -12,7 +12,6 @@ bitfield struct FeedbackVectorFlags extends uint32 {
}
@generateBodyDescriptor
-@generateCppClass
extern class FeedbackVector extends HeapObject {
const length: int32;
invocation_count: int32;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 0990ee0c64..98315ad73d 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -84,7 +84,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -92,7 +91,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -446,6 +444,16 @@ void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
set_objects(index, value, mode);
}
+Handle<WeakFixedArray> WeakFixedArray::EnsureSpace(Isolate* isolate,
+ Handle<WeakFixedArray> array,
+ int length) {
+ if (array->length() < length) {
+ int grow_by = length - array->length();
+ array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by);
+ }
+ return array;
+}
+
MaybeObjectSlot WeakFixedArray::data_start() {
return RawMaybeWeakField(kObjectsOffset);
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 74f5ecb305..1dfd7dac13 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,18 +134,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
@@ -296,6 +285,10 @@ class WeakFixedArray
int index, MaybeObject value,
WriteBarrierMode mode = WriteBarrierMode::UPDATE_WRITE_BARRIER);
+ static inline Handle<WeakFixedArray> EnsureSpace(Isolate* isolate,
+ Handle<WeakFixedArray> array,
+ int length);
+
// Forward declare the non-atomic (set_)length defined in torque.
using TorqueGeneratedWeakFixedArray::length;
using TorqueGeneratedWeakFixedArray::set_length;
diff --git a/deps/v8/src/objects/fixed-array.tq b/deps/v8/src/objects/fixed-array.tq
index 31198d70d4..3daa5bad49 100644
--- a/deps/v8/src/objects/fixed-array.tq
+++ b/deps/v8/src/objects/fixed-array.tq
@@ -3,51 +3,41 @@
// found in the LICENSE file.
@abstract
-@generateCppClass
extern class FixedArrayBase extends HeapObject {
// length of the array.
const length: Smi;
}
@generateBodyDescriptor
-@generateCppClass
extern class FixedArray extends FixedArrayBase {
objects[length]: Object;
}
type EmptyFixedArray extends FixedArray;
-@generateCppClass
extern class FixedDoubleArray extends FixedArrayBase {
floats[length]: float64_or_hole;
}
@generateBodyDescriptor
-@generateCppClass
extern class WeakFixedArray extends HeapObject {
const length: Smi;
@cppRelaxedLoad objects[length]: MaybeObject;
}
-@generateCppClass
-extern class ByteArray extends FixedArrayBase {
- bytes[length]: uint8;
-}
+extern class ByteArray extends FixedArrayBase { bytes[length]: uint8; }
@hasSameInstanceTypeAsParent
-@generateCppClass
@doNotGenerateCast
extern class ArrayList extends FixedArray {
}
@hasSameInstanceTypeAsParent
-@generateCppClass
@doNotGenerateCast
extern class TemplateList extends FixedArray {
}
@generateBodyDescriptor
-@generateCppClass
extern class WeakArrayList extends HeapObject {
const capacity: Smi;
length: Smi;
diff --git a/deps/v8/src/objects/foreign.tq b/deps/v8/src/objects/foreign.tq
index 872da31e3b..be7113769f 100644
--- a/deps/v8/src/objects/foreign.tq
+++ b/deps/v8/src/objects/foreign.tq
@@ -3,7 +3,6 @@
// found in the LICENSE file.
@apiExposedInstanceTypeValue(0x46)
-@generateCppClass
extern class Foreign extends HeapObject {
foreign_address: ExternalPointer;
}
diff --git a/deps/v8/src/objects/free-space.tq b/deps/v8/src/objects/free-space.tq
index 501326b04d..5fc8767a58 100644
--- a/deps/v8/src/objects/free-space.tq
+++ b/deps/v8/src/objects/free-space.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class FreeSpace extends HeapObject {
size: Smi;
next: FreeSpace|Uninitialized;
diff --git a/deps/v8/src/objects/heap-number.tq b/deps/v8/src/objects/heap-number.tq
index 09112e0e86..af2545c1fb 100644
--- a/deps/v8/src/objects/heap-number.tq
+++ b/deps/v8/src/objects/heap-number.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class HeapNumber extends PrimitiveHeapObject {
// Marked as a relaxed store because of a race with reading on the
// compiler thread.
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 0ee86603ce..94fdf7eeb1 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -195,7 +195,8 @@ class HeapObject : public Object {
bool CanBeRehashed() const;
// Rehash the object based on the layout inferred from its map.
- void RehashBasedOnMap(Isolate* isolate);
+ template <typename IsolateT>
+ void RehashBasedOnMap(IsolateT* isolate);
// Layout description.
#define HEAP_OBJECT_FIELDS(V) \
diff --git a/deps/v8/src/objects/heap-object.tq b/deps/v8/src/objects/heap-object.tq
index ca794032f3..deea761965 100644
--- a/deps/v8/src/objects/heap-object.tq
+++ b/deps/v8/src/objects/heap-object.tq
@@ -4,6 +4,7 @@
@abstract
@doNotGenerateCast
+@doNotGenerateCppClass
extern class HeapObject extends StrongTagged {
const map: Map;
}
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index c30c247d25..f7cdd28c05 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -149,7 +149,8 @@ constexpr InstanceType LAST_STRING_TYPE =
STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
-STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
+STATIC_ASSERT(FIRST_JS_API_OBJECT_TYPE == Internals::kFirstJSApiObjectType);
+STATIC_ASSERT(LAST_JS_API_OBJECT_TYPE == Internals::kLastJSApiObjectType);
STATIC_ASSERT(JS_SPECIAL_API_OBJECT_TYPE == Internals::kJSSpecialApiObjectType);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 0fa5737ec7..7ea8aeb3e5 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -200,13 +200,24 @@ bool JSTypedArray::IsVariableLength() const {
return is_length_tracking() || is_backed_by_rab();
}
-size_t JSTypedArray::GetLength() const {
+size_t JSTypedArray::GetLengthOrOutOfBounds(bool& out_of_bounds) const {
+ DCHECK(!out_of_bounds);
if (WasDetached()) return 0;
if (is_length_tracking()) {
if (is_backed_by_rab()) {
- return buffer().byte_length() / element_size();
+ if (byte_offset() >= buffer().byte_length()) {
+ out_of_bounds = true;
+ return 0;
+ }
+ return (buffer().byte_length() - byte_offset()) / element_size();
+ }
+ if (byte_offset() >=
+ buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst)) {
+ out_of_bounds = true;
+ return 0;
}
- return buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst) /
+ return (buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst) -
+ byte_offset()) /
element_size();
}
size_t array_length = LengthUnchecked();
@@ -215,12 +226,18 @@ size_t JSTypedArray::GetLength() const {
// JSTypedArray.
if (byte_offset() + array_length * element_size() >
buffer().byte_length()) {
+ out_of_bounds = true;
return 0;
}
}
return array_length;
}
+size_t JSTypedArray::GetLength() const {
+ bool out_of_bounds = false;
+ return GetLengthOrOutOfBounds(out_of_bounds);
+}
+
void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
InitExternalPointerField(kExternalPointerOffset, isolate);
}
@@ -356,6 +373,17 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
}
+ if (V8_UNLIKELY(array->IsVariableLength())) {
+ bool out_of_bounds = false;
+ array->GetLengthOrOutOfBounds(out_of_bounds);
+ if (out_of_bounds) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
+ }
+ }
+
  // The spec says to return `buffer`, but that may disrupt current
  // implementations, and it's more useful to return the array for now.
return array;
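
GetLengthOrOutOfBounds treats a length-tracking view as the bytes remaining after byte_offset, and flags the view as out of bounds once byte_offset reaches the buffer's length. The core arithmetic, extracted as a free function for illustration:

#include <cstddef>

size_t LengthOrOutOfBounds(size_t buffer_byte_length, size_t byte_offset,
                           size_t element_size, bool* out_of_bounds) {
  if (byte_offset >= buffer_byte_length) {
    *out_of_bounds = true;
    return 0;
  }
  return (buffer_byte_length - byte_offset) / element_size;
}
// E.g. a 40-byte resizable buffer viewed as int32 from offset 8 yields
// (40 - 8) / 4 == 8 elements; shrinking the buffer to 8 bytes or fewer
// makes the same view out of bounds.
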
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 120c4d8bde..917a055b46 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -59,6 +59,7 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
if (!backing_store) {
set_backing_store(GetIsolate(), nullptr);
set_byte_length(0);
+ set_max_byte_length(0);
} else {
Attach(std::move(backing_store));
}
@@ -72,6 +73,9 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
DCHECK_NOT_NULL(backing_store);
DCHECK_EQ(is_shared(), backing_store->is_shared());
DCHECK_EQ(is_resizable(), backing_store->is_resizable());
+ DCHECK_IMPLIES(
+ !backing_store->is_wasm_memory() && !backing_store->is_resizable(),
+ backing_store->byte_length() == backing_store->max_byte_length());
DCHECK(!was_detached());
Isolate* isolate = GetIsolate();
set_backing_store(isolate, backing_store->buffer_start());
@@ -82,6 +86,7 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
} else {
set_byte_length(backing_store->byte_length());
}
+ set_max_byte_length(backing_store->max_byte_length());
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
Heap* heap = isolate->heap();
@@ -222,11 +227,12 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
// 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
// 3. If Type(P) is String, then
PropertyKey lookup_key(isolate, key);
- if (lookup_key.is_element() || key->IsString()) {
+ if (lookup_key.is_element() || key->IsSmi() || key->IsString()) {
// 3a. Let numericIndex be ! CanonicalNumericIndexString(P)
// 3b. If numericIndex is not undefined, then
- bool is_minus_zero;
- if (CanonicalNumericIndexString(isolate, lookup_key, &is_minus_zero)) {
+ bool is_minus_zero = false;
+ if (key->IsSmi() || // Smi keys are definitely canonical
+ CanonicalNumericIndexString(isolate, lookup_key, &is_minus_zero)) {
// 3b i. If IsInteger(numericIndex) is false, return false.
// 3b ii. If numericIndex = -0, return false.
// 3b iii. If numericIndex < 0, return false.
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index f723380772..1522f4b951 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -307,6 +307,7 @@ class JSTypedArray
DECL_BOOLEAN_ACCESSORS(is_length_tracking)
DECL_BOOLEAN_ACCESSORS(is_backed_by_rab)
inline bool IsVariableLength() const;
+ inline size_t GetLengthOrOutOfBounds(bool& out_of_bounds) const;
inline size_t GetLength() const;
static size_t LengthTrackingGsabBackedTypedArrayLength(Isolate* isolate,
@@ -364,6 +365,7 @@ class JSTypedArray
#endif
private:
+ template <typename IsolateT>
friend class Deserializer;
friend class Factory;
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index ddd90d4c81..914720457d 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -11,7 +11,6 @@ bitfield struct JSArrayBufferFlags extends uint32 {
is_resizable: bool: 1 bit;
}
-@generateCppClass
extern class JSArrayBuffer extends JSObject {
byte_length: uintptr;
max_byte_length: uintptr;
@@ -42,7 +41,6 @@ macro IsResizableArrayBuffer(buffer: JSArrayBuffer): bool {
}
@abstract
-@generateCppClass
extern class JSArrayBufferView extends JSObject {
buffer: JSArrayBuffer;
byte_offset: uintptr;
@@ -59,7 +57,6 @@ bitfield struct JSTypedArrayFlags extends uint32 {
is_backed_by_rab: bool: 1 bit;
}
-@generateCppClass
extern class JSTypedArray extends JSArrayBufferView {
length: uintptr;
external_pointer: ExternalPointer;
@@ -80,7 +77,6 @@ macro IsLengthTrackingTypedArray(array: JSTypedArray): bool {
return array.bit_field.is_length_tracking;
}
-@generateCppClass
extern class JSDataView extends JSArrayBufferView {
data_pointer: ExternalPointer;
}
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index 8e238b9f8b..3ccf37b150 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -4,6 +4,7 @@
extern enum IterationKind extends uint31 { kKeys, kValues, kEntries }
+@doNotGenerateCppClass
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
next_index: Number;
@@ -24,6 +25,7 @@ macro CreateArrayIterator(implicit context: NativeContext)(
};
}
+@doNotGenerateCppClass
extern class JSArray extends JSObject {
macro IsEmpty(): bool {
return this.length == 0;
@@ -66,6 +68,10 @@ transient type FastJSArrayForRead extends JSArray;
// A FastJSArray when the global ArraySpeciesProtector is not invalidated.
transient type FastJSArrayForCopy extends FastJSArray;
+// A FastJSArrayForCopy when the global IsConcatSpreadableProtector is not
+// invalidated.
+transient type FastJSArrayForConcat extends FastJSArrayForCopy;
+
// A FastJSArray when the global ArrayIteratorProtector is not invalidated.
transient type FastJSArrayWithNoCustomIteration extends FastJSArray;
diff --git a/deps/v8/src/objects/js-break-iterator.tq b/deps/v8/src/objects/js-break-iterator.tq
index 08d121520a..6dd9f1e5c7 100644
--- a/deps/v8/src/objects/js-break-iterator.tq
+++ b/deps/v8/src/objects/js-break-iterator.tq
@@ -4,7 +4,6 @@
#include 'src/objects/js-break-iterator.h'
-@generateCppClass
extern class JSV8BreakIterator extends JSObject {
locale: String;
break_iterator: Foreign; // Managed<icu::BreakIterator>;
diff --git a/deps/v8/src/objects/js-collator.tq b/deps/v8/src/objects/js-collator.tq
index 2e1c847534..664a096aa9 100644
--- a/deps/v8/src/objects/js-collator.tq
+++ b/deps/v8/src/objects/js-collator.tq
@@ -4,7 +4,6 @@
#include 'src/objects/js-collator.h'
-@generateCppClass
extern class JSCollator extends JSObject {
icu_collator: Foreign; // Managed<icu::Collator>
bound_compare: Undefined|JSFunction;
diff --git a/deps/v8/src/objects/js-collection-iterator.tq b/deps/v8/src/objects/js-collection-iterator.tq
index f047d97ff6..7560cfbe2c 100644
--- a/deps/v8/src/objects/js-collection-iterator.tq
+++ b/deps/v8/src/objects/js-collection-iterator.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
@abstract
extern class JSCollectionIterator extends JSObject {
// The backing hash table mapping keys to values.
diff --git a/deps/v8/src/objects/js-collection.tq b/deps/v8/src/objects/js-collection.tq
index 7839b5947a..0dbda990d2 100644
--- a/deps/v8/src/objects/js-collection.tq
+++ b/deps/v8/src/objects/js-collection.tq
@@ -3,30 +3,20 @@
// found in the LICENSE file.
@abstract
-@generateCppClass
extern class JSCollection extends JSObject {
// The backing hash table.
table: Object;
}
-@generateCppClass
-extern class JSSet extends JSCollection {
-}
-@generateCppClass
-extern class JSMap extends JSCollection {
-}
+extern class JSSet extends JSCollection {}
+extern class JSMap extends JSCollection {}
@abstract
-@generateCppClass
extern class JSWeakCollection extends JSObject {
// The backing hash table mapping keys to values.
table: Object;
}
-@generateCppClass
-extern class JSWeakSet extends JSWeakCollection {
-}
-@generateCppClass
-extern class JSWeakMap extends JSWeakCollection {
-}
+extern class JSWeakSet extends JSWeakCollection {}
+extern class JSWeakMap extends JSWeakCollection {}
@abstract extern class JSMapIterator extends JSCollectionIterator;
extern class JSMapKeyIterator extends JSMapIterator
diff --git a/deps/v8/src/objects/js-date-time-format.tq b/deps/v8/src/objects/js-date-time-format.tq
index a5d6fc96f5..fedd761cdf 100644
--- a/deps/v8/src/objects/js-date-time-format.tq
+++ b/deps/v8/src/objects/js-date-time-format.tq
@@ -13,7 +13,6 @@ bitfield struct JSDateTimeFormatFlags extends uint31 {
iso8601: bool: 1bit;
}
-@generateCppClass
extern class JSDateTimeFormat extends JSObject {
locale: String;
icu_locale: Foreign; // Managed<icu::Locale>
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index ed9e840fc8..d4f05ad739 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -238,6 +238,12 @@ class CalendarNames : public KeyValueDisplayNames {
~CalendarNames() override = default;
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
+ std::string code_str(code);
+ if (!Intl::IsWellFormedCalendar(code_str)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArgument),
+ Nothing<icu::UnicodeString>());
+ }
return KeyValueDisplayNames::of(isolate, strcmp(code, "gregory") == 0
? "gregorian"
: strcmp(code, "ethioaa") == 0
@@ -300,9 +306,7 @@ class DateTimeFieldNames : public DisplayNamesInternal {
public:
DateTimeFieldNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback)
- : locale_(locale),
- width_(StyleToUDateTimePGDisplayWidth(style)),
- fallback_(fallback) {
+ : locale_(locale), width_(StyleToUDateTimePGDisplayWidth(style)) {
UErrorCode status = U_ZERO_ERROR;
generator_.reset(
icu::DateTimePatternGenerator::createInstance(locale_, status));
@@ -315,9 +319,6 @@ class DateTimeFieldNames : public DisplayNamesInternal {
const char* code) const override {
UDateTimePatternField field = StringToUDateTimePatternField(code);
if (field == UDATPG_FIELD_COUNT) {
- if (fallback_) {
- return Just(icu::UnicodeString(code, -1, US_INV));
- }
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kInvalidArgument),
Nothing<icu::UnicodeString>());
@@ -329,7 +330,6 @@ class DateTimeFieldNames : public DisplayNamesInternal {
icu::Locale locale_;
UDateTimePGDisplayWidth width_;
std::unique_ptr<icu::DateTimePatternGenerator> generator_;
- bool fallback_;
};
DisplayNamesInternal* CreateInternal(const icu::Locale& locale,
diff --git a/deps/v8/src/objects/js-display-names.tq b/deps/v8/src/objects/js-display-names.tq
index f70c0a1a3b..d7dcf0ef39 100644
--- a/deps/v8/src/objects/js-display-names.tq
+++ b/deps/v8/src/objects/js-display-names.tq
@@ -15,7 +15,6 @@ bitfield struct JSDisplayNamesFlags extends uint31 {
language_display: JSDisplayNamesLanguageDisplay: 1 bit;
}
-@generateCppClass
extern class JSDisplayNames extends JSObject {
internal: Foreign; // Managed<DisplayNamesInternal>
flags: SmiTagged<JSDisplayNamesFlags>;
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index bae63f6ef9..275ffba14d 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -168,8 +168,12 @@ Address JSFunction::code_entry_point() const {
// TODO(ishell): Why relaxed read but release store?
DEF_GETTER(JSFunction, shared, SharedFunctionInfo) {
- return SharedFunctionInfo::cast(
- RELAXED_READ_FIELD(*this, kSharedFunctionInfoOffset));
+ return shared(cage_base, kRelaxedLoad);
+}
+
+DEF_RELAXED_GETTER(JSFunction, shared, SharedFunctionInfo) {
+ return TaggedField<SharedFunctionInfo,
+ kSharedFunctionInfoOffset>::Relaxed_Load(cage_base, *this);
}
void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
@@ -200,6 +204,10 @@ Context JSFunction::context() {
return TaggedField<Context, kContextOffset>::load(*this);
}
+DEF_RELAXED_GETTER(JSFunction, context, Context) {
+ return TaggedField<Context, kContextOffset>::Relaxed_Load(cage_base, *this);
+}
+
bool JSFunction::has_context() const {
return TaggedField<HeapObject, kContextOffset>::load(*this).IsContext();
}
@@ -258,8 +266,9 @@ DEF_GETTER(JSFunction, PrototypeRequiresRuntimeLookup, bool) {
DEF_GETTER(JSFunction, instance_prototype, HeapObject) {
DCHECK(has_instance_prototype(cage_base));
- if (has_initial_map(cage_base))
+ if (has_initial_map(cage_base)) {
return initial_map(cage_base).prototype(cage_base);
+ }
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
return HeapObject::cast(prototype_or_initial_map(cage_base, kAcquireLoad));
@@ -284,14 +293,37 @@ bool JSFunction::is_compiled() const {
shared().is_compiled();
}
+bool JSFunction::ShouldFlushBaselineCode(
+ base::EnumSet<CodeFlushMode> code_flush_mode) {
+ if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
+ // Do a raw read for shared and code fields here since this function may be
+ // called on a concurrent thread. JSFunction itself should be fully
+ // initialized here but the SharedFunctionInfo, Code objects may not be
+ // initialized. We read using acquire loads to defend against that.
+ Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
+ if (!maybe_shared.IsSharedFunctionInfo()) return false;
+
+ // See crbug.com/v8/11972 for more details on acquire / release semantics for
+ // code field. We don't use release stores when copying code pointers from
+ // SFI / FV to JSFunction but it is safe in practice.
+ Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
+ if (!maybe_code.IsCodeT()) return false;
+ Code code = FromCodeT(CodeT::cast(maybe_code));
+ if (code.kind() != CodeKind::BASELINE) return false;
+
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
+ return shared.ShouldFlushCode(code_flush_mode);
+}
+
bool JSFunction::NeedsResetDueToFlushedBytecode() {
// Do a raw read for shared and code fields here since this function may be
- // called on a concurrent thread and the JSFunction might not be fully
- // initialized yet.
+ // called on a concurrent thread. JSFunction itself should be fully
+ // initialized here but the SharedFunctionInfo, Code objects may not be
+ // initialized. We read using acquire loads to defend against that.
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
if (!maybe_shared.IsSharedFunctionInfo()) return false;
- Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
+ Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
if (!maybe_code.IsCodeT()) return false;
Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad);
@@ -299,15 +331,31 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
}
-void JSFunction::ResetIfBytecodeFlushed(
+bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
+ return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
+}
+
+void JSFunction::ResetIfCodeFlushed(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot) {
+ if (!FLAG_flush_bytecode && !FLAG_flush_baseline_code) return;
+
+ DCHECK_IMPLIES(NeedsResetDueToFlushedBytecode(), FLAG_flush_bytecode);
if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
// Bytecode was flushed and function is now uncompiled, reset JSFunction
// by setting code to CompileLazy and clearing the feedback vector.
set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
+ return;
+ }
+
+ DCHECK_IMPLIES(NeedsResetDueToFlushedBaselineCode(),
+ FLAG_flush_baseline_code);
+ if (FLAG_flush_baseline_code && NeedsResetDueToFlushedBaselineCode()) {
+ DCHECK(FLAG_flush_baseline_code);
+ // Flush baseline code from the closure if required
+ set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));
}
}
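To make the flow of the new ResetIfCodeFlushed easier to follow, here is a minimal standalone sketch of its decision order, assuming the two flags and the two staleness signals are plain booleans: bytecode flushing is checked first and returns early, and only then is stale baseline code swapped for the interpreter entry trampoline. The struct, enum, and function names below are illustrative stand-ins, not V8 internals.

#include <cassert>

// Simplified stand-ins for the V8 flags and object state; the real code
// consults FLAG_flush_bytecode / FLAG_flush_baseline_code and the
// JSFunction's shared/code fields.
struct FunctionState {
  bool flush_bytecode_enabled;
  bool flush_baseline_enabled;
  bool bytecode_was_flushed;   // shared() is no longer compiled
  bool baseline_was_flushed;   // code is BASELINE but shared() lost baseline data
};

enum class ResetAction { kNone, kCompileLazy, kInterpreterEntryTrampoline };

// Mirrors the order of checks in ResetIfCodeFlushed: the bytecode case wins
// and returns early; the baseline case is only considered afterwards.
ResetAction DecideReset(const FunctionState& s) {
  if (!s.flush_bytecode_enabled && !s.flush_baseline_enabled)
    return ResetAction::kNone;
  if (s.flush_bytecode_enabled && s.bytecode_was_flushed)
    return ResetAction::kCompileLazy;
  if (s.flush_baseline_enabled && s.baseline_was_flushed)
    return ResetAction::kInterpreterEntryTrampoline;
  return ResetAction::kNone;
}

int main() {
  assert(DecideReset({true, true, true, false}) == ResetAction::kCompileLazy);
  assert(DecideReset({true, true, false, true}) ==
         ResetAction::kInterpreterEntryTrampoline);
  assert(DecideReset({false, false, true, true}) == ResetAction::kNone);
}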
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 05ac6a6c93..b2d086814f 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -1078,7 +1078,7 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
}
void JSFunction::ClearTypeFeedbackInfo() {
- ResetIfBytecodeFlushed();
+ ResetIfCodeFlushed();
if (has_feedback_vector()) {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 5277e4e796..6d7b21abe9 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -58,15 +58,17 @@ class JSFunction : public JSFunctionOrBoundFunction {
// [prototype_or_initial_map]:
DECL_RELEASE_ACQUIRE_ACCESSORS(prototype_or_initial_map, HeapObject)
- // [shared]: The information about the function that
- // can be shared by instances.
+ // [shared]: The information about the function that can be shared by
+ // instances.
DECL_ACCESSORS(shared, SharedFunctionInfo)
+ DECL_RELAXED_GETTER(shared, SharedFunctionInfo)
// Fast binding requires length and name accessors.
static const int kMinDescriptorsForFastBind = 2;
// [context]: The context for this function.
inline Context context();
+ DECL_RELAXED_GETTER(context, Context)
inline bool has_context() const;
inline void set_context(HeapObject context,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -210,11 +212,20 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Resets function to clear compiled data after bytecode has been flushed.
inline bool NeedsResetDueToFlushedBytecode();
- inline void ResetIfBytecodeFlushed(
+ inline void ResetIfCodeFlushed(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot = base::nullopt);
+ // Returns whether the closure's code field has to be updated because it has
+ // stale baseline code.
+ inline bool NeedsResetDueToFlushedBaselineCode();
+
+ // Returns whether baseline code is a candidate for flushing. This method is
+ // called from concurrent marking, so be careful when accessing data fields.
+ inline bool ShouldFlushBaselineCode(
+ base::EnumSet<CodeFlushMode> code_flush_mode);
+
DECL_GETTER(has_prototype_slot, bool)
// The initial map for an object created by this constructor.
@@ -311,6 +322,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
static constexpr int kPrototypeOrInitialMapOffset =
FieldOffsets::kPrototypeOrInitialMapOffset;
+ class BodyDescriptor;
+
private:
DECL_ACCESSORS(raw_code, CodeT)
DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
index e08a0f1789..de934b82f4 100644
--- a/deps/v8/src/objects/js-function.tq
+++ b/deps/v8/src/objects/js-function.tq
@@ -3,11 +3,9 @@
// found in the LICENSE file.
@abstract
-@generateCppClass
extern class JSFunctionOrBoundFunction extends JSObject {
}
-@generateCppClass
extern class JSBoundFunction extends JSFunctionOrBoundFunction {
// The wrapped function object.
bound_target_function: Callable;
@@ -20,6 +18,7 @@ extern class JSBoundFunction extends JSFunctionOrBoundFunction {
}
@highestInstanceTypeWithinParentClassRange
+@doNotGenerateCppClass
extern class JSFunction extends JSFunctionOrBoundFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
diff --git a/deps/v8/src/objects/js-generator.tq b/deps/v8/src/objects/js-generator.tq
index 51725dc964..0aabb16bdb 100644
--- a/deps/v8/src/objects/js-generator.tq
+++ b/deps/v8/src/objects/js-generator.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class JSGeneratorObject extends JSObject {
function: JSFunction;
context: Context;
@@ -26,12 +25,10 @@ extern class JSGeneratorObject extends JSObject {
parameters_and_registers: FixedArray;
}
-@generateCppClass
extern class JSAsyncFunctionObject extends JSGeneratorObject {
promise: JSPromise;
}
-@generateCppClass
extern class JSAsyncGeneratorObject extends JSGeneratorObject {
// Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
// undefined.
@@ -40,7 +37,6 @@ extern class JSAsyncGeneratorObject extends JSGeneratorObject {
is_awaiting: Smi;
}
-@generateCppClass
extern class AsyncGeneratorRequest extends Struct {
next: AsyncGeneratorRequest|Undefined;
resume_mode: Smi;
diff --git a/deps/v8/src/objects/js-list-format.tq b/deps/v8/src/objects/js-list-format.tq
index 95d80ea96d..88c83a619f 100644
--- a/deps/v8/src/objects/js-list-format.tq
+++ b/deps/v8/src/objects/js-list-format.tq
@@ -11,7 +11,6 @@ bitfield struct JSListFormatFlags extends uint31 {
Type: JSListFormatType: 2 bit; // "type" is a reserved word.
}
-@generateCppClass
extern class JSListFormat extends JSObject {
locale: String;
icu_formatter: Foreign; // Managed<icu::ListFormatter>
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 98ce154336..64644abad2 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -178,12 +178,18 @@ int32_t weekdayFromEDaysOfWeek(icu::Calendar::EDaysOfWeek eDaysOfWeek) {
} // namespace
bool JSLocale::Is38AlphaNumList(const std::string& value) {
- std::size_t found = value.find("-");
- if (found == std::string::npos) {
+ std::size_t found_dash = value.find("-");
+ std::size_t found_underscore = value.find("_");
+ if (found_dash == std::string::npos &&
+ found_underscore == std::string::npos) {
return IsAlphanum(value, 3, 8);
}
- return IsAlphanum(value.substr(0, found), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found + 1));
+ if (found_underscore == std::string::npos || found_dash < found_underscore) {
+ return IsAlphanum(value.substr(0, found_dash), 3, 8) &&
+ JSLocale::Is38AlphaNumList(value.substr(found_dash + 1));
+ }
+ return IsAlphanum(value.substr(0, found_underscore), 3, 8) &&
+ JSLocale::Is38AlphaNumList(value.substr(found_underscore + 1));
}
bool JSLocale::Is3Alpha(const std::string& value) {
@@ -425,8 +431,13 @@ MaybeHandle<JSLocale> JSLocale::Maximize(Isolate* isolate,
// Base name is not changed
result = source;
}
- DCHECK(U_SUCCESS(status));
- DCHECK(!result.isBogus());
+ if (U_FAILURE(status) || result.isBogus()) {
+ // Due to https://unicode-org.atlassian.net/browse/ICU-21639, a valid but
+ // very long locale will fail. Just throw here for now.
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters),
+ JSLocale);
+ }
return Construct(isolate, result);
}
@@ -455,8 +466,13 @@ MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
// Base name is not changed
result = source;
}
- DCHECK(U_SUCCESS(status));
- DCHECK(!result.isBogus());
+ if (U_FAILURE(status) || result.isBogus()) {
+ // Due to https://unicode-org.atlassian.net/browse/ICU-21639, a valid but
+ // very long locale will fail. Just throw here for now.
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters),
+ JSLocale);
+ }
return Construct(isolate, result);
}
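The Is38AlphaNumList change in this file makes the 3-8 alphanumeric list accept '_' as well as '-' as a separator, recursing on whichever separator appears first. Below is a minimal standalone sketch of that behavior; IsAlphanum38 is an illustrative helper standing in for V8's IsAlphanum, not the real function.

#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>

// Each list element must be 3-8 alphanumeric characters.
bool IsAlphanum38(const std::string& s) {
  if (s.size() < 3 || s.size() > 8) return false;
  for (char c : s) {
    if (!std::isalnum(static_cast<unsigned char>(c))) return false;
  }
  return true;
}

// Split on '-' or '_', whichever comes first, and validate each segment.
bool Is38AlphaNumList(const std::string& value) {
  std::size_t dash = value.find('-');
  std::size_t underscore = value.find('_');
  if (dash == std::string::npos && underscore == std::string::npos) {
    return IsAlphanum38(value);
  }
  std::size_t split =
      (underscore == std::string::npos || dash < underscore) ? dash
                                                             : underscore;
  return IsAlphanum38(value.substr(0, split)) &&
         Is38AlphaNumList(value.substr(split + 1));
}

int main() {
  assert(Is38AlphaNumList("abc-def12"));
  assert(Is38AlphaNumList("abc_def12"));  // underscore separator now accepted
  assert(!Is38AlphaNumList("ab-cdef"));   // first segment too short
}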
diff --git a/deps/v8/src/objects/js-locale.tq b/deps/v8/src/objects/js-locale.tq
index 55c80f926f..ef0b2debe4 100644
--- a/deps/v8/src/objects/js-locale.tq
+++ b/deps/v8/src/objects/js-locale.tq
@@ -4,7 +4,6 @@
#include 'src/objects/js-locale.h'
-@generateCppClass
extern class JSLocale extends JSObject {
icu_locale: Foreign; // Managed<icu::Locale>
}
diff --git a/deps/v8/src/objects/js-number-format.tq b/deps/v8/src/objects/js-number-format.tq
index b1b63016f1..fe618daa3b 100644
--- a/deps/v8/src/objects/js-number-format.tq
+++ b/deps/v8/src/objects/js-number-format.tq
@@ -4,7 +4,6 @@
#include 'src/objects/js-number-format.h'
-@generateCppClass
extern class JSNumberFormat extends JSObject {
locale: String;
icu_number_formatter:
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index cf0180e716..6be8267a55 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -622,6 +622,11 @@ DEF_GETTER(JSObject, HasTypedArrayElements, bool) {
return map(cage_base).has_typed_array_elements();
}
+DEF_GETTER(JSObject, HasTypedArrayOrRabGsabTypedArrayElements, bool) {
+ DCHECK(!elements(cage_base).is_null());
+ return map(cage_base).has_typed_array_or_rab_gsab_typed_array_elements();
+}
+
#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
DEF_GETTER(JSObject, HasFixed##Type##Elements, bool) { \
return map(cage_base).elements_kind() == TYPE##_ELEMENTS; \
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 4c98722d15..cdd16a65a6 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -303,6 +303,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
descriptors.PatchValue(map->instance_descriptors(isolate));
}
} else {
+ // No element indexes should get here, or the exclusion check may yield
+ // false negatives due to a type mismatch.
if (excluded_properties != nullptr &&
HasExcludedProperty(excluded_properties, next_key)) {
continue;
@@ -381,6 +383,11 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
// 4. Repeat for each element nextKey of keys in List order,
for (int i = 0; i < keys->length(); ++i) {
Handle<Object> next_key(keys->get(i), isolate);
+ if (excluded_properties != nullptr &&
+ HasExcludedProperty(excluded_properties, next_key)) {
+ continue;
+ }
+
// 4a i. Let desc be ? from.[[GetOwnProperty]](nextKey).
PropertyDescriptor desc;
Maybe<bool> found =
@@ -404,11 +411,6 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
Just(ShouldThrow::kThrowOnError)),
Nothing<bool>());
} else {
- if (excluded_properties != nullptr &&
- HasExcludedProperty(excluded_properties, next_key)) {
- continue;
- }
-
// 4a ii 2. Perform ! CreateDataProperty(target, nextKey, propValue).
PropertyKey key(isolate, next_key);
LookupIterator it(isolate, target, key, LookupIterator::OWN);
@@ -2334,10 +2336,15 @@ int JSObject::GetHeaderSize(InstanceType type,
return WasmTableObject::kHeaderSize;
case WASM_VALUE_OBJECT_TYPE:
return WasmValueObject::kHeaderSize;
- case WASM_EXCEPTION_OBJECT_TYPE:
- return WasmExceptionObject::kHeaderSize;
+ case WASM_TAG_OBJECT_TYPE:
+ return WasmTagObject::kHeaderSize;
#endif // V8_ENABLE_WEBASSEMBLY
default: {
+ // Special type check for API Objects because they occupy a large, variable
+ // instance type range.
+ if (InstanceTypeChecker::IsJSApiObject(type)) {
+ return JSObject::kHeaderSize;
+ }
std::stringstream ss;
ss << type;
FATAL("unexpected instance type: %s\n", ss.str().c_str());
@@ -2860,14 +2867,16 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
}
int old_number_of_fields;
- int number_of_fields = new_map->NumberOfFields();
+ int number_of_fields =
+ new_map->NumberOfFields(ConcurrencyMode::kNotConcurrent);
int inobject = new_map->GetInObjectProperties();
int unused = new_map->UnusedPropertyFields();
// Nothing to do if no functions were converted to fields and no smis were
// converted to doubles.
if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
- unused, &old_number_of_fields)) {
+ unused, &old_number_of_fields,
+ ConcurrencyMode::kNotConcurrent)) {
object->set_map(*new_map, kReleaseStore);
return;
}
@@ -3194,7 +3203,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
map = MapUpdater{isolate, map}.ReconfigureElementsKind(to_kind);
}
- int number_of_fields = map->NumberOfFields();
+ int number_of_fields = map->NumberOfFields(ConcurrencyMode::kNotConcurrent);
int inobject = map->GetInObjectProperties();
int unused = map->UnusedPropertyFields();
int total_size = number_of_fields + unused;
@@ -3954,6 +3963,17 @@ bool JSObject::IsExtensible(Handle<JSObject> object) {
return object->map().is_extensible();
}
+// static
+MaybeHandle<Object> JSObject::ReadFromOptionsBag(Handle<Object> options,
+ Handle<String> option_name,
+ Isolate* isolate) {
+ if (options->IsJSReceiver()) {
+ Handle<JSReceiver> js_options = Handle<JSReceiver>::cast(options);
+ return JSObject::GetProperty(isolate, js_options, option_name);
+ }
+ return MaybeHandle<Object>(isolate->factory()->undefined_value());
+}
+
template <typename Dictionary>
void JSObject::ApplyAttributesToDictionary(
Isolate* isolate, ReadOnlyRoots roots, Handle<Dictionary> dictionary,
@@ -5132,18 +5152,18 @@ bool JSObject::IsApiWrapper() {
// *_API_* types are generated through templates which can have embedder
// fields. The other types have their embedder fields added at compile time.
auto instance_type = map().instance_type();
- return instance_type == JS_API_OBJECT_TYPE ||
- instance_type == JS_ARRAY_BUFFER_TYPE ||
+ return instance_type == JS_ARRAY_BUFFER_TYPE ||
instance_type == JS_DATA_VIEW_TYPE ||
instance_type == JS_GLOBAL_OBJECT_TYPE ||
instance_type == JS_GLOBAL_PROXY_TYPE ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
- instance_type == JS_TYPED_ARRAY_TYPE;
+ instance_type == JS_TYPED_ARRAY_TYPE ||
+ InstanceTypeChecker::IsJSApiObject(instance_type);
}
bool JSObject::IsDroppableApiWrapper() {
auto instance_type = map().instance_type();
- return instance_type == JS_API_OBJECT_TYPE ||
+ return InstanceTypeChecker::IsJSApiObject(instance_type) ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
}
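The new JSObject::ReadFromOptionsBag returns the named property when the options value is a receiver, and undefined otherwise, rather than failing on non-object options. A rough standalone sketch of that contract, using std::map and std::optional as stand-ins for the JSReceiver lookup and the undefined value:

#include <cassert>
#include <map>
#include <optional>
#include <string>

using OptionsBag = std::map<std::string, std::string>;

// Illustrative model only: a null bag plays the role of non-object options,
// and an empty optional plays the role of undefined.
std::optional<std::string> ReadFromOptionsBag(const OptionsBag* options,
                                              const std::string& name) {
  if (options == nullptr) return std::nullopt;  // options is not a receiver
  auto it = options->find(name);
  if (it == options->end()) return std::nullopt;
  return it->second;
}

int main() {
  OptionsBag bag{{"unit", "seconds"}};
  assert(ReadFromOptionsBag(&bag, "unit") == "seconds");
  assert(!ReadFromOptionsBag(&bag, "style").has_value());
  assert(!ReadFromOptionsBag(nullptr, "unit").has_value());
}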
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index ae7e91c000..7452237006 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -364,6 +364,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
DECL_GETTER(HasNonextensibleElements, bool)
DECL_GETTER(HasTypedArrayElements, bool)
+ DECL_GETTER(HasTypedArrayOrRabGsabTypedArrayElements, bool)
DECL_GETTER(HasFixedUint8ClampedElements, bool)
DECL_GETTER(HasFixedArrayElements, bool)
@@ -715,6 +716,10 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
static bool IsExtensible(Handle<JSObject> object);
+ static MaybeHandle<Object> ReadFromOptionsBag(Handle<Object> options,
+ Handle<String> option_name,
+ Isolate* isolate);
+
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
DECL_PRINTER(JSObject)
@@ -793,6 +798,8 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
static const int kMaxInstanceSize = 255 * kTaggedSize;
+ static const int kMapCacheSize = 128;
+
// When extending the backing storage for property values, we increase
// its size by more than the 1 entry necessary, so sequentially adding fields
// to the same object requires fewer allocations and copies.
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index e21f874bbb..fd48d43045 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -5,13 +5,13 @@
// JSReceiver corresponds to objects in the JS sense.
@abstract
@highestInstanceTypeWithinParentClassRange
+@doNotGenerateCppClass
extern class JSReceiver extends HeapObject {
properties_or_hash: SwissNameDictionary|FixedArrayBase|PropertyArray|Smi;
}
type Constructor extends JSReceiver;
-@generateCppClass
@apiExposedInstanceTypeValue(0x421)
@highestInstanceTypeWithinParentClassRange
extern class JSObject extends JSReceiver {
@@ -42,13 +42,11 @@ macro NewJSObject(implicit context: Context)(): JSObject {
}
@abstract
-@generateCppClass
@lowestInstanceTypeWithinParentClassRange
extern class JSCustomElementsObject extends JSObject {
}
@abstract
-@generateCppClass
@lowestInstanceTypeWithinParentClassRange
extern class JSSpecialObject extends JSCustomElementsObject {
}
@@ -93,23 +91,21 @@ macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
SlackTrackingMode::kWithSlackTracking);
}
-@generateCppClass
extern class JSGlobalProxy extends JSSpecialObject {
// [native_context]: the owner native context of this global proxy object.
// It is null value if this object is not used by any context.
native_context: Object;
}
+@doNotGenerateCppClass
extern class JSGlobalObject extends JSSpecialObject {
native_context: NativeContext;
global_proxy: JSGlobalProxy;
}
-@generateCppClass
-extern class JSPrimitiveWrapper extends JSCustomElementsObject {
- value: JSAny;
-}
+extern class JSPrimitiveWrapper extends JSCustomElementsObject { value: JSAny; }
+@doNotGenerateCppClass
extern class JSMessageObject extends JSObject {
// Tagged fields.
message_type: Smi;
@@ -126,7 +122,6 @@ extern class JSMessageObject extends JSObject {
error_level: Smi;
}
-@generateCppClass
extern class JSDate extends JSObject {
// If one component is NaN, all of them are, indicating a NaN time value.
@@ -147,7 +142,6 @@ extern class JSDate extends JSObject {
cache_stamp: Undefined|Smi|NaN;
}
-@generateCppClass
extern class JSAsyncFromSyncIterator extends JSObject {
sync_iterator: JSReceiver;
// The "next" method is loaded during GetIterator, and is not reloaded for
@@ -155,7 +149,6 @@ extern class JSAsyncFromSyncIterator extends JSObject {
next: Object;
}
-@generateCppClass
extern class JSStringIterator extends JSObject {
// The [[IteratedString]] slot.
string: String;
diff --git a/deps/v8/src/objects/js-plural-rules.tq b/deps/v8/src/objects/js-plural-rules.tq
index 818cff5787..697108609b 100644
--- a/deps/v8/src/objects/js-plural-rules.tq
+++ b/deps/v8/src/objects/js-plural-rules.tq
@@ -9,7 +9,6 @@ bitfield struct JSPluralRulesFlags extends uint31 {
Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
}
-@generateCppClass
extern class JSPluralRules extends JSObject {
locale: String;
flags: SmiTagged<JSPluralRulesFlags>;
diff --git a/deps/v8/src/objects/js-promise.tq b/deps/v8/src/objects/js-promise.tq
index a904a9d8da..be8fb06637 100644
--- a/deps/v8/src/objects/js-promise.tq
+++ b/deps/v8/src/objects/js-promise.tq
@@ -10,7 +10,6 @@ bitfield struct JSPromiseFlags extends uint31 {
async_task_id: int32: 22 bit;
}
-@generateCppClass
extern class JSPromise extends JSObject {
macro Status(): PromiseState {
return this.flags.status;
diff --git a/deps/v8/src/objects/js-proxy.tq b/deps/v8/src/objects/js-proxy.tq
index a7d8a12030..b91c0de5d0 100644
--- a/deps/v8/src/objects/js-proxy.tq
+++ b/deps/v8/src/objects/js-proxy.tq
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class JSProxy extends JSReceiver {
target: JSReceiver|Null;
handler: JSReceiver|Null;
}
+@doNotGenerateCppClass
extern shape JSProxyRevocableResult extends JSObject {
proxy: JSAny;
revoke: JSAny;
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.tq b/deps/v8/src/objects/js-regexp-string-iterator.tq
index 4daed7af2d..a5efd26144 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.tq
+++ b/deps/v8/src/objects/js-regexp-string-iterator.tq
@@ -8,7 +8,6 @@ bitfield struct JSRegExpStringIteratorFlags extends uint31 {
unicode: bool: 1 bit;
}
-@generateCppClass
extern class JSRegExpStringIterator extends JSObject {
// The [[IteratingRegExp]] internal property.
iterating_reg_exp: JSReceiver;
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index e2a29c6dcd..029964faa2 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -52,10 +52,9 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
: c == 'y' ? base::Optional<Flag>(kSticky)
: c == 'u' ? base::Optional<Flag>(kUnicode)
: c == 's' ? base::Optional<Flag>(kDotAll)
+ : c == 'd' ? base::Optional<Flag>(kHasIndices)
: (FLAG_enable_experimental_regexp_engine && c == 'l')
? base::Optional<Flag>(kLinear)
- : (FLAG_harmony_regexp_match_indices && c == 'd')
- ? base::Optional<Flag>(kHasIndices)
: base::Optional<Flag>();
// clang-format on
}
@@ -226,11 +225,11 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
// Descriptor array index to important methods in the prototype.
static const int kExecFunctionDescriptorIndex = 1;
- static const int kSymbolMatchFunctionDescriptorIndex = 13;
- static const int kSymbolMatchAllFunctionDescriptorIndex = 14;
- static const int kSymbolReplaceFunctionDescriptorIndex = 15;
- static const int kSymbolSearchFunctionDescriptorIndex = 16;
- static const int kSymbolSplitFunctionDescriptorIndex = 17;
+ static const int kSymbolMatchFunctionDescriptorIndex = 14;
+ static const int kSymbolMatchAllFunctionDescriptorIndex = 15;
+ static const int kSymbolReplaceFunctionDescriptorIndex = 16;
+ static const int kSymbolSearchFunctionDescriptorIndex = 17;
+ static const int kSymbolSplitFunctionDescriptorIndex = 18;
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
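With the flag-parsing change above, the 'd' (hasIndices) flag is always recognized, while 'l' (linear) remains gated behind the experimental engine flag. A small standalone sketch of that mapping, assuming an illustrative Flag enum and a plain boolean in place of FLAG_enable_experimental_regexp_engine:

#include <cassert>
#include <optional>

enum Flag { kGlobal, kIgnoreCase, kMultiline, kSticky, kUnicode, kDotAll,
            kHasIndices, kLinear };

std::optional<Flag> FlagFromChar(char c, bool experimental_engine_enabled) {
  switch (c) {
    case 'g': return kGlobal;
    case 'i': return kIgnoreCase;
    case 'm': return kMultiline;
    case 'y': return kSticky;
    case 'u': return kUnicode;
    case 's': return kDotAll;
    case 'd': return kHasIndices;  // now unconditional
    case 'l':  // still behind the experimental engine toggle
      return experimental_engine_enabled ? std::optional<Flag>(kLinear)
                                         : std::optional<Flag>();
    default:  return std::nullopt;
  }
}

int main() {
  assert(FlagFromChar('d', false) == kHasIndices);
  assert(!FlagFromChar('l', false).has_value());
  assert(FlagFromChar('l', true) == kLinear);
}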
diff --git a/deps/v8/src/objects/js-regexp.tq b/deps/v8/src/objects/js-regexp.tq
index d8cff3fced..328dd94efb 100644
--- a/deps/v8/src/objects/js-regexp.tq
+++ b/deps/v8/src/objects/js-regexp.tq
@@ -13,7 +13,6 @@ bitfield struct JSRegExpFlags extends uint31 {
has_indices: bool: 1 bit;
}
-@generateCppClass
extern class JSRegExp extends JSObject {
data: FixedArray|Undefined;
source: String|Undefined;
@@ -39,6 +38,7 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
extern class JSRegExpConstructor extends JSFunction
generates 'TNode<JSFunction>';
+@doNotGenerateCppClass
extern shape JSRegExpResult extends JSArray {
// In-object properties:
// The below fields are externally exposed.
@@ -52,10 +52,12 @@ extern shape JSRegExpResult extends JSArray {
regexp_last_index: Smi;
}
+@doNotGenerateCppClass
extern shape JSRegExpResultWithIndices extends JSRegExpResult {
indices: JSAny;
}
+@doNotGenerateCppClass
extern shape JSRegExpResultIndices extends JSArray {
// In-object properties:
// The groups field is externally exposed.
diff --git a/deps/v8/src/objects/js-relative-time-format.tq b/deps/v8/src/objects/js-relative-time-format.tq
index 70b5e82245..6a88686f4f 100644
--- a/deps/v8/src/objects/js-relative-time-format.tq
+++ b/deps/v8/src/objects/js-relative-time-format.tq
@@ -10,7 +10,6 @@ bitfield struct JSRelativeTimeFormatFlags extends uint31 {
numeric: JSRelativeTimeFormatNumeric: 1 bit;
}
-@generateCppClass
extern class JSRelativeTimeFormat extends JSObject {
locale: String;
numberingSystem: String;
diff --git a/deps/v8/src/objects/js-segment-iterator.tq b/deps/v8/src/objects/js-segment-iterator.tq
index 502070cefd..3b1ea6ad69 100644
--- a/deps/v8/src/objects/js-segment-iterator.tq
+++ b/deps/v8/src/objects/js-segment-iterator.tq
@@ -8,7 +8,6 @@ bitfield struct JSSegmentIteratorFlags extends uint31 {
granularity: JSSegmenterGranularity: 2 bit;
}
-@generateCppClass
extern class JSSegmentIterator extends JSObject {
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
unicode_string: Foreign; // Managed<icu::UnicodeString>
diff --git a/deps/v8/src/objects/js-segmenter.tq b/deps/v8/src/objects/js-segmenter.tq
index fdd888b428..70853aae72 100644
--- a/deps/v8/src/objects/js-segmenter.tq
+++ b/deps/v8/src/objects/js-segmenter.tq
@@ -10,7 +10,6 @@ bitfield struct JSSegmenterFlags extends uint31 {
granularity: JSSegmenterGranularity: 2 bit;
}
-@generateCppClass
extern class JSSegmenter extends JSObject {
locale: String;
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
diff --git a/deps/v8/src/objects/js-segments.tq b/deps/v8/src/objects/js-segments.tq
index f891e26ca0..868dd8a1d9 100644
--- a/deps/v8/src/objects/js-segments.tq
+++ b/deps/v8/src/objects/js-segments.tq
@@ -8,7 +8,6 @@ bitfield struct JSSegmentsFlags extends uint31 {
granularity: JSSegmenterGranularity: 2 bit;
}
-@generateCppClass
extern class JSSegments extends JSObject {
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
unicode_string: Foreign; // Managed<icu::UnicodeString>
diff --git a/deps/v8/src/objects/js-weak-refs.tq b/deps/v8/src/objects/js-weak-refs.tq
index 3447e31b71..36f3817ac7 100644
--- a/deps/v8/src/objects/js-weak-refs.tq
+++ b/deps/v8/src/objects/js-weak-refs.tq
@@ -6,6 +6,7 @@ bitfield struct FinalizationRegistryFlags extends uint31 {
scheduled_for_cleanup: bool: 1 bit;
}
+@doNotGenerateCppClass
extern class JSFinalizationRegistry extends JSObject {
native_context: NativeContext;
cleanup: Callable;
@@ -18,7 +19,6 @@ extern class JSFinalizationRegistry extends JSObject {
flags: SmiTagged<FinalizationRegistryFlags>;
}
-@generateCppClass
extern class WeakCell extends HeapObject {
finalization_registry: Undefined|JSFinalizationRegistry;
target: Undefined|JSReceiver;
@@ -40,7 +40,4 @@ extern class WeakCell extends HeapObject {
key_list_next: Undefined|WeakCell;
}
-@generateCppClass
-extern class JSWeakRef extends JSObject {
- target: Undefined|JSReceiver;
-}
+extern class JSWeakRef extends JSObject { target: Undefined|JSReceiver; }
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 798402de4b..815d9ac504 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -100,10 +100,6 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
if (keys_.is_null()) {
return isolate_->factory()->empty_fixed_array();
}
- if (mode_ == KeyCollectionMode::kOwnOnly &&
- keys_->map() == ReadOnlyRoots(isolate_).fixed_array_map()) {
- return Handle<FixedArray>::cast(keys_);
- }
USE(ContainsOnlyValidKeys);
Handle<FixedArray> result =
OrderedHashSet::ConvertToKeysArray(isolate(), keys(), convert);
@@ -224,14 +220,12 @@ Maybe<bool> KeyAccumulator::AddKeysFromJSProxy(Handle<JSProxy> proxy,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, keys, FilterProxyKeys(this, proxy, keys, filter_),
Nothing<bool>());
- if (mode_ == KeyCollectionMode::kOwnOnly) {
- // If we collect only the keys from a JSProxy do not sort or deduplicate.
- keys_ = keys;
- return Just(true);
- }
}
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(
- AddKeys(keys, is_for_in_ ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT));
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-ownpropertykeys
+ // As step 10.5.11.9 of that section says, the keys collected from a Proxy
+ // should not contain any duplicates, and their order is preserved by the
+ // OrderedHashTable.
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(AddKeys(keys, CONVERT_TO_ARRAY_INDEX));
return Just(true);
}
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index c5d1bd4b8c..4abe2a5ad3 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -141,10 +141,7 @@ class KeyAccumulator final {
void set_may_have_elements(bool value) { may_have_elements_ = value; }
Isolate* isolate_;
- // keys_ is either an Handle<OrderedHashSet> or in the case of own JSProxy
- // keys a Handle<FixedArray>. The OrderedHashSet is in-place converted to the
- // result list, a FixedArray containing all collected keys.
- Handle<FixedArray> keys_;
+ Handle<OrderedHashSet> keys_;
Handle<Map> first_prototype_map_;
Handle<JSReceiver> receiver_;
Handle<JSReceiver> last_non_empty_prototype_;
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index de4c900f4c..7406a9dff1 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -37,6 +37,12 @@ inline int EncodeComputedEntry(ClassBoilerplate::ValueKind value_kind,
return flags;
}
+constexpr AccessorComponent ToAccessorComponent(
+ ClassBoilerplate::ValueKind value_kind) {
+ return value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER;
+}
+
template <typename IsolateT>
void AddToDescriptorArrayTemplate(
IsolateT* isolate, Handle<DescriptorArray> descriptor_array_template,
@@ -55,9 +61,7 @@ void AddToDescriptorArrayTemplate(
DCHECK(value_kind == ClassBoilerplate::kGetter ||
value_kind == ClassBoilerplate::kSetter);
Handle<AccessorPair> pair = isolate->factory()->NewAccessorPair();
- pair->set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
- : ACCESSOR_SETTER,
- *value);
+ pair->set(ToAccessorComponent(value_kind), *value);
d = Descriptor::AccessorConstant(name, pair, DONT_ENUM);
}
descriptor_array_template->Append(&d);
@@ -83,9 +87,7 @@ void AddToDescriptorArrayTemplate(
descriptor_array_template->Set(entry, &d);
pair = *new_pair;
}
- pair.set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
- : ACCESSOR_SETTER,
- *value);
+ pair.set(ToAccessorComponent(value_kind), *value, kReleaseStore);
}
}
}
@@ -175,11 +177,8 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
if (value_kind == ClassBoilerplate::kData) {
value_handle = handle(value, isolate);
} else {
- AccessorComponent component = value_kind == ClassBoilerplate::kGetter
- ? ACCESSOR_GETTER
- : ACCESSOR_SETTER;
Handle<AccessorPair> pair(isolate->factory()->NewAccessorPair());
- pair->set(component, value);
+ pair->set(ToAccessorComponent(value_kind), value);
value_handle = pair;
}
@@ -305,9 +304,7 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
}
}
} else { // if (value_kind == ClassBoilerplate::kData) ends here
- AccessorComponent component = value_kind == ClassBoilerplate::kGetter
- ? ACCESSOR_GETTER
- : ACCESSOR_SETTER;
+ AccessorComponent component = ToAccessorComponent(value_kind);
if (existing_value.IsAccessorPair()) {
// Update respective component of existing AccessorPair.
AccessorPair current_pair = AccessorPair::cast(existing_value);
@@ -315,7 +312,7 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
int existing_component_index =
GetExistingValueIndex(current_pair.get(component));
if (existing_component_index < key_index) {
- current_pair.set(component, value);
+ current_pair.set(component, value, kReleaseStore);
} else {
// The existing accessor property overwrites the computed one, update
// its enumeration order accordingly.
diff --git a/deps/v8/src/objects/literal-objects.tq b/deps/v8/src/objects/literal-objects.tq
index bb087f7a5a..4b8dedab4f 100644
--- a/deps/v8/src/objects/literal-objects.tq
+++ b/deps/v8/src/objects/literal-objects.tq
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class ArrayBoilerplateDescription extends Struct {
flags: Smi;
constant_elements: FixedArrayBase;
}
-@generateCppClass
extern class RegExpBoilerplateDescription extends Struct {
data: FixedArray;
source: String;
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 9ec1f7fa2a..283e4f84d5 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -1473,34 +1473,9 @@ ConcurrentLookupIterator::TryGetOwnConstantElement(
JSPrimitiveWrapper js_value = JSPrimitiveWrapper::cast(holder);
String wrapped_string = String::cast(js_value.value());
-
- // The access guard below protects string accesses related to internalized
- // strings.
- // TODO(jgruber): Support other string kinds.
- Map wrapped_string_map = wrapped_string.map(isolate, kAcquireLoad);
- InstanceType wrapped_type = wrapped_string_map.instance_type();
- if (!(InstanceTypeChecker::IsInternalizedString(wrapped_type)) ||
- InstanceTypeChecker::IsThinString(wrapped_type)) {
- return kGaveUp;
- }
-
- const uint32_t length = static_cast<uint32_t>(wrapped_string.length());
- if (index >= length) return kGaveUp;
-
- uint16_t charcode;
- {
- SharedStringAccessGuardIfNeeded access_guard(local_isolate);
- charcode = wrapped_string.Get(static_cast<int>(index));
- }
-
- if (charcode > unibrow::Latin1::kMaxChar) return kGaveUp;
-
- Object value = isolate->factory()->single_character_string_cache()->get(
- charcode, kRelaxedLoad);
- if (value == ReadOnlyRoots(isolate).undefined_value()) return kGaveUp;
-
- *result_out = value;
- return kPresent;
+ return ConcurrentLookupIterator::TryGetOwnChar(
+ static_cast<String*>(result_out), isolate, local_isolate,
+ wrapped_string, index);
} else {
DCHECK(!IsFrozenElementsKind(elements_kind));
DCHECK(!IsDictionaryElementsKind(elements_kind));
@@ -1512,14 +1487,48 @@ ConcurrentLookupIterator::TryGetOwnConstantElement(
}
// static
+ConcurrentLookupIterator::Result ConcurrentLookupIterator::TryGetOwnChar(
+ String* result_out, Isolate* isolate, LocalIsolate* local_isolate,
+ String string, size_t index) {
+ DisallowGarbageCollection no_gc;
+ // The access guard below protects string accesses related to internalized
+ // strings.
+ // TODO(jgruber): Support other string kinds.
+ Map string_map = string.map(isolate, kAcquireLoad);
+ InstanceType type = string_map.instance_type();
+ if (!(InstanceTypeChecker::IsInternalizedString(type)) ||
+ InstanceTypeChecker::IsThinString(type)) {
+ return kGaveUp;
+ }
+
+ const uint32_t length = static_cast<uint32_t>(string.length());
+ if (index >= length) return kGaveUp;
+
+ uint16_t charcode;
+ {
+ SharedStringAccessGuardIfNeeded access_guard(local_isolate);
+ charcode = string.Get(static_cast<int>(index));
+ }
+
+ if (charcode > unibrow::Latin1::kMaxChar) return kGaveUp;
+
+ Object value = isolate->factory()->single_character_string_cache()->get(
+ charcode, kRelaxedLoad);
+ if (value == ReadOnlyRoots(isolate).undefined_value()) return kGaveUp;
+
+ *result_out = String::cast(value);
+ return kPresent;
+}
+
+// static
base::Optional<PropertyCell> ConcurrentLookupIterator::TryGetPropertyCell(
Isolate* isolate, LocalIsolate* local_isolate,
Handle<JSGlobalObject> holder, Handle<Name> name) {
DisallowGarbageCollection no_gc;
Map holder_map = holder->map();
- if (holder_map.is_access_check_needed()) return {};
- if (holder_map.has_named_interceptor()) return {};
+ CHECK(!holder_map.is_access_check_needed());
+ CHECK(!holder_map.has_named_interceptor());
GlobalDictionary dict = holder->global_dictionary(kAcquireLoad);
base::Optional<PropertyCell> cell =
@@ -1534,7 +1543,7 @@ base::Optional<PropertyCell> ConcurrentLookupIterator::TryGetPropertyCell(
base::Optional<Name> maybe_cached_property_name =
FunctionTemplateInfo::TryGetCachedPropertyName(
isolate, AccessorPair::cast(maybe_accessor_pair)
- .getter(isolate, kRelaxedLoad));
+ .getter(isolate, kAcquireLoad));
if (!maybe_cached_property_name.has_value()) return {};
cell = dict.TryFindPropertyCellForConcurrentLookupIterator(
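The new ConcurrentLookupIterator::TryGetOwnChar factors out the checks that decide whether a concurrent indexed read on a string can be answered: give up unless the index is in range and the character fits in the Latin-1 single-character string cache. A simplified standalone model of those checks, with std::u16string and std::string standing in for V8's String types (the real code additionally requires an internalized string and takes a shared-string access guard):

#include <cassert>
#include <cstdint>
#include <string>

enum class Result { kGaveUp, kPresent };
constexpr uint16_t kMaxLatin1 = 255;  // stands in for unibrow::Latin1::kMaxChar

Result TryGetOwnChar(std::string* result_out, const std::u16string& s,
                     size_t index) {
  if (index >= s.size()) return Result::kGaveUp;
  uint16_t charcode = s[index];
  if (charcode > kMaxLatin1) return Result::kGaveUp;
  // V8 answers from a preallocated single-character string cache here.
  *result_out = std::string(1, static_cast<char>(charcode));
  return Result::kPresent;
}

int main() {
  std::string out;
  assert(TryGetOwnChar(&out, u"abc", 1) == Result::kPresent && out == "b");
  assert(TryGetOwnChar(&out, u"abc", 9) == Result::kGaveUp);     // out of range
  assert(TryGetOwnChar(&out, u"\u20ac", 0) == Result::kGaveUp);  // non-Latin-1
}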
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 4147559d09..de678f35b0 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -345,6 +345,13 @@ class ConcurrentLookupIterator final : public AllStatic {
JSObject holder, FixedArrayBase elements, ElementsKind elements_kind,
size_t index);
+ // Implements the own data property lookup for the specialized case of
+ // strings.
+ V8_EXPORT_PRIVATE static Result TryGetOwnChar(String* result_out,
+ Isolate* isolate,
+ LocalIsolate* local_isolate,
+ String string, size_t index);
+
// This method reimplements the following sequence in a concurrent setting:
//
// LookupIterator it(holder, isolate, name, LookupIterator::OWN);
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index d170341b9a..572b3f9299 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -33,8 +33,7 @@ namespace internal {
#include "torque-generated/src/objects/map-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
-CAST_ACCESSOR(Map)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Map)
ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset)
@@ -191,7 +190,8 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
return external > limit || counts.GetTotal() > kMaxNumberOfDescriptors;
} else {
int limit = std::max({kFastPropertiesSoftLimit, GetInObjectProperties()});
- int external = NumberOfFields() - GetInObjectProperties();
+ int external = NumberOfFields(ConcurrencyMode::kNotConcurrent) -
+ GetInObjectProperties();
return external > limit;
}
}
@@ -290,14 +290,14 @@ int Map::inobject_properties_start_or_constructor_function_index() const {
// TODO(solanes, v8:7790, v8:11353): Make this and the setter non-atomic
// when TSAN sees the map's store synchronization.
return RELAXED_READ_BYTE_FIELD(
- *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+ *this, kInobjectPropertiesStartOrConstructorFunctionIndexOffset);
}
void Map::set_inobject_properties_start_or_constructor_function_index(
int value) {
CHECK_LT(static_cast<unsigned>(value), 256);
RELAXED_WRITE_BYTE_FIELD(
- *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ *this, kInobjectPropertiesStartOrConstructorFunctionIndexOffset,
static_cast<byte>(value));
}
@@ -742,9 +742,10 @@ void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
}
// static
-Map Map::ElementsTransitionMap(Isolate* isolate) {
+Map Map::ElementsTransitionMap(Isolate* isolate, ConcurrencyMode cmode) {
DisallowGarbageCollection no_gc;
- return TransitionsAccessor(isolate, *this, &no_gc)
+ return TransitionsAccessor(isolate, *this, &no_gc,
+ cmode == ConcurrencyMode::kConcurrent)
.SearchSpecial(ReadOnlyRoots(isolate).elements_transition_symbol());
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index feb060fa51..ba7961a9ca 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -282,6 +282,125 @@ Handle<Map> MapUpdater::UpdateImpl() {
return result_map_;
}
+namespace {
+
+struct IntegrityLevelTransitionInfo {
+ explicit IntegrityLevelTransitionInfo(Map map)
+ : integrity_level_source_map(map) {}
+
+ bool has_integrity_level_transition = false;
+ PropertyAttributes integrity_level = NONE;
+ Map integrity_level_source_map;
+ Symbol integrity_level_symbol;
+};
+
+IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
+ Map map, Isolate* isolate, DisallowGarbageCollection* no_gc,
+ ConcurrencyMode cmode) {
+ const bool is_concurrent = cmode == ConcurrencyMode::kConcurrent;
+ IntegrityLevelTransitionInfo info(map);
+
+ // Figure out the most restrictive integrity level transition (it should
+ // be the last one in the transition tree).
+ DCHECK(!map.is_extensible());
+ Map previous = Map::cast(map.GetBackPointer(isolate));
+ TransitionsAccessor last_transitions(isolate, previous, no_gc, is_concurrent);
+ if (!last_transitions.HasIntegrityLevelTransitionTo(
+ map, &info.integrity_level_symbol, &info.integrity_level)) {
+ // The last transition was not an integrity level transition - just bail out.
+ // This can happen in the following cases:
+ // - there are private symbol transitions following the integrity level
+ // transitions (see crbug.com/v8/8854).
+ // - there is a getter added in addition to an existing setter (or a setter
+ // in addition to an existing getter).
+ return info;
+ }
+
+ Map source_map = previous;
+ // Now walk up the back pointer chain and skip all integrity level
+ // transitions. If we encounter any non-integrity level transition interleaved
+ // with integrity level transitions, just bail out.
+ while (!source_map.is_extensible()) {
+ previous = Map::cast(source_map.GetBackPointer(isolate));
+ TransitionsAccessor transitions(isolate, previous, no_gc, is_concurrent);
+ if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
+ return info;
+ }
+ source_map = previous;
+ }
+
+ // Integrity-level transitions never change number of descriptors.
+ CHECK_EQ(map.NumberOfOwnDescriptors(), source_map.NumberOfOwnDescriptors());
+
+ info.has_integrity_level_transition = true;
+ info.integrity_level_source_map = source_map;
+ return info;
+}
+
+} // namespace
+
+// static
+base::Optional<Map> MapUpdater::TryUpdateNoLock(Isolate* isolate, Map old_map,
+ ConcurrencyMode cmode) {
+ DisallowGarbageCollection no_gc;
+
+ // Check the state of the root map.
+ Map root_map = old_map.FindRootMap(isolate);
+ if (root_map.is_deprecated()) {
+ JSFunction constructor = JSFunction::cast(root_map.GetConstructor());
+ DCHECK(constructor.has_initial_map());
+ DCHECK(constructor.initial_map().is_dictionary_map());
+ if (constructor.initial_map().elements_kind() != old_map.elements_kind()) {
+ return {};
+ }
+ return constructor.initial_map();
+ }
+ if (!old_map.EquivalentToForTransition(root_map, cmode)) return {};
+
+ ElementsKind from_kind = root_map.elements_kind();
+ ElementsKind to_kind = old_map.elements_kind();
+
+ IntegrityLevelTransitionInfo info(old_map);
+ if (root_map.is_extensible() != old_map.is_extensible()) {
+ DCHECK(!old_map.is_extensible());
+ DCHECK(root_map.is_extensible());
+ info = DetectIntegrityLevelTransitions(old_map, isolate, &no_gc, cmode);
+ // Bail out if there were some private symbol transitions mixed up
+ // with the integrity level transitions.
+ if (!info.has_integrity_level_transition) return Map();
+ // Make sure to replay the original elements kind transitions, before
+ // the integrity level transition sets the elements to dictionary mode.
+ DCHECK(to_kind == DICTIONARY_ELEMENTS ||
+ to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
+ IsTypedArrayElementsKind(to_kind) ||
+ IsAnyHoleyNonextensibleElementsKind(to_kind));
+ to_kind = info.integrity_level_source_map.elements_kind();
+ }
+ if (from_kind != to_kind) {
+ // Try to follow existing elements kind transitions.
+ root_map = root_map.LookupElementsTransitionMap(isolate, to_kind, cmode);
+ if (root_map.is_null()) return {};
+ // From here on, use the map with correct elements kind as root map.
+ }
+
+ // Replay the transitions as they were before the integrity level transition.
+ Map result = root_map.TryReplayPropertyTransitions(
+ isolate, info.integrity_level_source_map, cmode);
+ if (result.is_null()) return {};
+
+ if (info.has_integrity_level_transition) {
+ // Now replay the integrity level transition.
+ result = TransitionsAccessor(isolate, result, &no_gc,
+ cmode == ConcurrencyMode::kConcurrent)
+ .SearchSpecial(info.integrity_level_symbol);
+ }
+ if (result.is_null()) return {};
+
+ DCHECK_EQ(old_map.elements_kind(), result.elements_kind());
+ DCHECK_EQ(old_map.instance_type(), result.instance_type());
+ return result;
+}
+
void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index,
PropertyConstness new_constness,
Representation new_representation,
@@ -423,7 +542,8 @@ MapUpdater::State MapUpdater::FindRootMap() {
return state_;
}
- if (!old_map_->EquivalentToForTransition(*root_map_)) {
+ if (!old_map_->EquivalentToForTransition(*root_map_,
+ ConcurrencyMode::kNotConcurrent)) {
return Normalize("Normalize_NotEquivalent");
} else if (old_map_->is_extensible() != root_map_->is_extensible()) {
DCHECK(!old_map_->is_extensible());
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index c901782bf1..c5b425764a 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -67,6 +67,12 @@ class V8_EXPORT_PRIVATE MapUpdater {
// version and performs the steps 1-6.
Handle<Map> Update();
+ // As above but does not mutate maps; instead, we attempt to replay existing
+ // transitions to find an updated map. No lock is taken.
+ static base::Optional<Map> TryUpdateNoLock(Isolate* isolate, Map old_map,
+ ConcurrencyMode cmode)
+ V8_WARN_UNUSED_RESULT;
+
static Handle<Map> ReconfigureExistingProperty(Isolate* isolate,
Handle<Map> map,
InternalIndex descriptor,
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 50d5728b0e..a8fdce3189 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -112,6 +112,10 @@ VisitorId Map::GetVisitorId(Map map) {
UNREACHABLE();
}
+ if (InstanceTypeChecker::IsJSApiObject(map.instance_type())) {
+ return kVisitJSApiObject;
+ }
+
switch (instance_type) {
case BYTE_ARRAY_TYPE:
return kVisitByteArray;
@@ -286,7 +290,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_SEGMENTS_TYPE:
#endif // V8_INTL_SUPPORT
#if V8_ENABLE_WEBASSEMBLY
- case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_TAG_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
@@ -443,28 +447,33 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
PropertyConstness::kConst, representation, flag);
}
-bool Map::InstancesNeedRewriting(Map target) const {
- int target_number_of_fields = target.NumberOfFields();
+bool Map::InstancesNeedRewriting(Map target, ConcurrencyMode cmode) const {
+ int target_number_of_fields = target.NumberOfFields(cmode);
int target_inobject = target.GetInObjectProperties();
int target_unused = target.UnusedPropertyFields();
int old_number_of_fields;
return InstancesNeedRewriting(target, target_number_of_fields,
target_inobject, target_unused,
- &old_number_of_fields);
+ &old_number_of_fields, cmode);
}
bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
- int* old_number_of_fields) const {
+ int* old_number_of_fields,
+ ConcurrencyMode cmode) const {
// If fields were added (or removed), rewrite the instance.
- *old_number_of_fields = NumberOfFields();
+ *old_number_of_fields = NumberOfFields(cmode);
DCHECK(target_number_of_fields >= *old_number_of_fields);
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray old_desc = instance_descriptors();
- DescriptorArray new_desc = target.instance_descriptors();
+ DescriptorArray old_desc = cmode == ConcurrencyMode::kConcurrent
+ ? instance_descriptors(kAcquireLoad)
+ : instance_descriptors();
+ DescriptorArray new_desc = cmode == ConcurrencyMode::kConcurrent
+ ? target.instance_descriptors(kAcquireLoad)
+ : target.instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
if (new_desc.GetDetails(i).representation().IsDouble() !=
old_desc.GetDetails(i).representation().IsDouble()) {
@@ -487,8 +496,10 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
return true;
}
-int Map::NumberOfFields() const {
- DescriptorArray descriptors = instance_descriptors();
+int Map::NumberOfFields(ConcurrencyMode cmode) const {
+ DescriptorArray descriptors = cmode == ConcurrencyMode::kConcurrent
+ ? instance_descriptors(kAcquireLoad)
+ : instance_descriptors();
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).location() == kField) result++;
@@ -517,7 +528,8 @@ Map::FieldCounts Map::GetFieldCounts() const {
}
bool Map::HasOutOfObjectProperties() const {
- return GetInObjectProperties() < NumberOfFields();
+ return GetInObjectProperties() <
+ NumberOfFields(ConcurrencyMode::kNotConcurrent);
}
void Map::DeprecateTransitionTree(Isolate* isolate) {
@@ -605,7 +617,6 @@ namespace {
Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
DisallowGarbageCollection no_gc;
- DisallowDeoptimization no_deoptimization(isolate);
Map target = old_map;
do {
@@ -632,12 +643,12 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
}
}
- SLOW_DCHECK(Map::TryUpdateSlow(isolate, old_map) == target);
+ SLOW_DCHECK(MapUpdater::TryUpdateNoLock(
+ isolate, old_map, ConcurrencyMode::kNotConcurrent) == target);
return target;
}
} // namespace
-// TODO(ishell): Move TryUpdate() and friends to MapUpdater
// static
MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
DisallowGarbageCollection no_gc;
@@ -652,149 +663,40 @@ MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
}
}
- Map new_map = TryUpdateSlow(isolate, *old_map);
- if (new_map.is_null()) return MaybeHandle<Map>();
+ base::Optional<Map> new_map = MapUpdater::TryUpdateNoLock(
+ isolate, *old_map, ConcurrencyMode::kNotConcurrent);
+ if (!new_map.has_value()) return MaybeHandle<Map>();
if (FLAG_fast_map_update) {
- TransitionsAccessor(isolate, *old_map, &no_gc).SetMigrationTarget(new_map);
+ TransitionsAccessor(isolate, *old_map, &no_gc)
+ .SetMigrationTarget(new_map.value());
}
- return handle(new_map, isolate);
-}
-
-namespace {
-
-struct IntegrityLevelTransitionInfo {
- explicit IntegrityLevelTransitionInfo(Map map)
- : integrity_level_source_map(map) {}
-
- bool has_integrity_level_transition = false;
- PropertyAttributes integrity_level = NONE;
- Map integrity_level_source_map;
- Symbol integrity_level_symbol;
-};
-
-IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
- Map map, Isolate* isolate, DisallowGarbageCollection* no_gc) {
- IntegrityLevelTransitionInfo info(map);
-
- // Figure out the most restrictive integrity level transition (it should
- // be the last one in the transition tree).
- DCHECK(!map.is_extensible());
- Map previous = Map::cast(map.GetBackPointer(isolate));
- TransitionsAccessor last_transitions(isolate, previous, no_gc);
- if (!last_transitions.HasIntegrityLevelTransitionTo(
- map, &(info.integrity_level_symbol), &(info.integrity_level))) {
- // The last transition was not integrity level transition - just bail out.
- // This can happen in the following cases:
- // - there are private symbol transitions following the integrity level
- // transitions (see crbug.com/v8/8854).
- // - there is a getter added in addition to an existing setter (or a setter
- // in addition to an existing getter).
- return info;
- }
-
- Map source_map = previous;
- // Now walk up the back pointer chain and skip all integrity level
- // transitions. If we encounter any non-integrity level transition interleaved
- // with integrity level transitions, just bail out.
- while (!source_map.is_extensible()) {
- previous = Map::cast(source_map.GetBackPointer(isolate));
- TransitionsAccessor transitions(isolate, previous, no_gc);
- if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
- return info;
- }
- source_map = previous;
- }
-
- // Integrity-level transitions never change number of descriptors.
- CHECK_EQ(map.NumberOfOwnDescriptors(), source_map.NumberOfOwnDescriptors());
-
- info.has_integrity_level_transition = true;
- info.integrity_level_source_map = source_map;
- return info;
-}
-
-} // namespace
-
-Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
- DisallowGarbageCollection no_gc;
- DisallowDeoptimization no_deoptimization(isolate);
-
- // Check the state of the root map.
- Map root_map = old_map.FindRootMap(isolate);
- if (root_map.is_deprecated()) {
- JSFunction constructor = JSFunction::cast(root_map.GetConstructor());
- DCHECK(constructor.has_initial_map());
- DCHECK(constructor.initial_map().is_dictionary_map());
- if (constructor.initial_map().elements_kind() != old_map.elements_kind()) {
- return Map();
- }
- return constructor.initial_map();
- }
- if (!old_map.EquivalentToForTransition(root_map)) return Map();
-
- ElementsKind from_kind = root_map.elements_kind();
- ElementsKind to_kind = old_map.elements_kind();
-
- IntegrityLevelTransitionInfo info(old_map);
- if (root_map.is_extensible() != old_map.is_extensible()) {
- DCHECK(!old_map.is_extensible());
- DCHECK(root_map.is_extensible());
- info = DetectIntegrityLevelTransitions(old_map, isolate, &no_gc);
- // Bail out if there were some private symbol transitions mixed up
- // with the integrity level transitions.
- if (!info.has_integrity_level_transition) return Map();
- // Make sure to replay the original elements kind transitions, before
- // the integrity level transition sets the elements to dictionary mode.
- DCHECK(to_kind == DICTIONARY_ELEMENTS ||
- to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
- IsTypedArrayElementsKind(to_kind) ||
- IsAnyHoleyNonextensibleElementsKind(to_kind));
- to_kind = info.integrity_level_source_map.elements_kind();
- }
- if (from_kind != to_kind) {
- // Try to follow existing elements kind transitions.
- root_map = root_map.LookupElementsTransitionMap(isolate, to_kind);
- if (root_map.is_null()) return Map();
- // From here on, use the map with correct elements kind as root map.
- }
-
- // Replay the transitions as they were before the integrity level transition.
- Map result = root_map.TryReplayPropertyTransitions(
- isolate, info.integrity_level_source_map);
- if (result.is_null()) return Map();
-
- if (info.has_integrity_level_transition) {
- // Now replay the integrity level transition.
- result = TransitionsAccessor(isolate, result, &no_gc)
- .SearchSpecial(info.integrity_level_symbol);
- }
-
- DCHECK_IMPLIES(!result.is_null(),
- old_map.elements_kind() == result.elements_kind());
- DCHECK_IMPLIES(!result.is_null(),
- old_map.instance_type() == result.instance_type());
- return result;
+ return handle(new_map.value(), isolate);
}
-Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
+Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
+ ConcurrencyMode cmode) {
DisallowGarbageCollection no_gc;
- DisallowDeoptimization no_deoptimization(isolate);
- int root_nof = NumberOfOwnDescriptors();
-
- int old_nof = old_map.NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map.instance_descriptors(isolate);
+ const bool is_concurrent = cmode == ConcurrencyMode::kConcurrent;
+ const int root_nof = NumberOfOwnDescriptors();
+ const int old_nof = old_map.NumberOfOwnDescriptors();
+ // TODO(jgruber,chromium:1239009): The main thread should use non-atomic
+ // reads, but this currently leads to odd behavior (see the linked bug).
+ // Investigate and fix this properly. Also below and in called functions.
+ DescriptorArray old_descriptors =
+ old_map.instance_descriptors(isolate, kAcquireLoad);
Map new_map = *this;
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
Map transition =
- TransitionsAccessor(isolate, new_map, &no_gc)
+ TransitionsAccessor(isolate, new_map, &no_gc, is_concurrent)
.SearchTransition(old_descriptors.GetKey(i), old_details.kind(),
old_details.attributes());
if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray new_descriptors = new_map.instance_descriptors(isolate);
+ DescriptorArray new_descriptors =
+ new_map.instance_descriptors(isolate, kAcquireLoad);
PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
@@ -953,39 +855,42 @@ static bool HasElementsKind(MapHandles const& maps,
}
Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
- MapHandles const& candidates) {
+ MapHandles const& candidates,
+ ConcurrencyMode cmode) {
DisallowGarbageCollection no_gc;
- DisallowDeoptimization no_deoptimization(isolate);
if (IsDetached(isolate)) return Map();
ElementsKind kind = elements_kind();
- bool packed = IsFastPackedElementsKind(kind);
+ bool is_packed = IsFastPackedElementsKind(kind);
Map transition;
if (IsTransitionableFastElementsKind(kind)) {
// Check the state of the root map.
Map root_map = FindRootMap(isolate);
- if (!EquivalentToForElementsKindTransition(root_map)) return Map();
- root_map = root_map.LookupElementsTransitionMap(isolate, kind);
+ if (!EquivalentToForElementsKindTransition(root_map, cmode)) return Map();
+ root_map = root_map.LookupElementsTransitionMap(isolate, kind, cmode);
DCHECK(!root_map.is_null());
// Starting from the next existing elements kind transition try to
// replay the property transitions that does not involve instance rewriting
// (ElementsTransitionAndStoreStub does not support that).
- for (root_map = root_map.ElementsTransitionMap(isolate);
+ for (root_map = root_map.ElementsTransitionMap(isolate, cmode);
!root_map.is_null() && root_map.has_fast_elements();
- root_map = root_map.ElementsTransitionMap(isolate)) {
+ root_map = root_map.ElementsTransitionMap(isolate, cmode)) {
// If root_map's elements kind doesn't match any of the elements kind in
// the candidates there is no need to do any additional work.
if (!HasElementsKind(candidates, root_map.elements_kind())) continue;
- Map current = root_map.TryReplayPropertyTransitions(isolate, *this);
+ Map current =
+ root_map.TryReplayPropertyTransitions(isolate, *this, cmode);
if (current.is_null()) continue;
- if (InstancesNeedRewriting(current)) continue;
+ if (InstancesNeedRewriting(current, cmode)) continue;
+ const bool current_is_packed =
+ IsFastPackedElementsKind(current.elements_kind());
if (ContainsMap(candidates, current) &&
- (packed || !IsFastPackedElementsKind(current.elements_kind()))) {
+ (is_packed || !current_is_packed)) {
transition = current;
- packed = packed && IsFastPackedElementsKind(current.elements_kind());
+ is_packed = is_packed && current_is_packed;
}
}
}
@@ -993,7 +898,8 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
}
static Map FindClosestElementsTransition(Isolate* isolate, Map map,
- ElementsKind to_kind) {
+ ElementsKind to_kind,
+ ConcurrencyMode cmode) {
// Ensure we are requested to search elements kind transition "near the root".
DCHECK_EQ(map.FindRootMap(isolate).NumberOfOwnDescriptors(),
map.NumberOfOwnDescriptors());
@@ -1001,7 +907,7 @@ static Map FindClosestElementsTransition(Isolate* isolate, Map map,
ElementsKind kind = map.elements_kind();
while (kind != to_kind) {
- Map next_map = current_map.ElementsTransitionMap(isolate);
+ Map next_map = current_map.ElementsTransitionMap(isolate, cmode);
if (next_map.is_null()) return current_map;
kind = next_map.elements_kind();
current_map = next_map;
@@ -1011,8 +917,9 @@ static Map FindClosestElementsTransition(Isolate* isolate, Map map,
return current_map;
}
-Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
- Map to_map = FindClosestElementsTransition(isolate, *this, to_kind);
+Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind,
+ ConcurrencyMode cmode) {
+ Map to_map = FindClosestElementsTransition(isolate, *this, to_kind, cmode);
if (to_map.elements_kind() == to_kind) return to_map;
return Map();
}
@@ -1113,10 +1020,21 @@ static Handle<Map> AddMissingElementsTransitions(Isolate* isolate,
}
// static
+base::Optional<Map> Map::TryAsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind,
+ ConcurrencyMode cmode) {
+ Map closest_map = FindClosestElementsTransition(isolate, *map, kind, cmode);
+ if (closest_map.elements_kind() != kind) return {};
+ return closest_map;
+}
+
+// static
Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
ElementsKind kind) {
- Handle<Map> closest_map(FindClosestElementsTransition(isolate, *map, kind),
- isolate);
+ Handle<Map> closest_map(
+ FindClosestElementsTransition(isolate, *map, kind,
+ ConcurrencyMode::kNotConcurrent),
+ isolate);
if (closest_map->elements_kind() == kind) {
return closest_map;
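TryAsElementsKind returns an empty base::Optional<Map> when no cached transition exists, while AsElementsKind keeps its handle-returning behaviour for the main thread. A rough sketch of that lookup-only vs. always-succeeds split, with std::optional standing in for base::Optional and hypothetical stand-in types:

#include <iostream>
#include <map>
#include <optional>

// Hypothetical stand-ins: a cache of elements-kind transitions.
using ElementsKind = int;
struct MapInfo { ElementsKind kind; };

std::map<ElementsKind, MapInfo> transition_cache;

// Lookup-only variant: may fail, so it returns an empty optional.
std::optional<MapInfo> TryAsElementsKind(ElementsKind kind) {
  auto it = transition_cache.find(kind);
  if (it == transition_cache.end()) return std::nullopt;
  return it->second;
}

// Creating variant: always succeeds, inserting a new entry on a miss.
MapInfo AsElementsKind(ElementsKind kind) {
  if (auto hit = TryAsElementsKind(kind)) return *hit;
  return transition_cache.emplace(kind, MapInfo{kind}).first->second;
}

int main() {
  std::cout << (TryAsElementsKind(1).has_value() ? "hit" : "miss") << "\n";
  AsElementsKind(1);
  std::cout << (TryAsElementsKind(1).has_value() ? "hit" : "miss") << "\n";
}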
@@ -1378,7 +1296,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
result->set_owns_descriptors(false);
result->UpdateDescriptors(isolate, descriptors, number_of_own_descriptors);
- DCHECK_EQ(result->NumberOfFields(),
+ DCHECK_EQ(result->NumberOfFields(ConcurrencyMode::kNotConcurrent),
result->GetInObjectProperties() - result->UnusedPropertyFields());
}
@@ -1587,7 +1505,8 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(map->FindRootMap(isolate).NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
- maybe_elements_transition_map = map->ElementsTransitionMap(isolate);
+ maybe_elements_transition_map =
+ map->ElementsTransitionMap(isolate, ConcurrencyMode::kNotConcurrent);
DCHECK(
maybe_elements_transition_map.is_null() ||
(maybe_elements_transition_map.elements_kind() == DICTIONARY_ELEMENTS &&
@@ -2133,7 +2052,8 @@ bool CheckEquivalent(const Map first, const Map second) {
} // namespace
-bool Map::EquivalentToForTransition(const Map other) const {
+bool Map::EquivalentToForTransition(const Map other,
+ ConcurrencyMode cmode) const {
CHECK_EQ(GetConstructor(), other.GetConstructor());
CHECK_EQ(instance_type(), other.instance_type());
@@ -2145,19 +2065,28 @@ bool Map::EquivalentToForTransition(const Map other) const {
// not equivalent to strict function.
int nof =
std::min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
- return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
- nof);
+ DescriptorArray this_descriptors = cmode == ConcurrencyMode::kConcurrent
+ ? instance_descriptors(kAcquireLoad)
+ : instance_descriptors();
+ DescriptorArray that_descriptors =
+ cmode == ConcurrencyMode::kConcurrent
+ ? other.instance_descriptors(kAcquireLoad)
+ : other.instance_descriptors();
+ return this_descriptors.IsEqualUpTo(that_descriptors, nof);
}
return true;
}
-bool Map::EquivalentToForElementsKindTransition(const Map other) const {
- if (!EquivalentToForTransition(other)) return false;
+bool Map::EquivalentToForElementsKindTransition(const Map other,
+ ConcurrencyMode cmode) const {
+ if (!EquivalentToForTransition(other, cmode)) return false;
#ifdef DEBUG
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = cmode == ConcurrencyMode::kConcurrent
+ ? instance_descriptors(kAcquireLoad)
+ : instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 355d86a332..74d2a859e8 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -188,7 +188,7 @@ using MapHandles = std::vector<Handle<Map>>;
// | | [raw_transitions] |
// +---------------+-------------------------------------------------+
-class Map : public HeapObject {
+class Map : public TorqueGeneratedMap<Map, HeapObject> {
public:
// Instance size.
// Size in bytes or kVariableSizeSentinel if instances do not have
@@ -436,7 +436,7 @@ class Map : public HeapObject {
// elements or an object with any frozen elements, or a slow arguments object.
bool MayHaveReadOnlyElementsInPrototypeChain(Isolate* isolate);
- inline Map ElementsTransitionMap(Isolate* isolate);
+ inline Map ElementsTransitionMap(Isolate* isolate, ConcurrencyMode cmode);
inline FixedArrayBase GetInitialElements() const;
@@ -497,15 +497,16 @@ class Map : public HeapObject {
};
FieldCounts GetFieldCounts() const;
- int NumberOfFields() const;
+ int NumberOfFields(ConcurrencyMode cmode) const;
bool HasOutOfObjectProperties() const;
// TODO(ishell): candidate with JSObject::MigrateToMap().
- bool InstancesNeedRewriting(Map target) const;
+ bool InstancesNeedRewriting(Map target, ConcurrencyMode cmode) const;
bool InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
- int* old_number_of_fields) const;
+ int* old_number_of_fields,
+ ConcurrencyMode cmode) const;
// Returns true if the |field_type| is the most general one for
// given |representation|.
static inline bool IsMostGeneralFieldType(Representation representation,
@@ -653,6 +654,7 @@ class Map : public HeapObject {
DECL_BOOLEAN_ACCESSORS(is_deprecated)
inline bool CanBeDeprecated() const;
+
// Returns a non-deprecated version of the input. If the input was not
// deprecated, it is directly returned. Otherwise, the non-deprecated version
// is found by re-transitioning from the root of the transition tree using the
@@ -660,8 +662,6 @@ class Map : public HeapObject {
// is found.
V8_EXPORT_PRIVATE static MaybeHandle<Map> TryUpdate(
Isolate* isolate, Handle<Map> map) V8_WARN_UNUSED_RESULT;
- V8_EXPORT_PRIVATE static Map TryUpdateSlow(Isolate* isolate,
- Map map) V8_WARN_UNUSED_RESULT;
// Returns a non-deprecated version of the input. This method may deprecate
// existing maps along the way if encodings conflict. Not for use while
@@ -701,6 +701,10 @@ class Map : public HeapObject {
static Handle<Map> TransitionElementsTo(Isolate* isolate, Handle<Map> map,
ElementsKind to_kind);
+ static base::Optional<Map> TryAsElementsKind(Isolate* isolate,
+ Handle<Map> map,
+ ElementsKind kind,
+ ConcurrencyMode cmode);
V8_EXPORT_PRIVATE static Handle<Map> AsElementsKind(Isolate* isolate,
Handle<Map> map,
ElementsKind kind);
@@ -750,8 +754,6 @@ class Map : public HeapObject {
// Returns the number of enumerable properties.
int NumberOfEnumerableProperties() const;
- DECL_CAST(Map)
-
static inline int SlackForArraySize(int old_size, int size_limit);
V8_EXPORT_PRIVATE static void EnsureDescriptorSlack(Isolate* isolate,
@@ -770,7 +772,7 @@ class Map : public HeapObject {
// elements_kind that's found in |candidates|, or |nullptr| if no match is
// found at all.
V8_EXPORT_PRIVATE Map FindElementsKindTransitionedMap(
- Isolate* isolate, MapHandles const& candidates);
+ Isolate* isolate, MapHandles const& candidates, ConcurrencyMode cmode);
inline bool CanTransition() const;
@@ -811,9 +813,6 @@ class Map : public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_MAP_FIELDS)
-
STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
class BodyDescriptor;
@@ -867,21 +866,24 @@ class Map : public HeapObject {
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
- Map LookupElementsTransitionMap(Isolate* isolate, ElementsKind elements_kind);
+ Map LookupElementsTransitionMap(Isolate* isolate, ElementsKind elements_kind,
+ ConcurrencyMode cmode);
// Tries to replay property transitions starting from this (root) map using
// the descriptor array of the |map|. The |root_map| is expected to have
// proper elements kind and therefore elements kinds transitions are not
// taken by this function. Returns |nullptr| if matching transition map is
// not found.
- Map TryReplayPropertyTransitions(Isolate* isolate, Map map);
+ Map TryReplayPropertyTransitions(Isolate* isolate, Map map,
+ ConcurrencyMode cmode);
static void ConnectTransition(Isolate* isolate, Handle<Map> parent,
Handle<Map> child, Handle<Name> name,
SimpleTransitionFlag flag);
- bool EquivalentToForTransition(const Map other) const;
- bool EquivalentToForElementsKindTransition(const Map other) const;
+ bool EquivalentToForTransition(const Map other, ConcurrencyMode cmode) const;
+ bool EquivalentToForElementsKindTransition(const Map other,
+ ConcurrencyMode cmode) const;
static Handle<Map> RawCopy(Isolate* isolate, Handle<Map> map,
int instance_size, int inobject_properties);
static Handle<Map> ShareDescriptor(Isolate* isolate, Handle<Map> map,
@@ -923,6 +925,10 @@ class Map : public HeapObject {
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
DECL_RELEASE_SETTER(instance_descriptors, DescriptorArray)
+ // Hide inherited accessors from the generated superclass.
+ DECL_ACCESSORS(constructor_or_back_pointer_or_native_context, Object)
+ DECL_ACCESSORS(transitions_or_prototype_info, Object)
+
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
@@ -930,7 +936,7 @@ class Map : public HeapObject {
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
- OBJECT_CONSTRUCTORS(Map, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Map)
};
// The cache for maps used by normalized (dictionary mode) objects.
diff --git a/deps/v8/src/objects/map.tq b/deps/v8/src/objects/map.tq
index 49b2e5be36..2221684832 100644
--- a/deps/v8/src/objects/map.tq
+++ b/deps/v8/src/objects/map.tq
@@ -53,7 +53,7 @@ extern class Map extends HeapObject {
}
instance_size_in_words: uint8;
- in_object_properties_start_or_constructor_function_index: uint8;
+ inobject_properties_start_or_constructor_function_index: uint8;
used_or_unused_instance_size_in_words: uint8;
visitor_id: uint8;
instance_type: InstanceType;
diff --git a/deps/v8/src/objects/megadom-handler.tq b/deps/v8/src/objects/megadom-handler.tq
index 7daaa5a8d8..abcfa583a5 100644
--- a/deps/v8/src/objects/megadom-handler.tq
+++ b/deps/v8/src/objects/megadom-handler.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
@generatePrint
@generateBodyDescriptor
extern class MegaDomHandler extends HeapObject {
diff --git a/deps/v8/src/objects/microtask.tq b/deps/v8/src/objects/microtask.tq
index 72c9cb5d2a..1e7f1525c4 100644
--- a/deps/v8/src/objects/microtask.tq
+++ b/deps/v8/src/objects/microtask.tq
@@ -3,17 +3,14 @@
// found in the LICENSE file.
@abstract
-@generateCppClass
extern class Microtask extends Struct {
}
-@generateCppClass
extern class CallbackTask extends Microtask {
callback: Foreign;
data: Foreign;
}
-@generateCppClass
extern class CallableTask extends Microtask {
callable: JSReceiver;
context: Context;
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index a01c258e02..dcde9513a2 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -20,7 +20,7 @@ namespace internal {
#include "torque-generated/src/objects/module-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(Module, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Module)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace)
NEVER_READ_ONLY_SPACE_IMPL(Module)
@@ -28,14 +28,6 @@ NEVER_READ_ONLY_SPACE_IMPL(ModuleRequest)
NEVER_READ_ONLY_SPACE_IMPL(SourceTextModule)
NEVER_READ_ONLY_SPACE_IMPL(SyntheticModule)
-CAST_ACCESSOR(Module)
-ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
-ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
-ACCESSORS(Module, exception, Object, kExceptionOffset)
-ACCESSORS(Module, top_level_capability, HeapObject, kTopLevelCapabilityOffset)
-SMI_ACCESSORS(Module, status, kStatusOffset)
-SMI_ACCESSORS(Module, hash, kHashOffset)
-
BOOL_ACCESSORS(SourceTextModule, flags, async, AsyncBit::kShift)
BIT_FIELD_ACCESSORS(SourceTextModule, flags, async_evaluating_ordinal,
SourceTextModule::AsyncEvaluatingOrdinalBits)
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index eb7887f139..2945f36a14 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -37,11 +37,11 @@ void PrintModuleName(Module module, std::ostream& os) {
#endif // OBJECT_PRINT
}
-void PrintStatusTransition(Module module, Module::Status new_status) {
+void PrintStatusTransition(Module module, Module::Status old_status) {
if (!FLAG_trace_module_status) return;
StdoutStream os;
- os << "Changing module status from " << module.status() << " to "
- << new_status << " for ";
+ os << "Changing module status from " << old_status << " to "
+ << module.status() << " for ";
PrintModuleName(module, os);
}
@@ -56,9 +56,12 @@ void PrintStatusMessage(Module module, const char* message) {
void SetStatusInternal(Module module, Module::Status new_status) {
DisallowGarbageCollection no_gc;
#ifdef DEBUG
- PrintStatusTransition(module, new_status);
-#endif // DEBUG
+ Module::Status old_status = static_cast<Module::Status>(module.status());
+ module.set_status(new_status);
+ PrintStatusTransition(module, old_status);
+#else
module.set_status(new_status);
+#endif // DEBUG
}
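The reordered SetStatusInternal records the old status before mutating the module, so the debug trace can report the transition from the previous value to the one now stored. A small sketch of the same capture-before-store idea (plain C++, not V8 types):

#include <cstdio>

enum Status { kUnlinked, kLinking, kLinked };

struct Module { Status status = kUnlinked; };

void SetStatus(Module& m, Status new_status) {
#ifdef DEBUG
  // Capture the old value first, then store, then trace the transition.
  Status old_status = m.status;
  m.status = new_status;
  std::printf("status %d -> %d\n", old_status, m.status);
#else
  m.status = new_status;
#endif
}

int main() {
  Module m;
  SetStatus(m, kLinking);
  SetStatus(m, kLinked);
}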
} // end namespace
@@ -100,8 +103,7 @@ void Module::RecordError(Isolate* isolate, Handle<Module> module,
void Module::ResetGraph(Isolate* isolate, Handle<Module> module) {
DCHECK_NE(module->status(), kEvaluating);
- if (module->status() != kPreInstantiating &&
- module->status() != kInstantiating) {
+ if (module->status() != kPreLinking && module->status() != kLinking) {
return;
}
@@ -127,8 +129,7 @@ void Module::ResetGraph(Isolate* isolate, Handle<Module> module) {
}
void Module::Reset(Isolate* isolate, Handle<Module> module) {
- DCHECK(module->status() == kPreInstantiating ||
- module->status() == kInstantiating);
+ DCHECK(module->status() == kPreLinking || module->status() == kLinking);
DCHECK(module->exception().IsTheHole(isolate));
// The namespace object cannot exist, because it would have been created
// by RunInitializationCode, which is called only after this module's SCC
@@ -145,7 +146,7 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
}
module->set_exports(*exports);
- SetStatusInternal(*module, kUninstantiated);
+ SetStatusInternal(*module, kUnlinked);
}
Object Module::GetException() {
@@ -160,7 +161,7 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Handle<String> export_name,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
- DCHECK_GE(module->status(), kPreInstantiating);
+ DCHECK_GE(module->status(), kPreLinking);
DCHECK_NE(module->status(), kEvaluating);
if (module->IsSourceTextModule()) {
@@ -185,7 +186,7 @@ bool Module::Instantiate(
if (!PrepareInstantiate(isolate, module, context, callback,
callback_without_import_assertions)) {
ResetGraph(isolate, module);
- DCHECK_EQ(module->status(), kUninstantiated);
+ DCHECK_EQ(module->status(), kUnlinked);
return false;
}
Zone zone(isolate->allocator(), ZONE_NAME);
@@ -193,10 +194,10 @@ bool Module::Instantiate(
unsigned dfs_index = 0;
if (!FinishInstantiate(isolate, module, &stack, &dfs_index, &zone)) {
ResetGraph(isolate, module);
- DCHECK_EQ(module->status(), kUninstantiated);
+ DCHECK_EQ(module->status(), kUnlinked);
return false;
}
- DCHECK(module->status() == kInstantiated || module->status() == kEvaluated ||
+ DCHECK(module->status() == kLinked || module->status() == kEvaluated ||
module->status() == kErrored);
DCHECK(stack.empty());
return true;
@@ -207,9 +208,9 @@ bool Module::PrepareInstantiate(
v8::Module::ResolveModuleCallback callback,
DeprecatedResolveCallback callback_without_import_assertions) {
DCHECK_NE(module->status(), kEvaluating);
- DCHECK_NE(module->status(), kInstantiating);
- if (module->status() >= kPreInstantiating) return true;
- module->SetStatus(kPreInstantiating);
+ DCHECK_NE(module->status(), kLinking);
+ if (module->status() >= kPreLinking) return true;
+ module->SetStatus(kPreLinking);
STACK_CHECK(isolate, false);
if (module->IsSourceTextModule()) {
@@ -226,8 +227,8 @@ bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
ZoneForwardList<Handle<SourceTextModule>>* stack,
unsigned* dfs_index, Zone* zone) {
DCHECK_NE(module->status(), kEvaluating);
- if (module->status() >= kInstantiating) return true;
- DCHECK_EQ(module->status(), kPreInstantiating);
+ if (module->status() >= kLinking) return true;
+ DCHECK_EQ(module->status(), kPreLinking);
STACK_CHECK(isolate, false);
if (module->IsSourceTextModule()) {
@@ -273,7 +274,7 @@ MaybeHandle<Object> Module::EvaluateMaybeAsync(Isolate* isolate,
// Start of Evaluate () Concrete Method
// 2. Assert: module.[[Status]] is "linked" or "evaluated".
- CHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+ CHECK(module->status() == kLinked || module->status() == kEvaluated);
// 3. If module.[[Status]] is "evaluated", set module to
// module.[[CycleRoot]].
@@ -313,7 +314,7 @@ MaybeHandle<Object> Module::InnerEvaluate(Isolate* isolate,
//
// However, SyntheticModules transition directly to 'Evaluated,' so we should
// never see an 'Evaluating' module at this point.
- CHECK_EQ(module->status(), kInstantiated);
+ CHECK_EQ(module->status(), kLinked);
if (module->IsSourceTextModule()) {
return SourceTextModule::Evaluate(isolate,
@@ -443,7 +444,7 @@ bool Module::IsGraphAsync(Isolate* isolate) const {
do {
SourceTextModule current = worklist.back();
worklist.pop_back();
- DCHECK_GE(current.status(), kInstantiated);
+ DCHECK_GE(current.status(), kLinked);
if (current.async()) return true;
FixedArray requested_modules = current.requested_modules();
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index a114a34a97..05ea04ccd9 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -32,42 +32,26 @@ class Zone;
// Module is the base class for ECMAScript module types, roughly corresponding
// to Abstract Module Record.
// https://tc39.github.io/ecma262/#sec-abstract-module-records
-class Module : public HeapObject {
+class Module : public TorqueGeneratedModule<Module, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
- DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
- // The complete export table, mapping an export name to its cell.
- DECL_ACCESSORS(exports, ObjectHashTable)
-
- // Hash for this object (a random non-zero Smi).
- DECL_INT_ACCESSORS(hash)
-
- // Status.
- DECL_INT_ACCESSORS(status)
enum Status {
// Order matters!
- kUninstantiated,
- kPreInstantiating,
- kInstantiating,
- kInstantiated,
+ kUnlinked,
+ kPreLinking,
+ kLinking,
+ kLinked,
kEvaluating,
+ kEvaluatingAsync,
kEvaluated,
kErrored
};
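The renaming maps the old states onto the spec's Link/Evaluate terminology (kUninstantiated becomes kUnlinked, kPreInstantiating becomes kPreLinking, kInstantiating becomes kLinking, kInstantiated becomes kLinked) and inserts kEvaluatingAsync before kEvaluated. The "Order matters!" comment is load-bearing because call sites compare statuses with >=; a tiny illustration of such ordered-state checks (illustrative only, not the V8 call sites):

enum Status {
  kUnlinked, kPreLinking, kLinking, kLinked,
  kEvaluating, kEvaluatingAsync, kEvaluated, kErrored
};

// Checks like these rely on the declaration order of the enumerators,
// which is why reordering them would silently change behaviour.
bool HasStartedLinking(Status s) { return s >= kPreLinking; }
bool IsLinkedOrLater(Status s) { return s >= kLinked; }

int main() {
  return HasStartedLinking(kLinking) && IsLinkedOrLater(kEvaluated) ? 0 : 1;
}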
- // The namespace object (or undefined).
- DECL_ACCESSORS(module_namespace, HeapObject)
-
// The exception in the case {status} is kErrored.
Object GetException();
- DECL_ACCESSORS(exception, Object)
-
- // The top level promise capability of this module. Will only be defined
- // for cycle roots.
- DECL_ACCESSORS(top_level_capability, HeapObject)
// Returns if this module or any transitively requested module is [[Async]],
// i.e. has a top-level await.
@@ -101,10 +85,6 @@ class Module : public HeapObject {
static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
Handle<Module> module);
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_MODULE_FIELDS)
-
using BodyDescriptor =
FixedBodyDescriptor<kExportsOffset, kHeaderSize, kHeaderSize>;
@@ -142,7 +122,7 @@ class Module : public HeapObject {
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerEvaluate(
Isolate* isolate, Handle<Module> module);
- // Set module's status back to kUninstantiated and reset other internal state.
+ // Set module's status back to kUnlinked and reset other internal state.
// This is used when instantiation fails.
static void Reset(Isolate* isolate, Handle<Module> module);
static void ResetGraph(Isolate* isolate, Handle<Module> module);
@@ -155,7 +135,7 @@ class Module : public HeapObject {
static void RecordError(Isolate* isolate, Handle<Module> module,
Handle<Object> error);
- OBJECT_CONSTRUCTORS(Module, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Module)
};
// When importing a module namespace (import * as foo from "bar"), a
diff --git a/deps/v8/src/objects/module.tq b/deps/v8/src/objects/module.tq
index 2d8e8b6327..b1535c3134 100644
--- a/deps/v8/src/objects/module.tq
+++ b/deps/v8/src/objects/module.tq
@@ -4,15 +4,17 @@
@abstract
extern class Module extends HeapObject {
+ // The complete export table, mapping an export name to its cell.
exports: ObjectHashTable;
+ // Hash for this object (a random non-zero Smi).
hash: Smi;
status: Smi;
module_namespace: JSModuleNamespace|Undefined;
+ // The exception in the case {status} is kErrored.
exception: Object;
+ // The top level promise capability of this module. Will only be defined
+ // for cycle roots.
top_level_capability: JSPromise|Undefined;
}
-@generateCppClass
-extern class JSModuleNamespace extends JSSpecialObject {
- module: Module;
-}
+extern class JSModuleNamespace extends JSSpecialObject { module: Module; }
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index 93c0cd3fa9..a1f4eb5368 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -10,6 +10,7 @@
#include "src/objects/map-inl.h"
#include "src/objects/name.h"
#include "src/objects/primitive-heap-object-inl.h"
+#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -97,6 +98,14 @@ uint32_t Name::EnsureHash() {
return String::cast(*this).ComputeAndSetHash();
}
+uint32_t Name::EnsureHash(const SharedStringAccessGuardIfNeeded& access_guard) {
+ // Fast case: has hash code already been computed?
+ uint32_t field = raw_hash_field();
+ if (IsHashFieldComputed(field)) return field >> kHashShift;
+ // Slow case: compute hash code and set it. Has to be a string.
+ return String::cast(*this).ComputeAndSetHash(access_guard);
+}
+
uint32_t Name::hash() const {
uint32_t field = raw_hash_field();
DCHECK(IsHashFieldComputed(field));
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 1d4981f85c..cbd7065367 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -18,6 +18,8 @@ namespace internal {
#include "torque-generated/src/objects/name-tq.inc"
+class SharedStringAccessGuardIfNeeded;
+
// The Name abstract class captures anything that can be used as a property
// name, i.e., strings and symbols. All names store a hash value.
class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
@@ -27,7 +29,11 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
// Returns a hash value used for the property table. Ensures that the hash
// value is computed.
+ //
+ // The overload without SharedStringAccessGuardIfNeeded can only be called on
+ // the main thread.
inline uint32_t EnsureHash();
+ inline uint32_t EnsureHash(const SharedStringAccessGuardIfNeeded&);
// Returns a hash value used for the property table (same as Hash()), assumes
// the hash is already computed.
diff --git a/deps/v8/src/objects/name.tq b/deps/v8/src/objects/name.tq
index b0d892b070..55f70d26b5 100644
--- a/deps/v8/src/objects/name.tq
+++ b/deps/v8/src/objects/name.tq
@@ -3,7 +3,6 @@
// found in the LICENSE file.
@abstract
-@generateCppClass
extern class Name extends PrimitiveHeapObject {
raw_hash_field: NameHash;
}
@@ -28,7 +27,6 @@ bitfield struct SymbolFlags extends uint32 {
is_private_brand: bool: 1 bit;
}
-@generateCppClass
extern class Symbol extends Name {
flags: SymbolFlags;
description: String|Undefined;
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 4e817a23f1..e5ba2684b2 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -217,7 +217,7 @@ class ZoneForwardList;
V(UniqueName) \
IF_WASM(V, WasmArray) \
IF_WASM(V, WasmCapiFunctionData) \
- IF_WASM(V, WasmExceptionObject) \
+ IF_WASM(V, WasmTagObject) \
IF_WASM(V, WasmExceptionPackage) \
IF_WASM(V, WasmExportedFunctionData) \
IF_WASM(V, WasmFunctionData) \
@@ -270,6 +270,7 @@ class ZoneForwardList;
V(FreeSpaceOrFiller) \
V(FunctionContext) \
V(JSApiObject) \
+ V(JSLastDummyApiObject) \
V(JSPromiseConstructor) \
V(JSArrayConstructor) \
V(JSRegExpConstructor) \
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index 1d240729ff..1aa9dc10b4 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -17,11 +17,14 @@
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
#undef DECL_INT32_ACCESSORS
+#undef DECL_RELAXED_INT32_ACCESSORS
#undef DECL_UINT16_ACCESSORS
#undef DECL_INT16_ACCESSORS
#undef DECL_UINT8_ACCESSORS
#undef DECL_GETTER
#undef DEF_GETTER
+#undef DEF_RELAXED_GETTER
+#undef DEF_ACQUIRE_GETTER
#undef DECL_SETTER
#undef DECL_ACCESSORS
#undef DECL_ACCESSORS_LOAD_TAG
@@ -37,6 +40,7 @@
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef INT32_ACCESSORS
+#undef IMPLICIT_TAG_RELAXED_INT32_ACCESSORS
#undef RELAXED_INT32_ACCESSORS
#undef UINT16_ACCESSORS
#undef UINT8_ACCESSORS
@@ -45,6 +49,9 @@
#undef ACCESSORS
#undef RENAME_TORQUE_ACCESSORS
#undef RENAME_UINT16_TORQUE_ACCESSORS
+#undef ACCESSORS_RELAXED_CHECKED2
+#undef ACCESSORS_RELAXED_CHECKED
+#undef ACCESSORS_RELAXED
#undef RELAXED_ACCESSORS_CHECKED2
#undef RELAXED_ACCESSORS_CHECKED
#undef RELAXED_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index b57adfde2d..561b1de30b 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -62,6 +62,10 @@
#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
+#define DECL_RELAXED_INT32_ACCESSORS(name) \
+ inline int32_t name(RelaxedLoadTag) const; \
+ inline void set_##name(int32_t value, RelaxedStoreTag);
+
#define DECL_UINT16_ACCESSORS(name) \
inline uint16_t name() const; \
inline void set_##name(int value);
@@ -159,12 +163,21 @@
int32_t holder::name() const { return ReadField<int32_t>(offset); } \
void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
-#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { \
- return RELAXED_READ_INT32_FIELD(*this, offset); \
- } \
- void holder::set_##name(int32_t value) { \
- RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
+// TODO(solanes): Use the non-implicit one, and change the uses to use the tag.
+#define IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(holder, name, offset) \
+ int32_t holder::name() const { \
+ return RELAXED_READ_INT32_FIELD(*this, offset); \
+ } \
+ void holder::set_##name(int32_t value) { \
+ RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
+ }
+
+#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
+ int32_t holder::name(RelaxedLoadTag) const { \
+ return RELAXED_READ_INT32_FIELD(*this, offset); \
+ } \
+ void holder::set_##name(int32_t value, RelaxedStoreTag) { \
+ RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
}
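The macro now generates accessors that take explicit RelaxedLoadTag / RelaxedStoreTag parameters, so relaxed atomic access is visible at every call site; the IMPLICIT_TAG_ variant keeps the old untagged signature for callers that have not migrated yet (see the TODO). A standalone sketch of tag-dispatched accessors built on std::atomic (names are illustrative, not the V8 macros):

#include <atomic>
#include <cstdio>

struct RelaxedLoadTag {};
struct RelaxedStoreTag {};
inline constexpr RelaxedLoadTag kRelaxedLoad{};
inline constexpr RelaxedStoreTag kRelaxedStore{};

class Holder {
 public:
  // The tag parameters make the memory order explicit at the call site.
  int32_t flags(RelaxedLoadTag) const {
    return flags_.load(std::memory_order_relaxed);
  }
  void set_flags(int32_t value, RelaxedStoreTag) {
    flags_.store(value, std::memory_order_relaxed);
  }

 private:
  std::atomic<int32_t> flags_{0};
};

int main() {
  Holder h;
  h.set_flags(42, kRelaxedStore);
  std::printf("%d\n", h.flags(kRelaxedLoad));
}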
#define UINT16_ACCESSORS(holder, name, offset) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index bb3c6ca3ce..7750b26575 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -296,6 +296,39 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = JSObject::BodyDescriptor::kStartOffset;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kStartOffset) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ // Iterate JSFunction header fields first.
+ int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot());
+ DCHECK_GE(object_size, header_size);
+ IteratePointers(obj, kStartOffset, kCodeOffset, v);
+ // Code field is treated as a custom weak pointer. This field is visited as
+ // a weak pointer if the Code is baseline code and the bytecode array
+ // corresponding to this function is old. In the rest of the cases this
+    // field is treated as a strong pointer.
+ IterateCustomWeakPointer(obj, kCodeOffset, v);
+ // Iterate rest of the header fields
+ DCHECK_GE(header_size, kCodeOffset);
+ IteratePointers(obj, kCodeOffset + kTaggedSize, header_size, v);
+ // Iterate rest of the fields starting after the header.
+ IterateJSObjectBodyImpl(map, obj, header_size, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
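The new body descriptor visits the Code field through IterateCustomWeakPointer so the GC can treat it as weak when the function only holds old baseline code, while every other header field remains a strong pointer. A simplified, self-contained sketch of splitting one field out of an otherwise uniform visitation loop (this is not the real visitor interface):

#include <cstdio>
#include <vector>

struct Object { const char* name; };

struct Visitor {
  void VisitStrong(Object* o) { std::printf("strong: %s\n", o->name); }
  void VisitCustomWeak(Object* o) { std::printf("custom weak: %s\n", o->name); }
};

// Fields before and after `code_index` are strong; the code field itself gets
// the custom-weak treatment, mirroring the descriptor above in spirit.
void IterateBody(std::vector<Object*>& fields, size_t code_index, Visitor* v) {
  for (size_t i = 0; i < fields.size(); ++i) {
    if (i == code_index) {
      v->VisitCustomWeak(fields[i]);
    } else {
      v->VisitStrong(fields[i]);
    }
  }
}

int main() {
  Object ctx{"context"}, code{"code"}, fb{"feedback_cell"};
  std::vector<Object*> fields{&ctx, &code, &fb};
  Visitor v;
  IterateBody(fields, 1, &v);
}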
+
class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -584,6 +617,7 @@ class WasmTypeInfo::BodyDescriptor final : public BodyDescriptorBase {
v);
IteratePointer(obj, kSupertypesOffset, v);
IteratePointer(obj, kSubtypesOffset, v);
+ IteratePointer(obj, kInstanceOffset, v);
}
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
@@ -686,6 +720,8 @@ class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
+ // The type is safe to use because it's kept alive by the {map}'s
+ // WasmTypeInfo.
if (!WasmArray::GcSafeType(map)->element_type().is_reference()) return;
IteratePointers(obj, WasmArray::kHeaderSize, object_size, v);
}
@@ -708,6 +744,8 @@ class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
WasmStruct wasm_struct = WasmStruct::cast(obj);
+ // The {type} is safe to use because it's kept alive by the {map}'s
+ // WasmTypeInfo.
wasm::StructType* type = WasmStruct::GcSafeType(map);
for (uint32_t i = 0; i < type->field_count(); i++) {
if (!type->field(i).is_reference()) continue;
@@ -893,9 +931,7 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
CodeDataContainer::kPointerFieldsWeakEndOffset, v);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // TODO(v8:11880): Currently, the |code| field is still compressed and
- // the |code_entry_point| field doesn't require custom visitation, so
- // nothing to do here yet.
+ v->VisitCodePointer(obj, obj.RawField(kCodeOffset));
}
}
@@ -970,6 +1006,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
}
UNREACHABLE();
}
+ if (InstanceTypeChecker::IsJSApiObject(type)) {
+ return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
+ }
switch (type) {
case EMBEDDER_DATA_ARRAY_TYPE:
@@ -1097,7 +1136,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SEGMENTS_TYPE:
#endif // V8_INTL_SUPPORT
#if V8_ENABLE_WEBASSEMBLY
- case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_TAG_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index 3f68db2b40..6800db3b78 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -34,7 +34,7 @@
#include "src/objects/oddball-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/property.h"
-#include "src/objects/regexp-match-info.h"
+#include "src/objects/regexp-match-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
@@ -439,7 +439,6 @@ bool Object::IsMinusZero() const {
i::IsMinusZero(HeapNumber::cast(*this).value());
}
-OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
OBJECT_CONSTRUCTORS_IMPL(BigIntBase, PrimitiveHeapObject)
OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase)
OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
@@ -449,7 +448,6 @@ OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
CAST_ACCESSOR(BigIntBase)
CAST_ACCESSOR(BigInt)
-CAST_ACCESSOR(RegExpMatchInfo)
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray. ByteArray is used
@@ -732,12 +730,8 @@ void HeapObject::set_map(Map value) {
#endif
}
-Map HeapObject::map(AcquireLoadTag tag) const {
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
- return HeapObject::map(cage_base, tag);
-}
-Map HeapObject::map(PtrComprCageBase cage_base, AcquireLoadTag tag) const {
- return map_word(cage_base, tag).ToMap();
+DEF_ACQUIRE_GETTER(HeapObject, map, Map) {
+ return map_word(cage_base, kAcquireLoad).ToMap();
}
void HeapObject::set_map(Map value, ReleaseStoreTag tag) {
@@ -783,11 +777,7 @@ ObjectSlot HeapObject::map_slot() const {
return ObjectSlot(MapField::address(*this));
}
-MapWord HeapObject::map_word(RelaxedLoadTag tag) const {
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
- return HeapObject::map_word(cage_base, tag);
-}
-MapWord HeapObject::map_word(PtrComprCageBase cage_base, RelaxedLoadTag) const {
+DEF_RELAXED_GETTER(HeapObject, map_word, MapWord) {
return MapField::Relaxed_Load_Map_Word(cage_base, *this);
}
@@ -795,11 +785,7 @@ void HeapObject::set_map_word(MapWord map_word, RelaxedStoreTag) {
MapField::Relaxed_Store_Map_Word(*this, map_word);
}
-MapWord HeapObject::map_word(AcquireLoadTag tag) const {
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
- return HeapObject::map_word(cage_base, tag);
-}
-MapWord HeapObject::map_word(PtrComprCageBase cage_base, AcquireLoadTag) const {
+DEF_ACQUIRE_GETTER(HeapObject, map_word, MapWord) {
return MapField::Acquire_Load_No_Unpack(cage_base, *this);
}
@@ -870,48 +856,6 @@ bool Object::ToIntegerIndex(size_t* index) const {
return false;
}
-int RegExpMatchInfo::NumberOfCaptureRegisters() {
- DCHECK_GE(length(), kLastMatchOverhead);
- Object obj = get(kNumberOfCapturesIndex);
- return Smi::ToInt(obj);
-}
-
-void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
- DCHECK_GE(length(), kLastMatchOverhead);
- set(kNumberOfCapturesIndex, Smi::FromInt(value));
-}
-
-String RegExpMatchInfo::LastSubject() {
- DCHECK_GE(length(), kLastMatchOverhead);
- return String::cast(get(kLastSubjectIndex));
-}
-
-void RegExpMatchInfo::SetLastSubject(String value, WriteBarrierMode mode) {
- DCHECK_GE(length(), kLastMatchOverhead);
- set(kLastSubjectIndex, value, mode);
-}
-
-Object RegExpMatchInfo::LastInput() {
- DCHECK_GE(length(), kLastMatchOverhead);
- return get(kLastInputIndex);
-}
-
-void RegExpMatchInfo::SetLastInput(Object value, WriteBarrierMode mode) {
- DCHECK_GE(length(), kLastMatchOverhead);
- set(kLastInputIndex, value, mode);
-}
-
-int RegExpMatchInfo::Capture(int i) {
- DCHECK_LT(i, NumberOfCaptureRegisters());
- Object obj = get(kFirstCaptureIndex + i);
- return Smi::ToInt(obj);
-}
-
-void RegExpMatchInfo::SetCapture(int i, int value) {
- DCHECK_LT(i, NumberOfCaptureRegisters());
- set(kFirstCaptureIndex + i, Smi::FromInt(value));
-}
-
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowGarbageCollection& promise) {
return GetWriteBarrierModeForObject(*this, &promise);
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index d86438dde0..2f16615536 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -1762,14 +1762,15 @@ bool Object::IterationHasObservableEffects() {
JSArray array = JSArray::cast(*this);
Isolate* isolate = array.GetIsolate();
-#ifdef V8_ENABLE_FORCE_SLOW_PATH
- if (isolate->force_slow_path()) return true;
-#endif
-
// Check that we have the original ArrayPrototype.
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Context> context;
+ if (!array.GetCreationContext().ToHandle(&context)) return false;
if (!array.map().prototype().IsJSObject()) return true;
JSObject array_proto = JSObject::cast(array.map().prototype());
- if (!isolate->is_initial_array_prototype(array_proto)) return true;
+ auto initial_array_prototype =
+ context->native_context().initial_array_prototype();
+ if (initial_array_prototype != array_proto) return true;
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
@@ -2363,7 +2364,8 @@ bool HeapObject::CanBeRehashed() const {
}
}
-void HeapObject::RehashBasedOnMap(Isolate* isolate) {
+template <typename IsolateT>
+void HeapObject::RehashBasedOnMap(IsolateT* isolate) {
switch (map().instance_type()) {
case HASH_TABLE_TYPE:
UNREACHABLE();
@@ -2399,11 +2401,11 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
case ORDERED_HASH_SET_TYPE:
UNREACHABLE(); // We'll rehash from the JSMap or JSSet referencing them.
case JS_MAP_TYPE: {
- JSMap::cast(*this).Rehash(isolate);
+ JSMap::cast(*this).Rehash(isolate->AsIsolate());
break;
}
case JS_SET_TYPE: {
- JSSet::cast(*this).Rehash(isolate);
+ JSSet::cast(*this).Rehash(isolate->AsIsolate());
break;
}
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
@@ -2419,6 +2421,8 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
UNREACHABLE();
}
}
+template void HeapObject::RehashBasedOnMap(Isolate* isolate);
+template void HeapObject::RehashBasedOnMap(LocalIsolate* isolate);
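RehashBasedOnMap becomes a template over the isolate type, and because its definition stays in the .cc file, the two explicit instantiations above are what keep Isolate and LocalIsolate callers linking. A generic sketch of that definition-in-source plus explicit-instantiation arrangement (file names and types here are hypothetical; shown as a single translation unit so it compiles as-is):

#include <cstdio>

// rehash.h would carry only the declaration that other translation units see.
struct Isolate { const char* kind() const { return "main"; } };
struct LocalIsolate { const char* kind() const { return "local"; } };

template <typename IsolateT>
void RehashBasedOnMap(IsolateT* isolate);

// rehash.cc holds the definition plus the explicit instantiations; only the
// instantiations listed here are emitted for other files to link against.
template <typename IsolateT>
void RehashBasedOnMap(IsolateT* isolate) {
  std::printf("rehash on %s isolate\n", isolate->kind());
}
template void RehashBasedOnMap(Isolate*);
template void RehashBasedOnMap(LocalIsolate*);

int main() {
  Isolate i;
  LocalIsolate li;
  RehashBasedOnMap(&i);
  RehashBasedOnMap(&li);
}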
bool HeapObject::IsExternal(Isolate* isolate) const {
return map().FindRootMap(isolate) == isolate->heap()->external_map();
@@ -2767,12 +2771,11 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
- // TODO(v8:11111): Support RAB / GSAB.
if (it->IsElement() && receiver->IsJSObject(isolate) &&
- JSObject::cast(*receiver).HasTypedArrayElements(isolate)) {
+ JSObject::cast(*receiver).HasTypedArrayOrRabGsabTypedArrayElements(
+ isolate)) {
ElementsKind elements_kind = JSObject::cast(*receiver).GetElementsKind();
- if (elements_kind == BIGINT64_ELEMENTS ||
- elements_kind == BIGUINT64_ELEMENTS) {
+ if (IsBigIntTypedArrayElementsKind(elements_kind)) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, to_assign,
BigInt::FromObject(isolate, value),
Nothing<bool>());
@@ -4469,7 +4472,7 @@ Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
isolate, native_context,
Handle<FunctionTemplateInfo>::cast(accessor))
.ToHandleChecked();
- accessor_pair->set(component, *function);
+ accessor_pair->set(component, *function, kReleaseStore);
return function;
}
if (accessor->IsNull(isolate)) {
@@ -4703,7 +4706,8 @@ Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- Map maybe_elements_transition = current_map->ElementsTransitionMap(isolate);
+ Map maybe_elements_transition = current_map->ElementsTransitionMap(
+ isolate, ConcurrencyMode::kNotConcurrent);
if (!maybe_elements_transition.is_null()) {
new_map = handle(maybe_elements_transition, isolate);
} else {
@@ -5354,12 +5358,9 @@ Handle<Object> JSPromise::Fulfill(Handle<JSPromise> promise,
}
static void MoveMessageToPromise(Isolate* isolate, Handle<JSPromise> promise) {
- if (isolate->thread_local_top()->pending_message_obj_.IsTheHole(isolate)) {
- return;
- }
+ if (!isolate->has_pending_message()) return;
- Handle<Object> message =
- handle(isolate->thread_local_top()->pending_message_obj_, isolate);
+ Handle<Object> message = handle(isolate->pending_message(), isolate);
Handle<Symbol> key = isolate->factory()->promise_debug_message_symbol();
Object::SetProperty(isolate, promise, key, message, StoreOrigin::kMaybeKeyed,
Just(ShouldThrow::kThrowOnError))
@@ -5905,7 +5906,6 @@ GlobalDictionary::TryFindPropertyCellForConcurrentLookupIterator(
CHECK(element.IsPropertyCell(cage_base));
return PropertyCell::cast(element);
}
- return {};
}
Handle<StringSet> StringSet::New(Isolate* isolate) {
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 9ca08df612..eb31ec957d 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -81,7 +81,7 @@
// - JSSegments // If V8_INTL_SUPPORT enabled.
// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
-// - WasmExceptionObject
+// - WasmTagObject
// - WasmGlobalObject
// - WasmInstanceObject
// - WasmMemoryObject
@@ -592,7 +592,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// Returns true if the result of iterating over the object is the same
// (including observable effects) as simply accessing the properties between 0
// and length.
- bool IterationHasObservableEffects();
+ V8_EXPORT_PRIVATE bool IterationHasObservableEffects();
// TC39 "Dynamic Code Brand Checks"
bool IsCodeLike(Isolate* isolate) const;
diff --git a/deps/v8/src/objects/ordered-hash-table.tq b/deps/v8/src/objects/ordered-hash-table.tq
index b7fb6b9bc2..82d49b27bc 100644
--- a/deps/v8/src/objects/ordered-hash-table.tq
+++ b/deps/v8/src/objects/ordered-hash-table.tq
@@ -16,6 +16,7 @@ const kSmallOrderedHashTableLoadFactor: constexpr int31
@noVerifier
@abstract
+@doNotGenerateCppClass
extern class SmallOrderedHashTable extends HeapObject
generates 'TNode<HeapObject>' {
}
@@ -23,6 +24,7 @@ extern class SmallOrderedHashTable extends HeapObject
extern macro SmallOrderedHashSetMapConstant(): Map;
const kSmallOrderedHashSetMap: Map = SmallOrderedHashSetMapConstant();
+@doNotGenerateCppClass
extern class SmallOrderedHashSet extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
@@ -61,6 +63,7 @@ struct HashMapEntry {
extern macro SmallOrderedHashMapMapConstant(): Map;
const kSmallOrderedHashMapMap: Map = SmallOrderedHashMapMapConstant();
+@doNotGenerateCppClass
extern class SmallOrderedHashMap extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
@@ -97,6 +100,7 @@ struct NameDictionaryEntry {
property_details: Smi|TheHole;
}
+@doNotGenerateCppClass
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable {
hash: int32;
@if(TAGGED_SIZE_8_BYTES) padding_0: int32;
diff --git a/deps/v8/src/objects/primitive-heap-object.tq b/deps/v8/src/objects/primitive-heap-object.tq
index 244e507233..49ea6a5c89 100644
--- a/deps/v8/src/objects/primitive-heap-object.tq
+++ b/deps/v8/src/objects/primitive-heap-object.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
@abstract
extern class PrimitiveHeapObject extends HeapObject {
}
diff --git a/deps/v8/src/objects/promise.tq b/deps/v8/src/objects/promise.tq
index 90ef565cad..972988acf5 100644
--- a/deps/v8/src/objects/promise.tq
+++ b/deps/v8/src/objects/promise.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class PromiseCapability extends Struct {
promise: JSReceiver|Undefined;
// TODO(joshualitt): Can these be typed more specifically.
@@ -25,7 +24,6 @@ const kPromiseReactionPromiseOrCapabilityOffset: constexpr int31
const kPromiseReactionContinuationPreservedEmbedderDataOffset: constexpr int31
generates 'PromiseReaction::kContinuationPreservedEmbedderDataOffset';
-@generateCppClass
extern class PromiseReaction extends Struct {
next: PromiseReaction|Zero;
reject_handler: Callable|Undefined;
@@ -49,7 +47,6 @@ const kPromiseReactionJobTaskContinuationPreservedEmbedderDataOffset:
;
@abstract
-@generateCppClass
extern class PromiseReactionJobTask extends Microtask {
argument: Object;
context: Context;
@@ -60,15 +57,10 @@ extern class PromiseReactionJobTask extends Microtask {
continuation_preserved_embedder_data: Object|Undefined;
}
-@generateCppClass
-extern class PromiseFulfillReactionJobTask extends PromiseReactionJobTask {
-}
+extern class PromiseFulfillReactionJobTask extends PromiseReactionJobTask {}
-@generateCppClass
-extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask {
-}
+extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask {}
-@generateCppClass
extern class PromiseResolveThenableJobTask extends Microtask {
context: Context;
promise_to_resolve: JSPromise;
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index 5eea562bc1..df5f2a1502 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -18,8 +18,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(PropertyArray, HeapObject)
-CAST_ACCESSOR(PropertyArray)
+#include "torque-generated/src/objects/property-array-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(PropertyArray)
SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
RELEASE_ACQUIRE_SMI_ACCESSORS(PropertyArray, length_and_hash,
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 8ee51982c0..52242c87c9 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -14,7 +14,10 @@
namespace v8 {
namespace internal {
-class PropertyArray : public HeapObject {
+#include "torque-generated/src/objects/property-array-tq.inc"
+
+class PropertyArray
+ : public TorqueGeneratedPropertyArray<PropertyArray, HeapObject> {
public:
// [length]: length of the array.
inline int length() const;
@@ -47,13 +50,9 @@ class PropertyArray : public HeapObject {
}
static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
- DECL_CAST(PropertyArray)
DECL_PRINTER(PropertyArray)
DECL_VERIFIER(PropertyArray)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_PROPERTY_ARRAY_FIELDS)
-
// Garbage collection support.
using BodyDescriptor = FlexibleBodyDescriptor<kHeaderSize>;
@@ -70,7 +69,7 @@ class PropertyArray : public HeapObject {
DECL_RELEASE_ACQUIRE_INT_ACCESSORS(length_and_hash)
- OBJECT_CONSTRUCTORS(PropertyArray, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(PropertyArray)
};
} // namespace internal
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
index 154dcab41f..dfaaf1c80a 100644
--- a/deps/v8/src/objects/property-cell-inl.h
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -16,9 +16,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(PropertyCell, HeapObject)
+#include "torque-generated/src/objects/property-cell-tq-inl.inc"
-CAST_ACCESSOR(PropertyCell)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PropertyCell)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, name, Name, kNameOffset)
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index f4fb2391c3..38a83f590d 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -14,7 +14,10 @@
namespace v8 {
namespace internal {
-class PropertyCell : public HeapObject {
+#include "torque-generated/src/objects/property-cell-tq.inc"
+
+class PropertyCell
+ : public TorqueGeneratedPropertyCell<PropertyCell, HeapObject> {
public:
// [name]: the name of the global property.
DECL_GETTER(name, Name)
@@ -65,16 +68,12 @@ class PropertyCell : public HeapObject {
// approximation with false positives.
static bool CheckDataIsCompatible(PropertyDetails details, Object value);
- DECL_CAST(PropertyCell)
DECL_PRINTER(PropertyCell)
DECL_VERIFIER(PropertyCell)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_PROPERTY_CELL_FIELDS)
-
using BodyDescriptor = FixedBodyDescriptor<kNameOffset, kSize, kSize>;
- OBJECT_CONSTRUCTORS(PropertyCell, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(PropertyCell)
private:
friend class Factory;
diff --git a/deps/v8/src/objects/property-descriptor-object.tq b/deps/v8/src/objects/property-descriptor-object.tq
index 726769f29a..3f0acdd689 100644
--- a/deps/v8/src/objects/property-descriptor-object.tq
+++ b/deps/v8/src/objects/property-descriptor-object.tq
@@ -16,7 +16,6 @@ bitfield struct PropertyDescriptorObjectFlags extends uint31 {
has_set: bool: 1 bit;
}
-@generateCppClass
@generatePrint
extern class PropertyDescriptorObject extends Struct {
flags: SmiTagged<PropertyDescriptorObjectFlags>;
diff --git a/deps/v8/src/objects/prototype-info.tq b/deps/v8/src/objects/prototype-info.tq
index 96f65a053e..33248469c3 100644
--- a/deps/v8/src/objects/prototype-info.tq
+++ b/deps/v8/src/objects/prototype-info.tq
@@ -6,7 +6,6 @@ bitfield struct PrototypeInfoFlags extends uint31 {
should_be_fast: bool: 1 bit;
}
-@generateCppClass
extern class PrototypeInfo extends Struct {
// [module_namespace]: A backpointer to JSModuleNamespace from its
// PrototypeInfo (or undefined). This field is only used for JSModuleNamespace
diff --git a/deps/v8/src/objects/regexp-match-info-inl.h b/deps/v8/src/objects/regexp-match-info-inl.h
new file mode 100644
index 0000000000..463bcf9326
--- /dev/null
+++ b/deps/v8/src/objects/regexp-match-info-inl.h
@@ -0,0 +1,68 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_REGEXP_MATCH_INFO_INL_H_
+#define V8_OBJECTS_REGEXP_MATCH_INFO_INL_H_
+
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/regexp-match-info.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/regexp-match-info-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo)
+
+int RegExpMatchInfo::NumberOfCaptureRegisters() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ Object obj = get(kNumberOfCapturesIndex);
+ return Smi::ToInt(obj);
+}
+
+void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kNumberOfCapturesIndex, Smi::FromInt(value));
+}
+
+String RegExpMatchInfo::LastSubject() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ return String::cast(get(kLastSubjectIndex));
+}
+
+void RegExpMatchInfo::SetLastSubject(String value, WriteBarrierMode mode) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kLastSubjectIndex, value, mode);
+}
+
+Object RegExpMatchInfo::LastInput() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ return get(kLastInputIndex);
+}
+
+void RegExpMatchInfo::SetLastInput(Object value, WriteBarrierMode mode) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kLastInputIndex, value, mode);
+}
+
+int RegExpMatchInfo::Capture(int i) {
+ DCHECK_LT(i, NumberOfCaptureRegisters());
+ Object obj = get(kFirstCaptureIndex + i);
+ return Smi::ToInt(obj);
+}
+
+void RegExpMatchInfo::SetCapture(int i, int value) {
+ DCHECK_LT(i, NumberOfCaptureRegisters());
+ set(kFirstCaptureIndex + i, Smi::FromInt(value));
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_REGEXP_MATCH_INFO_INL_H_
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 9799c3282a..0d6f76fccf 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -27,7 +27,8 @@ class String;
// that there are at least two capture indices. The array also contains
// the subject string for the last successful match.
// After creation the result must be treated as a FixedArray in all regards.
-class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
+class RegExpMatchInfo
+ : public TorqueGeneratedRegExpMatchInfo<RegExpMatchInfo, FixedArray> {
public:
// Returns the number of captures, which is defined as the length of the
// matchIndices objects of the last match. matchIndices contains two indices
@@ -57,21 +58,16 @@ class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
static Handle<RegExpMatchInfo> ReserveCaptures(
Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture_count);
- DECL_CAST(RegExpMatchInfo)
-
static const int kNumberOfCapturesIndex = 0;
static const int kLastSubjectIndex = 1;
static const int kLastInputIndex = 2;
static const int kFirstCaptureIndex = 3;
static const int kLastMatchOverhead = kFirstCaptureIndex;
- DEFINE_FIELD_OFFSET_CONSTANTS(FixedArray::kHeaderSize,
- TORQUE_GENERATED_REG_EXP_MATCH_INFO_FIELDS)
-
// Every match info is guaranteed to have enough space to store two captures.
static const int kInitialCaptureIndices = 2;
- OBJECT_CONSTRUCTORS(RegExpMatchInfo, FixedArray);
+ TQ_OBJECT_CONSTRUCTORS(RegExpMatchInfo)
};
} // namespace internal
diff --git a/deps/v8/src/objects/scope-info.tq b/deps/v8/src/objects/scope-info.tq
index ffa8546df6..3f29d9ecf3 100644
--- a/deps/v8/src/objects/scope-info.tq
+++ b/deps/v8/src/objects/scope-info.tq
@@ -97,7 +97,6 @@ struct ModuleVariable {
properties: SmiTagged<VariableProperties>;
}
-@generateCppClass
@generateBodyDescriptor
extern class ScopeInfo extends HeapObject {
const flags: SmiTagged<ScopeFlags>;
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 4dd8bed382..3b7cbcb6d2 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -175,6 +175,16 @@ bool Script::HasValidSource() {
return true;
}
+bool Script::HasSourceURLComment() const {
+ return source_url().IsString() && String::cast(source_url()).length() != 0;
+}
+
+bool Script::IsMaybeUnfinalized(Isolate* isolate) const {
+ // TODO(v8:12051): A more robust detection, e.g. with a dedicated sentinel
+ // value.
+ return source().IsUndefined(isolate) || String::cast(source()).length() == 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 3d2ff73b99..10fe0f834e 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -142,6 +142,14 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
+ // If the script has a non-empty sourceURL comment.
+ inline bool HasSourceURLComment() const;
+
+ // Streaming compilation only attaches the source to the Script upon
+  // finalization. This predicate returns true if this script may still be
+ // unfinalized.
+ inline bool IsMaybeUnfinalized(Isolate* isolate) const;
+
Object GetNameOrSourceURL();
// Retrieve source position from where eval was called.
diff --git a/deps/v8/src/objects/script.tq b/deps/v8/src/objects/script.tq
index 36a70dede5..8184481b77 100644
--- a/deps/v8/src/objects/script.tq
+++ b/deps/v8/src/objects/script.tq
@@ -14,7 +14,6 @@ bitfield struct ScriptFlags extends uint31 {
break_on_entry: bool: 1 bit;
}
-@generateCppClass
extern class Script extends Struct {
// [source]: the script source.
source: String|Undefined;
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index a01432278b..583ca8dccf 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -94,10 +94,8 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(BaselineData)
-OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
-CAST_ACCESSOR(InterpreterData)
-ACCESSORS(InterpreterData, bytecode_array, BytecodeArray, kBytecodeArrayOffset)
ACCESSORS(InterpreterData, raw_interpreter_trampoline, CodeT,
kInterpreterTrampolineOffset)
@@ -124,13 +122,21 @@ RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
RENAME_TORQUE_ACCESSORS(SharedFunctionInfo,
raw_outer_scope_info_or_feedback_metadata,
outer_scope_info_or_feedback_metadata, HeapObject)
+DEF_ACQUIRE_GETTER(SharedFunctionInfo,
+ raw_outer_scope_info_or_feedback_metadata, HeapObject) {
+ HeapObject value =
+ TaggedField<HeapObject, kOuterScopeInfoOrFeedbackMetadataOffset>::
+ Acquire_Load(cage_base, *this);
+ return value;
+}
+
RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo,
internal_formal_parameter_count,
formal_parameter_count)
RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
function_token_offset)
-RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
@@ -166,13 +172,13 @@ void SharedFunctionInfo::SetName(String name) {
}
bool SharedFunctionInfo::is_script() const {
- return scope_info().is_script_scope() &&
+ return scope_info(kAcquireLoad).is_script_scope() &&
Script::cast(script()).compilation_type() ==
Script::COMPILATION_TYPE_HOST;
}
bool SharedFunctionInfo::needs_script_context() const {
- return is_script() && scope_info().ContextLocalCount() > 0;
+ return is_script() && scope_info(kAcquireLoad).ContextLocalCount() > 0;
}
template <typename IsolateT>
@@ -377,14 +383,18 @@ void SharedFunctionInfo::DontAdaptArguments() {
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
-ScopeInfo SharedFunctionInfo::scope_info() const {
- Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
+ScopeInfo SharedFunctionInfo::scope_info(AcquireLoadTag tag) const {
+ Object maybe_scope_info = name_or_scope_info(tag);
if (maybe_scope_info.IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
return GetReadOnlyRoots().empty_scope_info();
}
+ScopeInfo SharedFunctionInfo::scope_info() const {
+ return scope_info(kAcquireLoad);
+}
+
void SharedFunctionInfo::SetScopeInfo(ScopeInfo scope_info,
WriteBarrierMode mode) {
// Move the existing name onto the ScopeInfo.
@@ -419,8 +429,9 @@ bool SharedFunctionInfo::HasOuterScopeInfo() const {
if (!outer_scope_info().IsScopeInfo()) return false;
outer_info = ScopeInfo::cast(outer_scope_info());
} else {
- if (!scope_info().HasOuterScopeInfo()) return false;
- outer_info = scope_info().OuterScopeInfo();
+ ScopeInfo info = scope_info(kAcquireLoad);
+ if (!info.HasOuterScopeInfo()) return false;
+ outer_info = info.OuterScopeInfo();
}
return !outer_info.IsEmpty();
}
@@ -428,7 +439,7 @@ bool SharedFunctionInfo::HasOuterScopeInfo() const {
ScopeInfo SharedFunctionInfo::GetOuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
if (!is_compiled()) return ScopeInfo::cast(outer_scope_info());
- return scope_info().OuterScopeInfo();
+ return scope_info(kAcquireLoad).OuterScopeInfo();
}
void SharedFunctionInfo::set_outer_scope_info(HeapObject value,
@@ -443,17 +454,21 @@ bool SharedFunctionInfo::HasFeedbackMetadata() const {
return raw_outer_scope_info_or_feedback_metadata().IsFeedbackMetadata();
}
+bool SharedFunctionInfo::HasFeedbackMetadata(AcquireLoadTag tag) const {
+ return raw_outer_scope_info_or_feedback_metadata(tag).IsFeedbackMetadata();
+}
+
FeedbackMetadata SharedFunctionInfo::feedback_metadata() const {
DCHECK(HasFeedbackMetadata());
return FeedbackMetadata::cast(raw_outer_scope_info_or_feedback_metadata());
}
-void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata value,
- WriteBarrierMode mode) {
- DCHECK(!HasFeedbackMetadata());
- DCHECK(value.IsFeedbackMetadata());
- set_raw_outer_scope_info_or_feedback_metadata(value, mode);
-}
+RELEASE_ACQUIRE_ACCESSORS_CHECKED2(SharedFunctionInfo, feedback_metadata,
+ FeedbackMetadata,
+ kOuterScopeInfoOrFeedbackMetadataOffset,
+ HasFeedbackMetadata(kAcquireLoad),
+ !HasFeedbackMetadata(kAcquireLoad) &&
+ value.IsFeedbackMetadata())
bool SharedFunctionInfo::is_compiled() const {
Object data = function_data(kAcquireLoad);
@@ -468,25 +483,40 @@ IsCompiledScope SharedFunctionInfo::is_compiled_scope(IsolateT* isolate) const {
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
Isolate* isolate)
- : retain_bytecode_(shared.HasBytecodeArray()
- ? handle(shared.GetBytecodeArray(isolate), isolate)
- : MaybeHandle<BytecodeArray>()),
- is_compiled_(shared.is_compiled()) {
- DCHECK_IMPLIES(!retain_bytecode_.is_null(), is_compiled());
+ : is_compiled_(shared.is_compiled()) {
+ if (shared.HasBaselineData()) {
+ retain_code_ = handle(shared.baseline_data(), isolate);
+ } else if (shared.HasBytecodeArray()) {
+ retain_code_ = handle(shared.GetBytecodeArray(isolate), isolate);
+ } else {
+ retain_code_ = MaybeHandle<HeapObject>();
+ }
+
+ DCHECK_IMPLIES(!retain_code_.is_null(), is_compiled());
}
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
LocalIsolate* isolate)
- : retain_bytecode_(shared.HasBytecodeArray()
- ? isolate->heap()->NewPersistentHandle(
- shared.GetBytecodeArray(isolate))
- : MaybeHandle<BytecodeArray>()),
- is_compiled_(shared.is_compiled()) {
- DCHECK_IMPLIES(!retain_bytecode_.is_null(), is_compiled());
+ : is_compiled_(shared.is_compiled()) {
+ if (shared.HasBaselineData()) {
+ retain_code_ = isolate->heap()->NewPersistentHandle(shared.baseline_data());
+ } else if (shared.HasBytecodeArray()) {
+ retain_code_ =
+ isolate->heap()->NewPersistentHandle(shared.GetBytecodeArray(isolate));
+ } else {
+ retain_code_ = MaybeHandle<HeapObject>();
+ }
+
+ DCHECK_IMPLIES(!retain_code_.is_null(), is_compiled());
}
bool SharedFunctionInfo::has_simple_parameters() {
- return scope_info().HasSimpleParameters();
+ return scope_info(kAcquireLoad).HasSimpleParameters();
+}
+
+bool SharedFunctionInfo::CanCollectSourcePosition(Isolate* isolate) {
+ return FLAG_enable_lazy_source_positions && HasBytecodeArray() &&
+ !GetBytecodeArray(isolate).HasSourcePositionTable();
}
bool SharedFunctionInfo::IsApiFunction() const {
@@ -575,8 +605,9 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
set_function_data(bytecode, kReleaseStore);
}
-bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
- if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
+bool SharedFunctionInfo::ShouldFlushCode(
+ base::EnumSet<CodeFlushMode> code_flush_mode) {
+ if (IsFlushingDisabled(code_flush_mode)) return false;
// TODO(rmcilroy): Enable bytecode flushing for resumable functions.
if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@@ -587,9 +618,20 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
+ if (data.IsBaselineData()) {
+ // If baseline code flushing isn't enabled and we have baseline data on the
+ // SFI, we cannot flush baseline / bytecode.
+ if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
+ data =
+ ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
+ } else if (!IsByteCodeFlushingEnabled(code_flush_mode)) {
+ // If bytecode flushing isn't enabled and there is no baseline code, there
+ // is nothing to flush.
+ return false;
+ }
if (!data.IsBytecodeArray()) return false;
- if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
+ if (IsStressFlushingEnabled(code_flush_mode)) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);
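
The flush decision above first consults the mode set and only then inspects the function data. A self-contained sketch of that ordering, using a plain bitmask in place of base::EnumSet; the enum constants and the boolean inputs are invented stand-ins for the SharedFunctionInfo state, while the helper names mirror those in the diff:

#include <cstdint>

enum CodeFlushModeBits : uint32_t {
  kFlushBytecodeBit = 1u << 0,
  kFlushBaselineCodeBit = 1u << 1,
  kStressFlushBit = 1u << 2,
};

inline bool IsFlushingDisabled(uint32_t mode) { return mode == 0; }
inline bool IsByteCodeFlushingEnabled(uint32_t mode) {
  return (mode & kFlushBytecodeBit) != 0;
}
inline bool IsBaselineCodeFlushingEnabled(uint32_t mode) {
  return (mode & kFlushBaselineCodeBit) != 0;
}
inline bool IsStressFlushingEnabled(uint32_t mode) {
  return (mode & kStressFlushBit) != 0;
}

bool ShouldFlushCodeSketch(uint32_t mode, bool has_baseline_data,
                           bool has_bytecode, bool bytecode_is_old) {
  if (IsFlushingDisabled(mode)) return false;
  if (has_baseline_data) {
    // Baseline code is only discarded when baseline flushing is enabled.
    if (!IsBaselineCodeFlushingEnabled(mode)) return false;
  } else if (!IsByteCodeFlushingEnabled(mode)) {
    // No baseline code and bytecode flushing is off: nothing to flush.
    return false;
  }
  if (!has_bytecode) return false;
  if (IsStressFlushingEnabled(mode)) return true;
  return bytecode_is_old;  // otherwise only aged bytecode is flushed
}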
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index ec92317d11..22e98a140c 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -670,8 +670,7 @@ void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
// static
void SharedFunctionInfo::EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
- if (FLAG_enable_lazy_source_positions && shared_info->HasBytecodeArray() &&
- !shared_info->GetBytecodeArray(isolate).HasSourcePositionTable()) {
+ if (shared_info->CanCollectSourcePosition(isolate)) {
Compiler::CollectSourcePositions(isolate, shared_info);
}
}
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 1081481d7a..fd19f90165 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -141,22 +141,17 @@ class UncompiledDataWithPreparseData
TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData)
};
-class InterpreterData : public Struct {
+class InterpreterData
+ : public TorqueGeneratedInterpreterData<InterpreterData, Struct> {
public:
- DECL_ACCESSORS(bytecode_array, BytecodeArray)
DECL_ACCESSORS(interpreter_trampoline, Code)
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_INTERPRETER_DATA_FIELDS)
-
- DECL_CAST(InterpreterData)
DECL_PRINTER(InterpreterData)
- DECL_VERIFIER(InterpreterData)
private:
DECL_ACCESSORS(raw_interpreter_trampoline, CodeT)
- OBJECT_CONSTRUCTORS(InterpreterData, Struct);
+ TQ_OBJECT_CONSTRUCTORS(InterpreterData)
};
class BaselineData : public TorqueGeneratedBaselineData<BaselineData, Struct> {
@@ -220,6 +215,8 @@ class SharedFunctionInfo
static const int kNotFound = -1;
+ DECL_ACQUIRE_GETTER(scope_info, ScopeInfo)
+ // Deprecated; use the ACQUIRE version instead.
DECL_GETTER(scope_info, ScopeInfo)
// Set scope_info without moving the existing name onto the ScopeInfo.
@@ -245,6 +242,7 @@ class SharedFunctionInfo
// [outer scope info | feedback metadata] Shared storage for outer scope info
// (on uncompiled functions) and feedback metadata (on compiled functions).
DECL_ACCESSORS(raw_outer_scope_info_or_feedback_metadata, HeapObject)
+ DECL_ACQUIRE_GETTER(raw_outer_scope_info_or_feedback_metadata, HeapObject)
private:
using TorqueGeneratedSharedFunctionInfo::
outer_scope_info_or_feedback_metadata;
@@ -259,7 +257,9 @@ class SharedFunctionInfo
// [feedback metadata] Metadata template for feedback vectors of instances of
// this function.
inline bool HasFeedbackMetadata() const;
- DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
+ inline bool HasFeedbackMetadata(AcquireLoadTag tag) const;
+ inline FeedbackMetadata feedback_metadata() const;
+ DECL_RELEASE_ACQUIRE_ACCESSORS(feedback_metadata, FeedbackMetadata)
// Returns if this function has been compiled yet. Note: with bytecode
// flushing, any GC after this call is made could cause the function
@@ -534,7 +534,7 @@ class SharedFunctionInfo
// Returns true if the function has old bytecode that could be flushed. This
// function shouldn't access any flags as it is used by concurrent marker.
// Hence it takes the mode as an argument.
- inline bool ShouldFlushBytecode(BytecodeFlushMode mode);
+ inline bool ShouldFlushCode(base::EnumSet<CodeFlushMode> code_flush_mode);
enum Inlineability {
kIsInlineable,
@@ -575,6 +575,7 @@ class SharedFunctionInfo
void SetFunctionTokenPosition(int function_token_position,
int start_position);
+ inline bool CanCollectSourcePosition(Isolate* isolate);
static void EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info);
@@ -696,12 +697,12 @@ class V8_NODISCARD IsCompiledScope {
inline IsCompiledScope(const SharedFunctionInfo shared, Isolate* isolate);
inline IsCompiledScope(const SharedFunctionInfo shared,
LocalIsolate* isolate);
- inline IsCompiledScope() : retain_bytecode_(), is_compiled_(false) {}
+ inline IsCompiledScope() : retain_code_(), is_compiled_(false) {}
inline bool is_compiled() const { return is_compiled_; }
private:
- MaybeHandle<BytecodeArray> retain_bytecode_;
+ MaybeHandle<HeapObject> retain_code_;
bool is_compiled_;
};
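
Widening retain_bytecode_ into retain_code_ lets a single handle of the common supertype keep either compiled artifact alive. A rough standalone analogue using std::shared_ptr and invented placeholder types:

#include <memory>

struct HeapObjectSketch {
  virtual ~HeapObjectSketch() = default;
};
struct BytecodeArraySketch : HeapObjectSketch {};
struct BaselineDataSketch : HeapObjectSketch {};

class IsCompiledScopeSketch {
 public:
  IsCompiledScopeSketch(std::shared_ptr<BaselineDataSketch> baseline,
                        std::shared_ptr<BytecodeArraySketch> bytecode)
      : is_compiled_(baseline != nullptr || bytecode != nullptr) {
    // Prefer baseline data, then bytecode, matching the constructor order in
    // the diff; either one pins the compiled artifact for the scope lifetime.
    if (baseline) {
      retain_code_ = std::move(baseline);
    } else if (bytecode) {
      retain_code_ = std::move(bytecode);
    }
  }
  bool is_compiled() const { return is_compiled_; }

 private:
  std::shared_ptr<HeapObjectSketch> retain_code_;  // common supertype handle
  bool is_compiled_;
};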
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 2a08c51088..0b0930b6b4 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class PreparseData extends HeapObject {
// TODO(v8:8983): Add declaration for variable-sized region.
data_length: int32;
@@ -15,7 +14,6 @@ extern class InterpreterData extends Struct {
@ifnot(V8_EXTERNAL_CODE_SPACE) interpreter_trampoline: Code;
}
-@generateCppClass
@generatePrint
extern class BaselineData extends Struct {
@if(V8_EXTERNAL_CODE_SPACE) baseline_code: CodeDataContainer;
@@ -57,6 +55,10 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
@customCppClass
@customMap // Just to place the map at the beginning of the roots array.
class SharedFunctionInfo extends HeapObject {
+ // The function_data field is treated as a custom weak pointer. We visit
+ // this field as a weak pointer if there is aged bytecode. If there is no
+ // bytecode, or if the bytecode is young, we treat it as a strong pointer.
+ // This is done to support flushing of bytecode.
weak function_data: Object;
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index 957d36ba1f..cf1773f2d6 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -89,13 +89,14 @@ struct SourceTextModule::AsyncEvaluatingOrdinalCompare {
SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const {
DisallowGarbageCollection no_gc;
switch (status()) {
- case kUninstantiated:
- case kPreInstantiating:
+ case kUnlinked:
+ case kPreLinking:
return SharedFunctionInfo::cast(code());
- case kInstantiating:
+ case kLinking:
return JSFunction::cast(code()).shared();
- case kInstantiated:
+ case kLinked:
case kEvaluating:
+ case kEvaluatingAsync:
case kEvaluated:
return JSGeneratorObject::cast(code()).function().shared();
case kErrored:
@@ -390,13 +391,13 @@ bool SourceTextModule::PrepareInstantiate(
entry);
}
- DCHECK_EQ(module->status(), kPreInstantiating);
+ DCHECK_EQ(module->status(), kPreLinking);
return true;
}
bool SourceTextModule::RunInitializationCode(Isolate* isolate,
Handle<SourceTextModule> module) {
- DCHECK_EQ(module->status(), kInstantiating);
+ DCHECK_EQ(module->status(), kLinking);
Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -421,7 +422,7 @@ bool SourceTextModule::RunInitializationCode(Isolate* isolate,
bool SourceTextModule::MaybeTransitionComponent(
Isolate* isolate, Handle<SourceTextModule> module,
ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status) {
- DCHECK(new_status == kInstantiated || new_status == kEvaluated);
+ DCHECK(new_status == kLinked || new_status == kEvaluated);
SLOW_DCHECK(
// {module} is on the {stack}.
std::count_if(stack->begin(), stack->end(),
@@ -435,8 +436,8 @@ bool SourceTextModule::MaybeTransitionComponent(
ancestor = stack->front();
stack->pop_front();
DCHECK_EQ(ancestor->status(),
- new_status == kInstantiated ? kInstantiating : kEvaluating);
- if (new_status == kInstantiated) {
+ new_status == kLinked ? kLinking : kEvaluating);
+ if (new_status == kLinked) {
if (!SourceTextModule::RunInitializationCode(isolate, ancestor))
return false;
} else if (new_status == kEvaluated) {
@@ -461,7 +462,7 @@ bool SourceTextModule::FinishInstantiate(
Factory::JSFunctionBuilder{isolate, shared, isolate->native_context()}
.Build();
module->set_code(*function);
- module->SetStatus(kInstantiating);
+ module->SetStatus(kLinking);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
stack->push_front(module);
@@ -478,16 +479,16 @@ bool SourceTextModule::FinishInstantiate(
}
DCHECK_NE(requested_module->status(), kEvaluating);
- DCHECK_GE(requested_module->status(), kInstantiating);
+ DCHECK_GE(requested_module->status(), kLinking);
SLOW_DCHECK(
// {requested_module} is instantiating iff it's on the {stack}.
- (requested_module->status() == kInstantiating) ==
+ (requested_module->status() == kLinking) ==
std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
return *m == *requested_module;
}));
- if (requested_module->status() == kInstantiating) {
- // SyntheticModules go straight to kInstantiated so this must be a
+ if (requested_module->status() == kLinking) {
+ // SyntheticModules go straight to kLinked so this must be a
// SourceTextModule
module->set_dfs_ancestor_index(std::min(
module->dfs_ancestor_index(),
@@ -531,14 +532,14 @@ bool SourceTextModule::FinishInstantiate(
}
}
- return MaybeTransitionComponent(isolate, module, stack, kInstantiated);
+ return MaybeTransitionComponent(isolate, module, stack, kLinked);
}
void SourceTextModule::FetchStarExports(Isolate* isolate,
Handle<SourceTextModule> module,
Zone* zone,
UnorderedModuleSet* visited) {
- DCHECK_GE(module->status(), Module::kInstantiating);
+ DCHECK_GE(module->status(), Module::kLinking);
if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
@@ -729,7 +730,7 @@ MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
MaybeHandle<Object> SourceTextModule::Evaluate(
Isolate* isolate, Handle<SourceTextModule> module) {
// Evaluate () Concrete Method continued from EvaluateMaybeAsync.
- CHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+ CHECK(module->status() == kLinked || module->status() == kEvaluated);
// 5. Let stack be a new empty List.
Zone zone(isolate->allocator(), ZONE_NAME);
@@ -767,21 +768,25 @@ MaybeHandle<Object> SourceTextModule::Evaluate(
void SourceTextModule::AsyncModuleExecutionFulfilled(
Isolate* isolate, Handle<SourceTextModule> module) {
- // 1. Assert: module.[[AsyncEvaluating]] is true.
+ // 1. If module.[[Status]] is evaluated, then
+ if (module->status() == kErrored) {
+ // a. Assert: module.[[EvaluationError]] is not empty.
+ DCHECK(!module->exception().IsTheHole(isolate));
+ // b. Return.
+ return;
+ }
+ // 3. Assert: module.[[AsyncEvaluating]] is true.
DCHECK(module->IsAsyncEvaluating());
-
- // 2. Assert: module.[[EvaluationError]] is undefined.
+ // 4. Assert: module.[[EvaluationError]] is empty.
CHECK_EQ(module->status(), kEvaluated);
-
- // 3. Set module.[[AsyncEvaluating]] to false.
+ // 5. Set module.[[AsyncEvaluating]] to false.
isolate->DidFinishModuleAsyncEvaluation(module->async_evaluating_ordinal());
module->set_async_evaluating_ordinal(kAsyncEvaluateDidFinish);
-
- // 4. If module.[[TopLevelCapability]] is not empty, then
+ // TODO(cbruni): update to match spec.
+ // 7. If module.[[TopLevelCapability]] is not empty, then
if (!module->top_level_capability().IsUndefined(isolate)) {
// a. Assert: module.[[CycleRoot]] is equal to module.
DCHECK_EQ(*module->GetCycleRoot(isolate), *module);
-
// i. Perform ! Call(module.[[TopLevelCapability]].[[Resolve]], undefined,
// «undefined»).
Handle<JSPromise> capability(
@@ -790,21 +795,21 @@ void SourceTextModule::AsyncModuleExecutionFulfilled(
.ToHandleChecked();
}
- // 5. Let execList be a new empty List.
+ // 8. Let execList be a new empty List.
Zone zone(isolate->allocator(), ZONE_NAME);
AsyncParentCompletionSet exec_list(&zone);
- // 6. Perform ! GatherAsyncParentCompletions(module, execList).
+ // 9. Perform ! GatherAsyncParentCompletions(module, execList).
GatherAsyncParentCompletions(isolate, &zone, module, &exec_list);
- // 7. Let sortedExecList be a List of elements that are the elements of
+ // 10. Let sortedExecList be a List of elements that are the elements of
// execList, in the order in which they had their [[AsyncEvaluating]]
// fields set to true in InnerModuleEvaluation.
//
// This step is implemented by AsyncParentCompletionSet, which is a set
// ordered on async_evaluating_ordinal.
- // 8. Assert: All elements of sortedExecList have their [[AsyncEvaluating]]
+ // 11. Assert: All elements of sortedExecList have their [[AsyncEvaluating]]
// field set to true, [[PendingAsyncDependencies]] field set to 0 and
// [[EvaluationError]] field set to undefined.
#ifdef DEBUG
@@ -815,7 +820,7 @@ void SourceTextModule::AsyncModuleExecutionFulfilled(
}
#endif
- // 9. For each Module m of sortedExecList, do
+ // 12. For each Module m of sortedExecList, do
for (Handle<SourceTextModule> m : exec_list) {
// i. If m.[[AsyncEvaluating]] is false, then
if (!m->IsAsyncEvaluating()) {
@@ -863,31 +868,37 @@ void SourceTextModule::AsyncModuleExecutionFulfilled(
void SourceTextModule::AsyncModuleExecutionRejected(
Isolate* isolate, Handle<SourceTextModule> module,
Handle<Object> exception) {
- DCHECK(isolate->is_catchable_by_javascript(*exception));
+ // 1. If module.[[Status]] is evaluated, then
+ if (module->status() == kErrored) {
+ // a. Assert: module.[[EvaluationError]] is not empty.
+ DCHECK(!module->exception().IsTheHole(isolate));
+ // b. Return.
+ return;
+ }
+ // TODO(cbruni): update to match spec.
+ DCHECK(isolate->is_catchable_by_javascript(*exception));
// 1. Assert: module.[[Status]] is "evaluated".
CHECK(module->status() == kEvaluated || module->status() == kErrored);
-
// 2. If module.[[AsyncEvaluating]] is false,
if (!module->IsAsyncEvaluating()) {
- // a. Assert: module.[[EvaluationError]] is not undefined.
+ // a. Assert: module.[[EvaluationError]] is not empty.
CHECK_EQ(module->status(), kErrored);
-
// b. Return undefined.
return;
}
- // 4. Set module.[[EvaluationError]] to ThrowCompletion(error).
+ // 5. Set module.[[EvaluationError]] to ThrowCompletion(error).
Module::RecordError(isolate, module, exception);
- // 5. Set module.[[AsyncEvaluating]] to false.
+ // 6. Set module.[[AsyncEvaluating]] to false.
isolate->DidFinishModuleAsyncEvaluation(module->async_evaluating_ordinal());
module->set_async_evaluating_ordinal(kAsyncEvaluateDidFinish);
- // 6. For each Module m of module.[[AsyncParentModules]], do
+ // 7. For each Module m of module.[[AsyncParentModules]], do
for (int i = 0; i < module->AsyncParentModuleCount(); i++) {
Handle<SourceTextModule> m = module->GetAsyncParentModule(isolate, i);
-
+ // TODO(cbruni): update to match spec.
// a. If module.[[DFSIndex]] is not equal to module.[[DFSAncestorIndex]],
// then
if (module->dfs_index() != module->dfs_ancestor_index()) {
@@ -899,19 +910,16 @@ void SourceTextModule::AsyncModuleExecutionRejected(
AsyncModuleExecutionRejected(isolate, m, exception);
}
- // 7. If module.[[TopLevelCapability]] is not undefined, then
+ // 8. If module.[[TopLevelCapability]] is not empty, then
if (!module->top_level_capability().IsUndefined(isolate)) {
// a. Assert: module.[[CycleRoot]] is equal to module.
DCHECK_EQ(*module->GetCycleRoot(isolate), *module);
-
// b. Perform ! Call(module.[[TopLevelCapability]].[[Reject]],
// undefined, «error»).
Handle<JSPromise> capability(
JSPromise::cast(module->top_level_capability()), isolate);
JSPromise::Reject(capability, exception);
}
-
- // 8. Return undefined.
}
void SourceTextModule::ExecuteAsyncModule(Isolate* isolate,
@@ -1040,7 +1048,7 @@ MaybeHandle<Object> SourceTextModule::InnerModuleEvaluation(
}
// 4. Assert: module.[[Status]] is "linked".
- CHECK_EQ(module->status(), kInstantiated);
+ CHECK_EQ(module->status(), kLinked);
// 5. Set module.[[Status]] to "evaluating".
module->SetStatus(kEvaluating);
@@ -1189,7 +1197,7 @@ void SourceTextModule::Reset(Isolate* isolate,
Handle<FixedArray> requested_modules =
factory->NewFixedArray(module->requested_modules().length());
- if (module->status() == kInstantiating) {
+ if (module->status() == kLinking) {
module->set_code(JSFunction::cast(module->code()).shared());
}
module->set_regular_exports(*regular_exports);
diff --git a/deps/v8/src/objects/source-text-module.tq b/deps/v8/src/objects/source-text-module.tq
index a3d565c908..d378d5a862 100644
--- a/deps/v8/src/objects/source-text-module.tq
+++ b/deps/v8/src/objects/source-text-module.tq
@@ -9,7 +9,6 @@ bitfield struct SourceTextModuleFlags extends uint31 {
async_evaluating_ordinal: uint32: 30 bit;
}
-@generateCppClass
extern class SourceTextModule extends Module {
// The code representing this module, or an abstraction thereof.
code: SharedFunctionInfo|JSFunction|JSGeneratorObject;
@@ -48,7 +47,6 @@ extern class SourceTextModule extends Module {
flags: SmiTagged<SourceTextModuleFlags>;
}
-@generateCppClass
@generatePrint
extern class ModuleRequest extends Struct {
specifier: String;
@@ -61,7 +59,6 @@ extern class ModuleRequest extends Struct {
position: Smi;
}
-@generateCppClass
extern class SourceTextModuleInfoEntry extends Struct {
export_name: String|Undefined;
local_name: String|Undefined;
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 08675bc49e..7ccdd6d955 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -68,7 +68,11 @@ int StackFrameInfo::GetLineNumber(Handle<StackFrameInfo> info) {
Handle<Script> script;
if (GetScript(isolate, info).ToHandle(&script)) {
int position = GetSourcePosition(info);
- return Script::GetLineNumber(script, position) + 1;
+ int line_number = Script::GetLineNumber(script, position) + 1;
+ if (script->HasSourceURLComment()) {
+ line_number -= script->line_offset();
+ }
+ return line_number;
}
return Message::kNoLineNumberInfo;
}
@@ -84,7 +88,13 @@ int StackFrameInfo::GetColumnNumber(Handle<StackFrameInfo> info) {
#endif // V8_ENABLE_WEBASSEMBLY
Handle<Script> script;
if (GetScript(isolate, info).ToHandle(&script)) {
- return Script::GetColumnNumber(script, position) + 1;
+ int column_number = Script::GetColumnNumber(script, position) + 1;
+ if (script->HasSourceURLComment()) {
+ if (Script::GetLineNumber(script, position) == script->line_offset()) {
+ column_number -= script->column_offset();
+ }
+ }
+ return column_number;
}
return Message::kNoColumnInfo;
}
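
With a //# sourceURL comment, positions are reported relative to that URL, so the embedder-provided offsets are subtracted again. The arithmetic above amounts to the following standalone helper (1-based results; all names and inputs are hypothetical):

struct PositionSketch {
  int line;    // 1-based
  int column;  // 1-based
};

PositionSketch ToReportedPosition(int zero_based_line, int zero_based_column,
                                  bool has_source_url_comment, int line_offset,
                                  int column_offset) {
  PositionSketch pos{zero_based_line + 1, zero_based_column + 1};
  if (has_source_url_comment) {
    pos.line -= line_offset;
    // The column offset only shifts positions on the first line of the
    // embedded resource.
    if (zero_based_line == line_offset) pos.column -= column_offset;
  }
  return pos;
}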
@@ -708,12 +718,6 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
}
#if V8_ENABLE_WEBASSEMBLY
-bool IsAnonymousWasmScript(Isolate* isolate, Handle<Object> url) {
- Handle<String> prefix =
- isolate->factory()->NewStringFromStaticChars("wasm://wasm/");
- return StringIndexOf(isolate, Handle<String>::cast(url), prefix) == 0;
-}
-
void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
Handle<Object> module_name = StackFrameInfo::GetWasmModuleName(frame);
@@ -733,7 +737,7 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
}
Handle<Object> url(frame->GetScriptNameOrSourceURL(), isolate);
- if (IsNonEmptyString(url) && !IsAnonymousWasmScript(isolate, url)) {
+ if (IsNonEmptyString(url)) {
builder->AppendString(Handle<String>::cast(url));
} else {
builder->AppendCString("<anonymous>");
diff --git a/deps/v8/src/objects/stack-frame-info.tq b/deps/v8/src/objects/stack-frame-info.tq
index 0ecc0dc4ff..5e60628aa5 100644
--- a/deps/v8/src/objects/stack-frame-info.tq
+++ b/deps/v8/src/objects/stack-frame-info.tq
@@ -14,7 +14,6 @@ bitfield struct StackFrameInfoFlags extends uint31 {
is_source_position_computed: bool: 1 bit;
}
-@generateCppClass
extern class StackFrameInfo extends Struct {
receiver_or_instance: JSAny;
function: JSFunction|Smi;
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
index 79ec348c71..3330f772ff 100644
--- a/deps/v8/src/objects/string-comparator.cc
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -9,17 +9,19 @@
namespace v8 {
namespace internal {
-void StringComparator::State::Init(String string) {
- ConsString cons_string = String::VisitFlat(this, string);
+void StringComparator::State::Init(
+ String string, const SharedStringAccessGuardIfNeeded& access_guard) {
+ ConsString cons_string = String::VisitFlat(this, string, 0, access_guard);
iter_.Reset(cons_string);
if (!cons_string.is_null()) {
int offset;
string = iter_.Next(&offset);
- String::VisitFlat(this, string, offset);
+ String::VisitFlat(this, string, offset, access_guard);
}
}
-void StringComparator::State::Advance(int consumed) {
+void StringComparator::State::Advance(
+ int consumed, const SharedStringAccessGuardIfNeeded& access_guard) {
DCHECK(consumed <= length_);
// Still in buffer.
if (length_ != consumed) {
@@ -36,13 +38,15 @@ void StringComparator::State::Advance(int consumed) {
String next = iter_.Next(&offset);
DCHECK_EQ(0, offset);
DCHECK(!next.is_null());
- String::VisitFlat(this, next);
+ String::VisitFlat(this, next, 0, access_guard);
}
-bool StringComparator::Equals(String string_1, String string_2) {
+bool StringComparator::Equals(
+ String string_1, String string_2,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
int length = string_1.length();
- state_1_.Init(string_1);
- state_2_.Init(string_2);
+ state_1_.Init(string_1, access_guard);
+ state_2_.Init(string_2, access_guard);
while (true) {
int to_check = std::min(state_1_.length_, state_2_.length_);
DCHECK(to_check > 0 && to_check <= length);
@@ -65,8 +69,8 @@ bool StringComparator::Equals(String string_1, String string_2) {
length -= to_check;
// Exit condition. Strings are equal.
if (length == 0) return true;
- state_1_.Advance(to_check);
- state_2_.Advance(to_check);
+ state_1_.Advance(to_check, access_guard);
+ state_2_.Advance(to_check, access_guard);
}
}
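
The comparator change is representative of a broader pattern in this diff: every helper that touches string contents now takes an explicit access-guard argument, so off-main-thread callers cannot reach the characters without one. A toy standalone version of the same plumbing (all types invented):

#include <cstddef>
#include <string_view>

// Stands in for SharedStringAccessGuardIfNeeded: possessing one is the
// caller's proof that the string contents may be read on this thread.
struct AccessGuardSketch {};

inline char CharAt(std::string_view s, size_t i,
                   const AccessGuardSketch& /*access_guard*/) {
  return s[i];
}

bool EqualsSketch(std::string_view a, std::string_view b,
                  const AccessGuardSketch& access_guard) {
  if (a.size() != b.size()) return false;
  for (size_t i = 0; i < a.size(); ++i) {
    // The guard is threaded through every content access, never recreated.
    if (CharAt(a, i, access_guard) != CharAt(b, i, access_guard)) return false;
  }
  return true;
}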
diff --git a/deps/v8/src/objects/string-comparator.h b/deps/v8/src/objects/string-comparator.h
index 59acd42087..8f3420945a 100644
--- a/deps/v8/src/objects/string-comparator.h
+++ b/deps/v8/src/objects/string-comparator.h
@@ -20,7 +20,8 @@ class StringComparator {
State(const State&) = delete;
State& operator=(const State&) = delete;
- void Init(String string);
+ void Init(String string,
+ const SharedStringAccessGuardIfNeeded& access_guard);
inline void VisitOneByteString(const uint8_t* chars, int length) {
is_one_byte_ = true;
@@ -34,7 +35,8 @@ class StringComparator {
length_ = length;
}
- void Advance(int consumed);
+ void Advance(int consumed,
+ const SharedStringAccessGuardIfNeeded& access_guard);
ConsStringIterator iter_;
bool is_one_byte_;
@@ -57,7 +59,8 @@ class StringComparator {
return CompareCharsEqual(a, b, to_check);
}
- bool Equals(String string_1, String string_2);
+ bool Equals(String string_1, String string_2,
+ const SharedStringAccessGuardIfNeeded& access_guard);
private:
State state_1_;
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index d5e7fda181..ba2d463047 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -109,13 +109,9 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(InternalizedString)
TQ_OBJECT_CONSTRUCTORS_IMPL(ConsString)
TQ_OBJECT_CONSTRUCTORS_IMPL(ThinString)
TQ_OBJECT_CONSTRUCTORS_IMPL(SlicedString)
-OBJECT_CONSTRUCTORS_IMPL(ExternalString, String)
-OBJECT_CONSTRUCTORS_IMPL(ExternalOneByteString, ExternalString)
-OBJECT_CONSTRUCTORS_IMPL(ExternalTwoByteString, ExternalString)
-
-CAST_ACCESSOR(ExternalOneByteString)
-CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalTwoByteString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ExternalString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ExternalOneByteString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ExternalTwoByteString)
StringShape::StringShape(const String str)
: type_(str.map(kAcquireLoad).instance_type()) {
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index de275b8977..cff50bea79 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -510,6 +510,8 @@ template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
template Handle<String> StringTable::LookupKey(Isolate* isolate,
StringTableInsertionKey* key);
+template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
+ StringTableInsertionKey* key);
StringTable::Data* StringTable::EnsureCapacity(PtrComprCageBase cage_base,
int additional_elements) {
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 7af83a5175..4b18ee3d05 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -10,6 +10,8 @@
#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/local-factory-inl.h"
+#include "src/heap/local-heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/numbers/conversions.h"
@@ -93,26 +95,32 @@ void MigrateExternalStringResource(Isolate* isolate, ExternalString from,
}
}
+void MigrateExternalString(Isolate* isolate, String string,
+ String internalized) {
+ if (internalized.IsExternalOneByteString()) {
+ MigrateExternalStringResource(isolate, ExternalString::cast(string),
+ ExternalOneByteString::cast(internalized));
+ } else if (internalized.IsExternalTwoByteString()) {
+ MigrateExternalStringResource(isolate, ExternalString::cast(string),
+ ExternalTwoByteString::cast(internalized));
+ } else {
+ // If the external string is duped into an existing non-external
+ // internalized string, free its resource (it's about to be rewritten
+ // into a ThinString below).
+ isolate->heap()->FinalizeExternalString(string);
+ }
+}
+
} // namespace
-void String::MakeThin(Isolate* isolate, String internalized) {
+template <typename IsolateT>
+void String::MakeThin(IsolateT* isolate, String internalized) {
DisallowGarbageCollection no_gc;
DCHECK_NE(*this, internalized);
DCHECK(internalized.IsInternalizedString());
if (this->IsExternalString()) {
- if (internalized.IsExternalOneByteString()) {
- MigrateExternalStringResource(isolate, ExternalString::cast(*this),
- ExternalOneByteString::cast(internalized));
- } else if (internalized.IsExternalTwoByteString()) {
- MigrateExternalStringResource(isolate, ExternalString::cast(*this),
- ExternalTwoByteString::cast(internalized));
- } else {
- // If the external string is duped into an existing non-external
- // internalized string, free its resource (it's about to be rewritten
- // into a ThinString below).
- isolate->heap()->FinalizeExternalString(*this);
- }
+ MigrateExternalString(isolate->AsIsolate(), *this, internalized);
}
bool has_pointers = StringShape(*this).IsIndirect();
@@ -131,9 +139,8 @@ void String::MakeThin(Isolate* isolate, String internalized) {
Address thin_end = thin.address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
if (size_delta != 0) {
- Heap* heap = isolate->heap();
- if (!heap->IsLargeObject(thin)) {
- heap->CreateFillerObjectAt(
+ if (!Heap::IsLargeObject(thin)) {
+ isolate->heap()->CreateFillerObjectAt(
thin_end, size_delta,
has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
} else {
@@ -144,6 +151,9 @@ void String::MakeThin(Isolate* isolate, String internalized) {
}
}
+template void String::MakeThin(Isolate* isolate, String internalized);
+template void String::MakeThin(LocalIsolate* isolate, String internalized);
+
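
MakeThin now follows the common device of templating over the isolate type and emitting explicit instantiations in the .cc file. A self-contained sketch of that device with invented isolate types (MainIsolateSketch standing in for Isolate, WorkerIsolateSketch for LocalIsolate):

struct MainIsolateSketch {
  MainIsolateSketch* AsIsolate() { return this; }
};

struct WorkerIsolateSketch {
  explicit WorkerIsolateSketch(MainIsolateSketch* main) : main_(main) {}
  MainIsolateSketch* AsIsolate() { return main_; }

 private:
  MainIsolateSketch* main_;
};

template <typename IsolateT>
void MakeThinSketch(IsolateT* isolate) {
  // Work that is safe on either isolate type goes here; anything that needs
  // the full isolate funnels through AsIsolate(), as the external-string
  // migration above does.
  MainIsolateSketch* full = isolate->AsIsolate();
  (void)full;
}

// Explicit instantiations keep the template body out of the header.
template void MakeThinSketch(MainIsolateSketch* isolate);
template void MakeThinSketch(WorkerIsolateSketch* isolate);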
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Disallow garbage collection to avoid possible GC vs string access deadlock.
DisallowGarbageCollection no_gc;
@@ -633,6 +643,7 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
}
+// static
template <typename sinkchar>
void String::WriteToFlat(String source, sinkchar* sink, int from, int to) {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(source));
@@ -640,6 +651,7 @@ void String::WriteToFlat(String source, sinkchar* sink, int from, int to) {
SharedStringAccessGuardIfNeeded::NotNeeded());
}
+// static
template <typename sinkchar>
void String::WriteToFlat(String source, sinkchar* sink, int from, int to,
const SharedStringAccessGuardIfNeeded& access_guard) {
@@ -794,6 +806,13 @@ template Handle<FixedArray> String::CalculateLineEnds(LocalIsolate* isolate,
bool include_ending_line);
bool String::SlowEquals(String other) const {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(other));
+ return SlowEquals(other, SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+
+bool String::SlowEquals(
+ String other, const SharedStringAccessGuardIfNeeded& access_guard) const {
DisallowGarbageCollection no_gc;
// Fast check: negative check with lengths.
int len = length();
@@ -833,16 +852,18 @@ bool String::SlowEquals(String other) const {
// We know the strings are both non-empty. Compare the first chars
// before we try to flatten the strings.
- if (this->Get(0) != other.Get(0)) return false;
+ if (this->Get(0, access_guard) != other.Get(0, access_guard)) return false;
if (IsSeqOneByteString() && other.IsSeqOneByteString()) {
- const uint8_t* str1 = SeqOneByteString::cast(*this).GetChars(no_gc);
- const uint8_t* str2 = SeqOneByteString::cast(other).GetChars(no_gc);
+ const uint8_t* str1 =
+ SeqOneByteString::cast(*this).GetChars(no_gc, access_guard);
+ const uint8_t* str2 =
+ SeqOneByteString::cast(other).GetChars(no_gc, access_guard);
return CompareCharsEqual(str1, str2, len);
}
StringComparator comparator;
- return comparator.Equals(*this, other);
+ return comparator.Equals(*this, other, access_guard);
}
// static
@@ -1326,7 +1347,8 @@ bool String::HasOneBytePrefix(base::Vector<const char> str) {
namespace {
template <typename Char>
-uint32_t HashString(String string, size_t start, int length, uint64_t seed) {
+uint32_t HashString(String string, size_t start, int length, uint64_t seed,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
if (length > String::kMaxHashCalcLength) {
@@ -1340,10 +1362,10 @@ uint32_t HashString(String string, size_t start, int length, uint64_t seed) {
DCHECK_EQ(0, start);
DCHECK(!string.IsFlat());
buffer.reset(new Char[length]);
- String::WriteToFlat(string, buffer.get(), 0, length);
+ String::WriteToFlat(string, buffer.get(), 0, length, access_guard);
chars = buffer.get();
} else {
- chars = string.GetChars<Char>(no_gc) + start;
+ chars = string.GetChars<Char>(no_gc, access_guard) + start;
}
return StringHasher::HashSequentialString<Char>(chars, length, seed);
@@ -1352,6 +1374,11 @@ uint32_t HashString(String string, size_t start, int length, uint64_t seed) {
} // namespace
uint32_t String::ComputeAndSetHash() {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return ComputeAndSetHash(SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+uint32_t String::ComputeAndSetHash(
+ const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
// Should only be called if hash code has not yet been computed.
DCHECK(!HasHashCode());
@@ -1377,8 +1404,8 @@ uint32_t String::ComputeAndSetHash() {
}
uint32_t raw_hash_field =
string.IsOneByteRepresentation()
- ? HashString<uint8_t>(string, start, length(), seed)
- : HashString<uint16_t>(string, start, length(), seed);
+ ? HashString<uint8_t>(string, start, length(), seed, access_guard)
+ : HashString<uint16_t>(string, start, length(), seed, access_guard);
set_raw_hash_field(raw_hash_field);
// Check the hash code is there.
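
SlowEquals and ComputeAndSetHash both gain the same overload pair: the unguarded entry point asserts that no guard is needed and forwards to the guarded implementation. A sketch of that shape with placeholder types:

#include <cassert>

struct AccessGuardSketch {
  static AccessGuardSketch NotNeeded() { return AccessGuardSketch(); }
};

class HashedStringSketch {
 public:
  explicit HashedStringSketch(bool needs_guard) : needs_guard_(needs_guard) {}

  unsigned ComputeAndSetHash() {
    // Main-thread convenience overload: only legal when no guard is needed.
    assert(!needs_guard_);
    return ComputeAndSetHash(AccessGuardSketch::NotNeeded());
  }

  unsigned ComputeAndSetHash(const AccessGuardSketch& /*access_guard*/) {
    hash_ = 0x2a;  // placeholder for the real character hashing
    return hash_;
  }

 private:
  bool needs_guard_;
  unsigned hash_ = 0;
};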
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 184f4ba4fb..3bb3ba1d6e 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -172,7 +172,8 @@ class String : public TorqueGeneratedString<String, Name> {
friend class IterableSubString;
};
- void MakeThin(Isolate* isolate, String canonical);
+ template <typename IsolateT>
+ void MakeThin(IsolateT* isolate, String canonical);
template <typename Char>
V8_INLINE base::Vector<const Char> GetCharVector(
@@ -570,6 +571,8 @@ class String : public TorqueGeneratedString<String, Name> {
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
V8_EXPORT_PRIVATE bool SlowEquals(String other) const;
+ V8_EXPORT_PRIVATE bool SlowEquals(
+ String other, const SharedStringAccessGuardIfNeeded&) const;
V8_EXPORT_PRIVATE static bool SlowEquals(Isolate* isolate, Handle<String> one,
Handle<String> two);
@@ -580,6 +583,8 @@ class String : public TorqueGeneratedString<String, Name> {
// Compute and set the hash code.
V8_EXPORT_PRIVATE uint32_t ComputeAndSetHash();
+ V8_EXPORT_PRIVATE uint32_t
+ ComputeAndSetHash(const SharedStringAccessGuardIfNeeded&);
TQ_OBJECT_CONSTRUCTORS(String)
};
@@ -820,14 +825,11 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
//
// The API expects that all ExternalStrings are created through the
// API. Therefore, ExternalStrings should not be used internally.
-class ExternalString : public String {
+class ExternalString
+ : public TorqueGeneratedExternalString<ExternalString, String> {
public:
- DECL_CAST(ExternalString)
DECL_VERIFIER(ExternalString)
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_EXTERNAL_STRING_FIELDS)
-
// Size of uncached external strings.
static const int kUncachedSize =
kResourceOffset + FIELD_SIZE(kResourceOffset);
@@ -851,12 +853,19 @@ class ExternalString : public String {
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
static const int kSizeOfAllExternalStrings = kHeaderSize;
- OBJECT_CONSTRUCTORS(ExternalString, String);
+ private:
+ // Hide generated accessors.
+ DECL_ACCESSORS(resource, void*)
+ DECL_ACCESSORS(resource_data, void*)
+
+ TQ_OBJECT_CONSTRUCTORS(ExternalString)
};
// The ExternalOneByteString class is an external string backed by an
// one-byte string.
-class ExternalOneByteString : public ExternalString {
+class ExternalOneByteString
+ : public TorqueGeneratedExternalOneByteString<ExternalOneByteString,
+ ExternalString> {
public:
static const bool kHasOneByteEncoding = true;
@@ -884,17 +893,11 @@ class ExternalOneByteString : public ExternalString {
inline uint8_t Get(int index,
const SharedStringAccessGuardIfNeeded& access_guard) const;
- DECL_CAST(ExternalOneByteString)
-
class BodyDescriptor;
- DEFINE_FIELD_OFFSET_CONSTANTS(
- ExternalString::kHeaderSize,
- TORQUE_GENERATED_EXTERNAL_ONE_BYTE_STRING_FIELDS)
-
STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
- OBJECT_CONSTRUCTORS(ExternalOneByteString, ExternalString);
+ TQ_OBJECT_CONSTRUCTORS(ExternalOneByteString)
private:
// The underlying resource as a non-const pointer.
@@ -903,7 +906,9 @@ class ExternalOneByteString : public ExternalString {
// The ExternalTwoByteString class is an external string backed by a UTF-16
// encoded string.
-class ExternalTwoByteString : public ExternalString {
+class ExternalTwoByteString
+ : public TorqueGeneratedExternalTwoByteString<ExternalTwoByteString,
+ ExternalString> {
public:
static const bool kHasOneByteEncoding = false;
@@ -934,17 +939,11 @@ class ExternalTwoByteString : public ExternalString {
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
- DECL_CAST(ExternalTwoByteString)
-
class BodyDescriptor;
- DEFINE_FIELD_OFFSET_CONSTANTS(
- ExternalString::kHeaderSize,
- TORQUE_GENERATED_EXTERNAL_TWO_BYTE_STRING_FIELDS)
-
STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
- OBJECT_CONSTRUCTORS(ExternalTwoByteString, ExternalString);
+ TQ_OBJECT_CONSTRUCTORS(ExternalTwoByteString)
private:
// The underlying resource as a non-const pointer.
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index ad845760ae..9ab35d1e00 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -5,7 +5,6 @@
#include 'src/builtins/builtins-string-gen.h'
@abstract
-@generateCppClass
@reserveBitsInInstanceType(6)
extern class String extends Name {
macro StringInstanceType(): StringInstanceType {
@@ -31,7 +30,6 @@ bitfield struct StringInstanceType extends uint16 {
is_not_internalized: bool: 1 bit;
}
-@generateCppClass
@generateBodyDescriptor
@doNotGenerateCast
extern class ConsString extends String {
@@ -92,30 +90,25 @@ extern class ExternalTwoByteString extends ExternalString {
}
}
-@generateCppClass
@doNotGenerateCast
extern class InternalizedString extends String {
}
@abstract
-@generateCppClass
@doNotGenerateCast
extern class SeqString extends String {
}
-@generateCppClass
@generateBodyDescriptor
@doNotGenerateCast
extern class SeqOneByteString extends SeqString {
const chars[length]: char8;
}
-@generateCppClass
@generateBodyDescriptor
@doNotGenerateCast
extern class SeqTwoByteString extends SeqString {
const chars[length]: char16;
}
-@generateCppClass
@generateBodyDescriptor
@doNotGenerateCast
extern class SlicedString extends String {
@@ -123,7 +116,6 @@ extern class SlicedString extends String {
offset: Smi;
}
-@generateCppClass
@generateBodyDescriptor
@doNotGenerateCast
extern class ThinString extends String {
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index 3601f66dcc..e0e5e1cd5a 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -40,13 +40,17 @@ void AccessorPair::set(AccessorComponent component, Object value) {
}
}
-DEF_GETTER(AccessorPair, getter, Object) {
- return TorqueGeneratedClass::getter(cage_base);
+void AccessorPair::set(AccessorComponent component, Object value,
+ ReleaseStoreTag tag) {
+ if (component == ACCESSOR_GETTER) {
+ set_getter(value, tag);
+ } else {
+ set_setter(value, tag);
+ }
}
-DEF_RELAXED_GETTER(AccessorPair, getter, Object) {
- return TaggedField<Object, kGetterOffset>::Relaxed_Load(cage_base, *this);
-}
+RELEASE_ACQUIRE_ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
+RELEASE_ACQUIRE_ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
void AccessorPair::SetComponents(Object getter, Object setter) {
if (!getter.IsNull()) set_getter(getter);
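
Switching the getter/setter slots to release-store / acquire-load accessors means a concurrent reader that observes the pointer also observes the object it points at. The equivalent discipline over std::atomic, with invented types:

#include <atomic>

struct AccessorPairSketch {
  enum Component { kGetter, kSetter };

  void set(Component component, void* value) {
    // Release store: pairs with the acquire load in get().
    slot(component).store(value, std::memory_order_release);
  }

  void* get(Component component) const {
    return slot(component).load(std::memory_order_acquire);
  }

 private:
  std::atomic<void*>& slot(Component component) {
    return component == kGetter ? getter_ : setter_;
  }
  const std::atomic<void*>& slot(Component component) const {
    return component == kGetter ? getter_ : setter_;
  }

  std::atomic<void*> getter_{nullptr};
  std::atomic<void*> setter_{nullptr};
};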
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index 842260751f..2cc51c8544 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -47,9 +47,16 @@ class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> {
inline Object get(AccessorComponent component);
inline void set(AccessorComponent component, Object value);
+ inline void set(AccessorComponent component, Object value,
+ ReleaseStoreTag tag);
- DECL_GETTER(getter, Object)
- DECL_RELAXED_GETTER(getter, Object)
+ using TorqueGeneratedAccessorPair::getter;
+ using TorqueGeneratedAccessorPair::set_getter;
+ DECL_RELEASE_ACQUIRE_ACCESSORS(getter, Object)
+
+ using TorqueGeneratedAccessorPair::set_setter;
+ using TorqueGeneratedAccessorPair::setter;
+ DECL_RELEASE_ACQUIRE_ACCESSORS(setter, Object)
// Note: Returns undefined if the component is not set.
static Handle<Object> GetComponent(Isolate* isolate,
diff --git a/deps/v8/src/objects/struct.tq b/deps/v8/src/objects/struct.tq
index ffd83e33a5..ec9782bab0 100644
--- a/deps/v8/src/objects/struct.tq
+++ b/deps/v8/src/objects/struct.tq
@@ -4,24 +4,20 @@
@abstract
@generatePrint
-@generateCppClass
extern class Struct extends HeapObject {
}
@generatePrint
-@generateCppClass
extern class Tuple2 extends Struct {
value1: Object;
value2: Object;
}
-@generateCppClass
extern class ClassPositions extends Struct {
start: Smi;
end: Smi;
}
-@generateCppClass
extern class AccessorPair extends Struct {
getter: Object;
setter: Object;
diff --git a/deps/v8/src/objects/swiss-name-dictionary.cc b/deps/v8/src/objects/swiss-name-dictionary.cc
index 9cf3421ca4..f464d341f9 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.cc
+++ b/deps/v8/src/objects/swiss-name-dictionary.cc
@@ -207,7 +207,8 @@ Handle<SwissNameDictionary> SwissNameDictionary::Shrink(
// storing it somewhere in the main table or the meta table, for those
// SwissNameDictionaries that we know will be in-place rehashed, most notably
// those stored in the snapshot.
-void SwissNameDictionary::Rehash(Isolate* isolate) {
+template <typename IsolateT>
+void SwissNameDictionary::Rehash(IsolateT* isolate) {
DisallowHeapAllocation no_gc;
struct Entry {
@@ -307,6 +308,10 @@ template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::Rehash(Isolate* isolate, Handle<SwissNameDictionary> table,
int new_capacity);
+template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(
+ LocalIsolate* isolate);
+template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(Isolate* isolate);
+
constexpr int SwissNameDictionary::kInitialCapacity;
constexpr int SwissNameDictionary::kGroupWidth;
diff --git a/deps/v8/src/objects/swiss-name-dictionary.h b/deps/v8/src/objects/swiss-name-dictionary.h
index 620e129227..42613e619c 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.h
+++ b/deps/v8/src/objects/swiss-name-dictionary.h
@@ -133,7 +133,8 @@ class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
static Handle<SwissNameDictionary> Rehash(IsolateT* isolate,
Handle<SwissNameDictionary> table,
int new_capacity);
- void Rehash(Isolate* isolate);
+ template <typename IsolateT>
+ void Rehash(IsolateT* isolate);
inline void SetHash(int hash);
inline int Hash();
diff --git a/deps/v8/src/objects/swiss-name-dictionary.tq b/deps/v8/src/objects/swiss-name-dictionary.tq
index ff648a9a88..803014448e 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.tq
+++ b/deps/v8/src/objects/swiss-name-dictionary.tq
@@ -5,6 +5,7 @@
#include 'src/objects/swiss-name-dictionary.h'
@noVerifier
+@doNotGenerateCppClass
extern class SwissNameDictionary extends HeapObject {
hash: uint32;
const capacity: int32;
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index 451dcc5160..0322ca9b8a 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -92,7 +92,7 @@ bool SyntheticModule::PrepareInstantiate(Isolate* isolate,
// just update status.
bool SyntheticModule::FinishInstantiate(Isolate* isolate,
Handle<SyntheticModule> module) {
- module->SetStatus(kInstantiated);
+ module->SetStatus(kLinked);
return true;
}
diff --git a/deps/v8/src/objects/synthetic-module.tq b/deps/v8/src/objects/synthetic-module.tq
index 69f66ebb4b..263dc00942 100644
--- a/deps/v8/src/objects/synthetic-module.tq
+++ b/deps/v8/src/objects/synthetic-module.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
extern class SyntheticModule extends Module {
name: String;
export_names: FixedArray;
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 7faf9e9ac9..d9fc0bb102 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
diff --git a/deps/v8/src/objects/template-objects.tq b/deps/v8/src/objects/template-objects.tq
index 38da71d616..2aa657977f 100644
--- a/deps/v8/src/objects/template-objects.tq
+++ b/deps/v8/src/objects/template-objects.tq
@@ -3,7 +3,6 @@
// found in the LICENSE file.
@generatePrint
-@generateCppClass
extern class CachedTemplateObject extends Struct {
slot_id: Smi;
template_object: JSArray;
@@ -11,7 +10,6 @@ extern class CachedTemplateObject extends Struct {
}
@generatePrint
-@generateCppClass
extern class TemplateObjectDescription extends Struct {
raw_strings: FixedArray;
cooked_strings: FixedArray;
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 87788b4ace..bb0d6a8dc6 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -38,10 +38,12 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
AcceptAnyReceiverBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, published, PublishedBit::kShift)
-BIT_FIELD_ACCESSORS(FunctionTemplateInfo, flag, allowed_receiver_range_start,
- FunctionTemplateInfo::AllowedReceiverRangeStartBits)
-BIT_FIELD_ACCESSORS(FunctionTemplateInfo, flag, allowed_receiver_range_end,
- FunctionTemplateInfo::AllowedReceiverRangeEndBits)
+BIT_FIELD_ACCESSORS(
+ FunctionTemplateInfo, flag, allowed_receiver_instance_type_range_start,
+ FunctionTemplateInfo::AllowedReceiverInstanceTypeRangeStartBits)
+BIT_FIELD_ACCESSORS(
+ FunctionTemplateInfo, flag, allowed_receiver_instance_type_range_end,
+ FunctionTemplateInfo::AllowedReceiverInstanceTypeRangeEndBits)
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
@@ -85,10 +87,23 @@ RARE_ACCESSORS(c_function_overloads, CFunctionOverloads, FixedArray,
GetReadOnlyRoots(cage_base).empty_fixed_array())
#undef RARE_ACCESSORS
-int FunctionTemplateInfo::InstanceType() const { return instance_type(); }
+int FunctionTemplateInfo::InstanceType() const {
+ int type = instance_type();
+ DCHECK(type == kNoJSApiObjectType ||
+ (type >= Internals::kFirstJSApiObjectType &&
+ type <= Internals::kLastJSApiObjectType));
+ return type;
+}
void FunctionTemplateInfo::SetInstanceType(int instance_type) {
- set_instance_type(instance_type);
+ if (instance_type == 0) {
+ set_instance_type(kNoJSApiObjectType);
+ } else {
+ DCHECK_GT(instance_type, 0);
+ DCHECK_LT(Internals::kFirstJSApiObjectType + instance_type,
+ Internals::kLastJSApiObjectType);
+ set_instance_type(Internals::kFirstJSApiObjectType + instance_type);
+ }
}
bool TemplateInfo::should_cache() const {
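
SetInstanceType packs the embedder-supplied id into a reserved range and keeps 0 as the "no type" sentinel that HasInstanceType checks. The arithmetic in isolation, with invented range constants in place of Internals::kFirstJSApiObjectType and friends:

#include <cassert>

constexpr int kNoJSApiObjectTypeSketch = 0;
constexpr int kFirstJSApiObjectTypeSketch = 0x0400;  // invented range bounds
constexpr int kLastJSApiObjectTypeSketch = 0x0800;

int EncodeInstanceType(int embedder_type) {
  if (embedder_type == 0) return kNoJSApiObjectTypeSketch;
  assert(embedder_type > 0);
  int encoded = kFirstJSApiObjectTypeSketch + embedder_type;
  assert(encoded < kLastJSApiObjectTypeSketch);
  return encoded;
}

bool HasInstanceTypeSketch(int encoded) {
  return encoded != kNoJSApiObjectTypeSketch;
}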
diff --git a/deps/v8/src/objects/templates.cc b/deps/v8/src/objects/templates.cc
index e5a8beff71..91306861e6 100644
--- a/deps/v8/src/objects/templates.cc
+++ b/deps/v8/src/objects/templates.cc
@@ -18,6 +18,10 @@
namespace v8 {
namespace internal {
+bool FunctionTemplateInfo::HasInstanceType() {
+ return instance_type() != kNoJSApiObjectType;
+}
+
Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info,
MaybeHandle<Name> maybe_name) {
@@ -61,6 +65,17 @@ bool FunctionTemplateInfo::IsTemplateFor(Map map) const {
// There is a constraint on the object; check.
if (!map.IsJSObjectMap()) return false;
+
+ if (FLAG_embedder_instance_types) {
+ DCHECK_IMPLIES(allowed_receiver_instance_type_range_start() == 0,
+ allowed_receiver_instance_type_range_end() == 0);
+ if (base::IsInRange(map.instance_type(),
+ allowed_receiver_instance_type_range_start(),
+ allowed_receiver_instance_type_range_end())) {
+ return true;
+ }
+ }
+
// Fetch the constructor function of the object.
Object cons_obj = map.GetConstructor();
Object type;
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index c82c8dde8a..0b6de3d832 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -123,8 +123,10 @@ class FunctionTemplateInfo
// safely read concurrently.
DECL_BOOLEAN_ACCESSORS(published)
- DECL_INT_ACCESSORS(allowed_receiver_range_start)
- DECL_INT_ACCESSORS(allowed_receiver_range_end)
+ // This specifies the permissible range of instance types for objects that
+ // can be used as receivers with the given template.
+ DECL_INT16_ACCESSORS(allowed_receiver_instance_type_range_start)
+ DECL_INT16_ACCESSORS(allowed_receiver_instance_type_range_end)
// End flag bits ---------------------
// Dispatched behavior.
@@ -157,6 +159,7 @@ class FunctionTemplateInfo
inline bool instantiated();
bool BreakAtEntry();
+ bool HasInstanceType();
// Helper function for cached accessors.
static base::Optional<Name> TryGetCachedPropertyName(Isolate* isolate,
@@ -174,6 +177,7 @@ class FunctionTemplateInfo
DEFINE_TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FLAGS()
private:
+ static constexpr int kNoJSApiObjectType = 0;
static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
diff --git a/deps/v8/src/objects/templates.tq b/deps/v8/src/objects/templates.tq
index 1215cfd0c1..9406f62d7a 100644
--- a/deps/v8/src/objects/templates.tq
+++ b/deps/v8/src/objects/templates.tq
@@ -3,7 +3,6 @@
// found in the LICENSE file.
@abstract
-@generateCppClass
extern class TemplateInfo extends Struct {
tag: Smi;
serial_number: Smi;
@@ -12,7 +11,6 @@ extern class TemplateInfo extends Struct {
property_accessors: TemplateList|Undefined;
}
-@generateCppClass
@generatePrint
extern class FunctionTemplateRareData extends Struct {
// See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
@@ -37,11 +35,10 @@ bitfield struct FunctionTemplateInfoFlags extends uint31 {
// Allowed receiver ranges are used for instance type checking to check
// whether the receiver calling the associated JSFunction is a compatible
// receiver.
- allowed_receiver_range_start: int32: 12 bit;
- allowed_receiver_range_end: int32: 12 bit;
+ allowed_receiver_instance_type_range_start: int16: 12 bit;
+ allowed_receiver_instance_type_range_end: int16: 12 bit;
}
-@generateCppClass
extern class FunctionTemplateInfo extends TemplateInfo {
// Handler invoked when calling an instance of this FunctionTemplateInfo.
// Either CallHandlerInfo or Undefined.
@@ -78,7 +75,6 @@ bitfield struct ObjectTemplateInfoFlags extends uint31 {
embedder_field_count: int32: 28 bit;
}
-@generateCppClass
extern class ObjectTemplateInfo extends TemplateInfo {
constructor: FunctionTemplateInfo|Undefined;
data: SmiTagged<ObjectTemplateInfoFlags>;
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index 8b27c49f6c..e842e5ae66 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -180,19 +180,22 @@ Map TransitionArray::SearchAndGetTargetForTesting(
return SearchAndGetTarget(kind, name, attributes);
}
-int TransitionArray::SearchSpecial(Symbol symbol, int* out_insertion_index) {
- return SearchName(symbol, out_insertion_index);
+int TransitionArray::SearchSpecial(Symbol symbol, bool concurrent_search,
+ int* out_insertion_index) {
+ return SearchName(symbol, concurrent_search, out_insertion_index);
}
-int TransitionArray::SearchName(Name name, int* out_insertion_index) {
+int TransitionArray::SearchName(Name name, bool concurrent_search,
+ int* out_insertion_index) {
DCHECK(name.IsUniqueName());
return internal::Search<ALL_ENTRIES>(this, name, number_of_entries(),
- out_insertion_index);
+ out_insertion_index, concurrent_search);
}
TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Map map,
- DisallowGarbageCollection* no_gc)
- : isolate_(isolate), map_(map), concurrent_access_(false) {
+ DisallowGarbageCollection* no_gc,
+ bool concurrent_access)
+ : isolate_(isolate), map_(map), concurrent_access_(concurrent_access) {
Initialize();
USE(no_gc);
}
@@ -214,26 +217,32 @@ void TransitionsAccessor::Reload() {
int TransitionsAccessor::Capacity() { return transitions().Capacity(); }
-void TransitionsAccessor::Initialize() {
- raw_transitions_ = map_.raw_transitions(isolate_, kAcquireLoad);
+// static
+TransitionsAccessor::Encoding TransitionsAccessor::GetEncoding(
+ Isolate* isolate, MaybeObject raw_transitions) {
HeapObject heap_object;
- if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
- encoding_ = kUninitialized;
- } else if (raw_transitions_->IsWeak()) {
- encoding_ = kWeakRef;
- } else if (raw_transitions_->GetHeapObjectIfStrong(isolate_, &heap_object)) {
+ if (raw_transitions->IsSmi() || raw_transitions->IsCleared()) {
+ return kUninitialized;
+ } else if (raw_transitions->IsWeak()) {
+ return kWeakRef;
+ } else if (raw_transitions->GetHeapObjectIfStrong(isolate, &heap_object)) {
if (heap_object.IsTransitionArray()) {
- encoding_ = kFullTransitionArray;
+ return kFullTransitionArray;
} else if (heap_object.IsPrototypeInfo()) {
- encoding_ = kPrototypeInfo;
+ return kPrototypeInfo;
} else {
- DCHECK(map_.is_deprecated());
DCHECK(heap_object.IsMap());
- encoding_ = kMigrationTarget;
+ return kMigrationTarget;
}
} else {
UNREACHABLE();
}
+}
+
+void TransitionsAccessor::Initialize() {
+ raw_transitions_ = map_.raw_transitions(isolate_, kAcquireLoad);
+ encoding_ = GetEncoding(isolate_, raw_transitions_);
+ DCHECK_IMPLIES(encoding_ == kMigrationTarget, map_.is_deprecated());
#if DEBUG
needs_reload_ = false;
#endif
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index f2319a9c6f..2bc8cf8697 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -4,6 +4,7 @@
#include "src/objects/transitions.h"
+#include "src/base/small-vector.h"
#include "src/objects/objects-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils.h"
@@ -94,7 +95,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int insertion_index;
int index;
if (flag == SPECIAL_TRANSITION) {
- index = result->SearchSpecial(Symbol::cast(*name), &insertion_index);
+ index =
+ result->SearchSpecial(Symbol::cast(*name), false, &insertion_index);
} else {
PropertyDetails details = GetTargetDetails(*name, *target);
index = result->Search(details.kind(), *name, details.attributes(),
@@ -138,10 +140,11 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
TransitionArray array = transitions();
number_of_transitions = array.number_of_transitions();
- int index = is_special_transition
- ? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array.Search(details.kind(), *name, details.attributes(),
- &insertion_index);
+ int index =
+ is_special_transition
+ ? array.SearchSpecial(Symbol::cast(*name), false, &insertion_index)
+ : array.Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
@@ -185,10 +188,11 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
if (array.number_of_transitions() != number_of_transitions) {
DCHECK_LT(array.number_of_transitions(), number_of_transitions);
- int index = is_special_transition
- ? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array.Search(details.kind(), *name, details.attributes(),
- &insertion_index);
+ int index =
+ is_special_transition
+ ? array.SearchSpecial(Symbol::cast(*name), false, &insertion_index)
+ : array.Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
CHECK_EQ(index, kNotFound);
USE(index);
DCHECK_GE(insertion_index, 0);
@@ -240,7 +244,9 @@ Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
Map TransitionsAccessor::SearchSpecial(Symbol name) {
if (encoding() != kFullTransitionArray) return Map();
- int transition = transitions().SearchSpecial(name);
+ base::SharedMutexGuardIf<base::kShared> scope(
+ isolate_->full_transition_array_access(), concurrent_access_);
+ int transition = transitions().SearchSpecial(name, concurrent_access_);
if (transition == kNotFound) return Map();
return transitions().GetTarget(transition);
}
@@ -382,6 +388,9 @@ void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
int capacity = cache->length() - header;
int transitions = TransitionArray::NumberOfPrototypeTransitions(*cache) + 1;
+ base::SharedMutexGuard<base::kExclusive> scope(
+ isolate_->full_transition_array_access());
+
if (transitions > capacity) {
// Grow the array if compacting it doesn't free space.
if (!TransitionArray::CompactPrototypeTransitionArray(isolate_, *cache)) {
@@ -509,43 +518,59 @@ void TransitionsAccessor::EnsureHasFullTransitionArray() {
}
void TransitionsAccessor::TraverseTransitionTreeInternal(
- TraverseCallback callback, DisallowGarbageCollection* no_gc) {
- switch (encoding()) {
- case kPrototypeInfo:
- case kUninitialized:
- case kMigrationTarget:
- break;
- case kWeakRef: {
- Map simple_target =
- Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
- TransitionsAccessor(isolate_, simple_target, no_gc)
- .TraverseTransitionTreeInternal(callback, no_gc);
- break;
- }
- case kFullTransitionArray: {
- if (transitions().HasPrototypeTransitions()) {
- WeakFixedArray proto_trans = transitions().GetPrototypeTransitions();
- int length = TransitionArray::NumberOfPrototypeTransitions(proto_trans);
- for (int i = 0; i < length; ++i) {
- int index = TransitionArray::kProtoTransitionHeaderSize + i;
- MaybeObject target = proto_trans.Get(index);
- HeapObject heap_object;
- if (target->GetHeapObjectIfWeak(&heap_object)) {
- TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
- .TraverseTransitionTreeInternal(callback, no_gc);
- } else {
- DCHECK(target->IsCleared());
+ const TraverseCallback& callback, DisallowGarbageCollection* no_gc) {
+ // Mostly arbitrary but more than enough to run the test suite in static
+ // memory.
+ static constexpr int kStaticStackSize = 16;
+ base::SmallVector<Map, kStaticStackSize> stack;
+ stack.emplace_back(map_);
+
+ // Pre-order iterative depth-first-search.
+ while (!stack.empty()) {
+ Map current_map = stack.back();
+ stack.pop_back();
+
+ callback(current_map);
+
+ MaybeObject raw_transitions =
+ current_map.raw_transitions(isolate_, kAcquireLoad);
+ Encoding encoding = GetEncoding(isolate_, raw_transitions);
+
+ switch (encoding) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ break;
+ case kWeakRef: {
+ stack.emplace_back(
+ Map::cast(raw_transitions->GetHeapObjectAssumeWeak()));
+ break;
+ }
+ case kFullTransitionArray: {
+ TransitionArray transitions =
+ TransitionArray::cast(raw_transitions->GetHeapObjectAssumeStrong());
+ if (transitions.HasPrototypeTransitions()) {
+ WeakFixedArray proto_trans = transitions.GetPrototypeTransitions();
+ int length =
+ TransitionArray::NumberOfPrototypeTransitions(proto_trans);
+ for (int i = 0; i < length; ++i) {
+ int index = TransitionArray::kProtoTransitionHeaderSize + i;
+ MaybeObject target = proto_trans.Get(index);
+ HeapObject heap_object;
+ if (target->GetHeapObjectIfWeak(&heap_object)) {
+ stack.emplace_back(Map::cast(heap_object));
+ } else {
+ DCHECK(target->IsCleared());
+ }
}
}
+ for (int i = 0; i < transitions.number_of_transitions(); ++i) {
+ stack.emplace_back(transitions.GetTarget(i));
+ }
+ break;
}
- for (int i = 0; i < transitions().number_of_transitions(); ++i) {
- TransitionsAccessor(isolate_, transitions().GetTarget(i), no_gc)
- .TraverseTransitionTreeInternal(callback, no_gc);
- }
- break;
}
}
- callback(map_);
}
#ifdef DEBUG
@@ -626,14 +651,14 @@ Map TransitionArray::SearchDetailsAndGetTarget(int transition,
int TransitionArray::Search(PropertyKind kind, Name name,
PropertyAttributes attributes,
int* out_insertion_index) {
- int transition = SearchName(name, out_insertion_index);
+ int transition = SearchName(name, false, out_insertion_index);
if (transition == kNotFound) return kNotFound;
return SearchDetails(transition, kind, attributes, out_insertion_index);
}
Map TransitionArray::SearchAndGetTarget(PropertyKind kind, Name name,
PropertyAttributes attributes) {
- int transition = SearchName(name, nullptr);
+ int transition = SearchName(name);
if (transition == kNotFound) {
return Map();
}
@@ -642,7 +667,7 @@ Map TransitionArray::SearchAndGetTarget(PropertyKind kind, Name name,
void TransitionArray::ForEachTransitionTo(
Name name, const ForEachTransitionCallback& callback) {
- int transition = SearchName(name, nullptr);
+ int transition = SearchName(name);
if (transition == kNotFound) return;
int nof_transitions = number_of_transitions();
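Note on TraverseTransitionTreeInternal above: the traversal now walks the transition tree iteratively in pre-order with an explicit SmallVector-backed stack instead of recursing once per map, which bounds native stack usage for deep transition trees. A minimal sketch of the same pattern over a generic tree, in plain C++ rather than V8 types:

  #include <functional>
  #include <vector>

  struct Node {
    std::vector<Node*> children;
  };

  // Pre-order, iterative depth-first traversal: visit a node, then push its
  // children onto an explicit stack (the V8 code uses base::SmallVector so the
  // common case stays in static storage).
  void Traverse(Node* root, const std::function<void(Node*)>& callback) {
    std::vector<Node*> stack;
    stack.push_back(root);
    while (!stack.empty()) {
      Node* current = stack.back();
      stack.pop_back();
      callback(current);
      for (Node* child : current->children) stack.push_back(child);
    }
  }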
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
index 7abd0a1b51..c59c48c757 100644
--- a/deps/v8/src/objects/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_TRANSITIONS_H_
#include "src/common/checks.h"
+#include "src/execution/isolate.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/elements-kind.h"
#include "src/objects/map.h"
@@ -45,12 +46,12 @@ using ForEachTransitionCallback = std::function<void(Map)>;
// cleared when the map they refer to is not otherwise reachable.
class V8_EXPORT_PRIVATE TransitionsAccessor {
public:
- // For concurrent access, use the other constructor.
- inline TransitionsAccessor(Isolate* isolate, Map map,
- DisallowGarbageCollection* no_gc);
// {concurrent_access} signals that the TransitionsAccessor will only be used
// in background threads. It acquires a reader lock for critical paths, as
// well as blocking the accessor from modifying the TransitionsArray.
+ inline TransitionsAccessor(Isolate* isolate, Map map,
+ DisallowGarbageCollection* no_gc,
+ bool concurrent_access = false);
inline TransitionsAccessor(Isolate* isolate, Handle<Map> map,
bool concurrent_access = false);
// Insert a new transition into |map|'s transition array, extending it
@@ -106,10 +107,12 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
// ===== ITERATION =====
using TraverseCallback = std::function<void(Map)>;
- // Traverse the transition tree in postorder.
- void TraverseTransitionTree(TraverseCallback callback) {
+ // Traverse the transition tree in preorder.
+ void TraverseTransitionTree(const TraverseCallback& callback) {
// Make sure that we do not allocate in the callback.
DisallowGarbageCollection no_gc;
+ base::SharedMutexGuardIf<base::kShared> scope(
+ isolate_->full_transition_array_access(), concurrent_access_);
TraverseTransitionTreeInternal(callback, &no_gc);
}
@@ -172,6 +175,9 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
friend class third_party_heap::Impl;
friend class TransitionArray;
+ static inline Encoding GetEncoding(Isolate* isolate,
+ MaybeObject raw_transitions);
+
inline PropertyDetails GetSimpleTargetDetails(Map transition);
static inline Name GetSimpleTransitionKey(Map transition);
@@ -197,7 +203,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
void SetPrototypeTransitions(Handle<WeakFixedArray> proto_transitions);
WeakFixedArray GetPrototypeTransitions();
- void TraverseTransitionTreeInternal(TraverseCallback callback,
+ void TraverseTransitionTreeInternal(const TraverseCallback& callback,
DisallowGarbageCollection* no_gc);
Isolate* isolate_;
@@ -329,9 +335,11 @@ class TransitionArray : public WeakFixedArray {
// Search a non-property transition (like elements kind, observe or frozen
// transitions).
- inline int SearchSpecial(Symbol symbol, int* out_insertion_index = nullptr);
+ inline int SearchSpecial(Symbol symbol, bool concurrent_search = false,
+ int* out_insertion_index = nullptr);
// Search a first transition for a given property name.
- inline int SearchName(Name name, int* out_insertion_index = nullptr);
+ inline int SearchName(Name name, bool concurrent_search = false,
+ int* out_insertion_index = nullptr);
int SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes, int* out_insertion_index);
Map SearchDetailsAndGetTarget(int transition, PropertyKind kind,
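Note on the transitions.h changes above: the accessor and search paths gain a concurrent_access flag; when it is set, readers take the isolate's full_transition_array_access mutex in shared mode, while writers such as PutPrototypeTransition take it exclusively. A rough standard-library equivalent of the conditional reader guard — a sketch only, not the base::SharedMutexGuardIf implementation:

  #include <optional>
  #include <shared_mutex>

  class SharedLockIf {
   public:
    // Acquires the reader lock only when `enable` is true (background
    // threads); main-thread callers skip the lock entirely.
    SharedLockIf(std::shared_mutex& mutex, bool enable) {
      if (enable) lock_.emplace(mutex);
    }

   private:
    std::optional<std::shared_lock<std::shared_mutex>> lock_;
  };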
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index a2345102c9..53bb0cf927 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -1704,9 +1704,6 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
if (!FLAG_enable_experimental_regexp_engine) {
bad_flags_mask |= JSRegExp::kLinear;
}
- if (!FLAG_harmony_regexp_match_indices) {
- bad_flags_mask |= JSRegExp::kHasIndices;
- }
if ((raw_flags & bad_flags_mask) ||
!JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index a002ef4866..a784cec756 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -110,6 +110,12 @@ class ObjectVisitor {
ObjectSlot end) = 0;
virtual void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) = 0;
+ // When V8_EXTERNAL_CODE_SPACE is enabled, visits a Code pointer slot.
+ // The values may be modified on return.
+ // Not used when V8_EXTERNAL_CODE_SPACE is not enabled (the Code pointer
+ // slots are visited as a part of on-heap slot visitation - via
+ // VisitPointers()).
+ virtual void VisitCodePointer(HeapObject host, CodeObjectSlot slot) = 0;
// Custom weak pointers must be ignored by the GC but not other
// visitors. They're used for e.g., lists that are recreated after GC. The
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index ba06f94402..fa90aba971 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -9,7 +9,7 @@
#include "src/ast/ast.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
@@ -168,9 +168,10 @@ UnoptimizedCompileState::UnoptimizedCompileState(Isolate* isolate)
allocator_(isolate->allocator()),
ast_string_constants_(isolate->ast_string_constants()),
logger_(isolate->logger()),
- parallel_tasks_(isolate->compiler_dispatcher()->IsEnabled()
- ? new ParallelTasks(isolate->compiler_dispatcher())
- : nullptr) {}
+ parallel_tasks_(
+ isolate->lazy_compile_dispatcher()->IsEnabled()
+ ? new ParallelTasks(isolate->lazy_compile_dispatcher())
+ : nullptr) {}
UnoptimizedCompileState::UnoptimizedCompileState(
const UnoptimizedCompileState& other) V8_NOEXCEPT
@@ -332,7 +333,7 @@ void ParseInfo::CheckFlagsForFunctionFromScript(Script script) {
void UnoptimizedCompileState::ParallelTasks::Enqueue(
ParseInfo* outer_parse_info, const AstRawString* function_name,
FunctionLiteral* literal) {
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
dispatcher_->Enqueue(outer_parse_info, function_name, literal);
if (job_id) {
enqueued_jobs_.emplace_front(std::make_pair(literal, *job_id));
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 55ba2b1cd7..c6bcb221ea 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -31,7 +31,7 @@ class AccountingAllocator;
class AstRawString;
class AstStringConstants;
class AstValueFactory;
-class CompilerDispatcher;
+class LazyCompileDispatcher;
class DeclarationScope;
class FunctionLiteral;
class RuntimeCallStats;
@@ -155,8 +155,8 @@ class V8_EXPORT_PRIVATE UnoptimizedCompileState {
class ParallelTasks {
public:
- explicit ParallelTasks(CompilerDispatcher* compiler_dispatcher)
- : dispatcher_(compiler_dispatcher) {
+ explicit ParallelTasks(LazyCompileDispatcher* lazy_compile_dispatcher)
+ : dispatcher_(lazy_compile_dispatcher) {
DCHECK_NOT_NULL(dispatcher_);
}
@@ -169,10 +169,10 @@ class V8_EXPORT_PRIVATE UnoptimizedCompileState {
EnqueuedJobsIterator begin() { return enqueued_jobs_.begin(); }
EnqueuedJobsIterator end() { return enqueued_jobs_.end(); }
- CompilerDispatcher* dispatcher() { return dispatcher_; }
+ LazyCompileDispatcher* dispatcher() { return dispatcher_; }
private:
- CompilerDispatcher* dispatcher_;
+ LazyCompileDispatcher* dispatcher_;
std::forward_list<std::pair<FunctionLiteral*, uintptr_t>> enqueued_jobs_;
};
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 0abfbe8d8f..a541734bdd 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -17,7 +17,7 @@
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
@@ -516,8 +516,6 @@ void MaybeProcessSourceRanges(ParseInfo* parse_info, Expression* root,
void Parser::ParseProgram(Isolate* isolate, Handle<Script> script,
ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
- // TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
- // see comment for HistogramTimerScope class.
DCHECK_EQ(script->id(), flags().script_id());
// It's OK to use the Isolate & counters here, since this function is only
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index b4dc3e3d3f..a59c9359eb 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -74,7 +74,7 @@ ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
size_t profiler_count = isolate_->num_cpu_profilers();
profiler_count++;
isolate_->set_num_cpu_profilers(profiler_count);
- isolate_->set_is_profiling(true);
+ isolate_->SetIsProfiling(true);
#if V8_ENABLE_WEBASSEMBLY
wasm::GetWasmEngine()->EnableCodeLogging(isolate_);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -99,7 +99,7 @@ ProfilingScope::~ProfilingScope() {
DCHECK_GT(profiler_count, 0);
profiler_count--;
isolate_->set_num_cpu_profilers(profiler_count);
- if (profiler_count == 0) isolate_->set_is_profiling(false);
+ if (profiler_count == 0) isolate_->SetIsProfiling(false);
}
ProfilerEventsProcessor::ProfilerEventsProcessor(
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 1d5c74c1d7..231595dae7 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -733,11 +733,10 @@ class IndexedReferencesExtractor : public ObjectVisitor {
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
void VisitMapPointer(HeapObject object) override {
- if (generator_->visited_fields_[0]) {
- generator_->visited_fields_[0] = false;
- } else {
- VisitHeapObjectImpl(object.map(), 0);
- }
+ // TODO(v8:11880): support external code space (here object could be Code,
+ // so the V8 heap cage_base must be used here).
+ PtrComprCageBase cage_base = GetPtrComprCageBase(object);
+ VisitSlotImpl(cage_base, object.map_slot());
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
@@ -745,19 +744,19 @@ class IndexedReferencesExtractor : public ObjectVisitor {
// all the slots must point inside the object.
CHECK_LE(parent_start_, start);
CHECK_LE(end, parent_end_);
- for (MaybeObjectSlot p = start; p < end; ++p) {
- int field_index = static_cast<int>(p - parent_start_);
- if (generator_->visited_fields_[field_index]) {
- generator_->visited_fields_[field_index] = false;
- continue;
- }
- HeapObject heap_object;
- if ((*p)->GetHeapObject(&heap_object)) {
- VisitHeapObjectImpl(heap_object, field_index);
- }
+ PtrComprCageBase cage_base = GetPtrComprCageBase(host);
+ for (MaybeObjectSlot slot = start; slot < end; ++slot) {
+ VisitSlotImpl(cage_base, slot);
}
}
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ VisitSlotImpl(code_cage_base, slot);
+ }
+
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VisitHeapObjectImpl(target, -1);
@@ -768,6 +767,19 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
private:
+ template <typename TSlot>
+ V8_INLINE void VisitSlotImpl(PtrComprCageBase cage_base, TSlot slot) {
+ int field_index = static_cast<int>(MaybeObjectSlot(slot) - parent_start_);
+ if (generator_->visited_fields_[field_index]) {
+ generator_->visited_fields_[field_index] = false;
+ } else {
+ HeapObject heap_object;
+ if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ VisitHeapObjectImpl(heap_object, field_index);
+ }
+ }
+ }
+
V8_INLINE void VisitHeapObjectImpl(HeapObject heap_object, int field_index) {
DCHECK_LE(-1, field_index);
// The last parameter {field_offset} is only used to check some well-known
@@ -1200,9 +1212,17 @@ void V8HeapExplorer::TagBuiltinCodeObject(Code code, const char* name) {
}
void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
- TagObject(code.relocation_info(), "(code relocation info)");
- SetInternalReference(entry, "relocation_info", code.relocation_info(),
+ Object reloc_info_or_undefined = code.relocation_info_or_undefined();
+ TagObject(reloc_info_or_undefined, "(code relocation info)");
+ SetInternalReference(entry, "relocation_info", reloc_info_or_undefined,
Code::kRelocationInfoOffset);
+ if (reloc_info_or_undefined.IsUndefined()) {
+ // The code object was compiled directly on the heap, but it was not
+ // finalized.
+ DCHECK(code.kind() == CodeKind::BASELINE);
+ return;
+ }
+
TagObject(code.deoptimization_data(), "(code deopt data)");
SetInternalReference(entry, "deoptimization_data", code.deoptimization_data(),
Code::kDeoptimizationDataOffset);
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 5ccdb03025..06aefe9505 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -952,11 +952,13 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
+ const ProfileStackTrace empty_path;
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
- if (profile->context_filter().Accept(native_context_address)) {
- profile->AddPath(timestamp, path, src_line, update_stats,
- sampling_interval);
- }
+ // If the context filter check failed, omit the contents of the stack.
+ bool accepts_context =
+ profile->context_filter().Accept(native_context_address);
+ profile->AddPath(timestamp, accepts_context ? path : empty_path, src_line,
+ update_stats, sampling_interval);
}
current_profiles_semaphore_.Signal();
}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 4bef9793ab..6a6b2c93ee 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -219,13 +219,10 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
int column = v8::AllocationProfile::kNoColumnNumberInfo;
std::vector<v8::AllocationProfile::Allocation> allocations;
allocations.reserve(node->allocations_.size());
- if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
- scripts.find(node->script_id_) != scripts.end()) {
- // Cannot use std::map<T>::at because it is not available on android.
- auto non_const_scripts =
- const_cast<std::map<int, Handle<Script>>&>(scripts);
- Handle<Script> script = non_const_scripts[node->script_id_];
- if (!script.is_null()) {
+ if (node->script_id_ != v8::UnboundScript::kNoScriptId) {
+ auto script_iterator = scripts.find(node->script_id_);
+ if (script_iterator != scripts.end()) {
+ Handle<Script> script = script_iterator->second;
if (script->name().IsName()) {
Name name = Name::cast(script->name());
script_name = ToApiHandle<v8::String>(
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 2609d761fb..054aa3f80e 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -74,6 +74,23 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
return AddOrDisposeString(str.begin(), len);
}
+const char* StringsStorage::GetSymbol(Symbol sym) {
+ if (!sym.description().IsString()) {
+ return "<symbol>";
+ }
+ String description = String::cast(sym.description());
+ int length = std::min(FLAG_heap_snapshot_string_limit, description.length());
+ auto data = description.ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0,
+ length, &length);
+ if (sym.is_private_name()) {
+ return AddOrDisposeString(data.release(), length);
+ }
+ auto str_length = 8 + length + 1 + 1;
+ auto str_result = NewArray<char>(str_length);
+ snprintf(str_result, str_length, "<symbol %s>", data.get());
+ return AddOrDisposeString(str_result, str_length - 1);
+}
+
const char* StringsStorage::GetName(Name name) {
if (name.IsString()) {
String str = String::cast(name);
@@ -83,7 +100,7 @@ const char* StringsStorage::GetName(Name name) {
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
return AddOrDisposeString(data.release(), actual_length);
} else if (name.IsSymbol()) {
- return "<symbol>";
+ return GetSymbol(Symbol::cast(name));
}
return "";
}
@@ -106,7 +123,7 @@ const char* StringsStorage::GetConsName(const char* prefix, Name name) {
return AddOrDisposeString(cons_result, cons_length - 1);
} else if (name.IsSymbol()) {
- return "<symbol>";
+ return GetSymbol(Symbol::cast(name));
}
return "";
}
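Note on the new StringsStorage::GetSymbol above: the buffer size 8 + length + 1 + 1 covers the eight characters of "<symbol ", the (possibly truncated) description, the closing '>', and the terminating NUL, so the stored string length is str_length - 1. A quick standalone check of that arithmetic, independent of V8:

  #include <cstdio>
  #include <cstring>

  int main() {
    const char* description = "foo";
    int length = static_cast<int>(std::strlen(description));
    int str_length = 8 + length + 1 + 1;  // "<symbol " + description + ">" + '\0'
    char* buf = new char[str_length];
    std::snprintf(buf, str_length, "<symbol %s>", description);
    std::printf("%s -> %zu chars\n", buf, std::strlen(buf));  // 12 == str_length - 1
    delete[] buf;
    return 0;
  }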
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index cac4a9fce9..7e39c0ee33 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -16,6 +16,7 @@ namespace v8 {
namespace internal {
class Name;
+class Symbol;
// Provides a storage of strings allocated in C++ heap, to hold them
// forever, even if they disappear from JS heap or external storage.
@@ -57,6 +58,7 @@ class V8_EXPORT_PRIVATE StringsStorage {
base::CustomMatcherHashMap::Entry* GetEntry(const char* str, int len);
PRINTF_FORMAT(2, 0)
const char* GetVFormatted(const char* format, va_list args);
+ const char* GetSymbol(Symbol sym);
base::CustomMatcherHashMap names_;
base::Mutex mutex_;
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index 277b8df570..8b1d841536 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -64,17 +64,14 @@ class CanBeHandledVisitor final : private RegExpVisitor {
}
void* VisitCharacterClass(RegExpCharacterClass* node, void*) override {
- result_ = result_ && AreSuitableFlags(node->flags());
return nullptr;
}
void* VisitAssertion(RegExpAssertion* node, void*) override {
- result_ = result_ && AreSuitableFlags(node->flags());
return nullptr;
}
void* VisitAtom(RegExpAtom* node, void*) override {
- result_ = result_ && AreSuitableFlags(node->flags());
return nullptr;
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 64e7b869b0..bb82c270b7 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -747,7 +747,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ LoadU64(r4, MemOperand(frame_pointer(), kStartIndex));
__ subi(r3, current_input_offset(), Operand(char_size()));
if (mode_ == UC16) {
- __ ShiftLeftImm(r0, r4, Operand(1));
+ __ ShiftLeftU64(r0, r4, Operand(1));
__ sub(r3, r3, r0);
} else {
__ sub(r3, r3, r4);
@@ -810,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ sub(r4, end_of_input_address(), r4);
// r4 is length of input in bytes.
if (mode_ == UC16) {
- __ ShiftRightImm(r4, r4, Operand(1));
+ __ ShiftRightU64(r4, r4, Operand(1));
}
// r4 is length of input in characters.
__ add(r4, r4, r5);
@@ -828,9 +828,9 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ mr(r25, r5);
}
if (mode_ == UC16) {
- __ ShiftRightArithImm(r5, r5, 1);
+ __ ShiftRightS64(r5, r5, Operand(1));
__ add(r5, r4, r5);
- __ ShiftRightArithImm(r6, r6, 1);
+ __ ShiftRightS64(r6, r6, Operand(1));
__ add(r6, r4, r6);
} else {
__ add(r5, r4, r5);
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index c2ce0c4c0f..2b9f767c24 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -280,8 +280,7 @@ class RegExpAssertion final : public RegExpTree {
NON_BOUNDARY = 5,
LAST_TYPE = NON_BOUNDARY,
};
- RegExpAssertion(AssertionType type, JSRegExp::Flags flags)
- : assertion_type_(type), flags_(flags) {}
+ explicit RegExpAssertion(AssertionType type) : assertion_type_(type) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAssertion* AsAssertion() override;
@@ -291,11 +290,9 @@ class RegExpAssertion final : public RegExpTree {
int min_match() override { return 0; }
int max_match() override { return 0; }
AssertionType assertion_type() const { return assertion_type_; }
- JSRegExp::Flags flags() const { return flags_; }
private:
const AssertionType assertion_type_;
- const JSRegExp::Flags flags_;
};
@@ -312,21 +309,17 @@ class RegExpCharacterClass final : public RegExpTree {
using CharacterClassFlags = base::Flags<Flag>;
RegExpCharacterClass(
- Zone* zone, ZoneList<CharacterRange>* ranges, JSRegExp::Flags flags,
+ Zone* zone, ZoneList<CharacterRange>* ranges,
CharacterClassFlags character_class_flags = CharacterClassFlags())
- : set_(ranges),
- flags_(flags),
- character_class_flags_(character_class_flags) {
+ : set_(ranges), character_class_flags_(character_class_flags) {
// Convert the empty set of ranges to the negated Everything() range.
if (ranges->is_empty()) {
ranges->Add(CharacterRange::Everything(), zone);
character_class_flags_ ^= NEGATED;
}
}
- RegExpCharacterClass(base::uc16 type, JSRegExp::Flags flags)
- : set_(type),
- flags_(flags),
- character_class_flags_(CharacterClassFlags()) {}
+ explicit RegExpCharacterClass(base::uc16 type)
+ : set_(type), character_class_flags_(CharacterClassFlags()) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpCharacterClass* AsCharacterClass() override;
@@ -356,23 +349,19 @@ class RegExpCharacterClass final : public RegExpTree {
base::uc16 standard_type() const { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
bool is_negated() const { return (character_class_flags_ & NEGATED) != 0; }
- JSRegExp::Flags flags() const { return flags_; }
bool contains_split_surrogate() const {
return (character_class_flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
}
private:
CharacterSet set_;
- const JSRegExp::Flags flags_;
CharacterClassFlags character_class_flags_;
};
class RegExpAtom final : public RegExpTree {
public:
- explicit RegExpAtom(base::Vector<const base::uc16> data,
- JSRegExp::Flags flags)
- : data_(data), flags_(flags) {}
+ explicit RegExpAtom(base::Vector<const base::uc16> data) : data_(data) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAtom* AsAtom() override;
@@ -383,12 +372,9 @@ class RegExpAtom final : public RegExpTree {
void AppendToText(RegExpText* text, Zone* zone) override;
base::Vector<const base::uc16> data() { return data_; }
int length() { return data_.length(); }
- JSRegExp::Flags flags() const { return flags_; }
- bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
private:
base::Vector<const base::uc16> data_;
- const JSRegExp::Flags flags_;
};
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index c9d4cfc4f9..f668aa6d84 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -200,19 +200,17 @@ ZoneList<CharacterRange>* ToCanonicalZoneList(
}
void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success, UnicodeRangeSplitter* splitter,
- JSRegExp::Flags flags) {
+ RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
ZoneList<CharacterRange>* bmp =
ToCanonicalZoneList(splitter->bmp(), compiler->zone());
if (bmp == nullptr) return;
result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
- compiler->zone(), bmp, compiler->read_backward(), on_success, flags)));
+ compiler->zone(), bmp, compiler->read_backward(), on_success)));
}
void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success,
- UnicodeRangeSplitter* splitter,
- JSRegExp::Flags flags) {
+ UnicodeRangeSplitter* splitter) {
ZoneList<CharacterRange>* non_bmp =
ToCanonicalZoneList(splitter->non_bmp(), compiler->zone());
if (non_bmp == nullptr) return;
@@ -237,7 +235,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Singleton(from_l),
CharacterRange::Range(from_t, to_t), compiler->read_backward(),
- on_success, flags)));
+ on_success)));
} else {
if (from_t != kTrailSurrogateStart) {
// Add [from_l][from_t-\udfff]
@@ -245,7 +243,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Singleton(from_l),
CharacterRange::Range(from_t, kTrailSurrogateEnd),
- compiler->read_backward(), on_success, flags)));
+ compiler->read_backward(), on_success)));
from_l++;
}
if (to_t != kTrailSurrogateEnd) {
@@ -254,7 +252,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Singleton(to_l),
CharacterRange::Range(kTrailSurrogateStart, to_t),
- compiler->read_backward(), on_success, flags)));
+ compiler->read_backward(), on_success)));
to_l--;
}
if (from_l <= to_l) {
@@ -263,7 +261,7 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
GuardedAlternative(TextNode::CreateForSurrogatePair(
zone, CharacterRange::Range(from_l, to_l),
CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
- compiler->read_backward(), on_success, flags)));
+ compiler->read_backward(), on_success)));
}
}
}
@@ -271,39 +269,38 @@ void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
- ZoneList<CharacterRange>* match, RegExpNode* on_success, bool read_backward,
- JSRegExp::Flags flags) {
+ ZoneList<CharacterRange>* match, RegExpNode* on_success,
+ bool read_backward) {
Zone* zone = compiler->zone();
RegExpNode* match_node = TextNode::CreateForCharacterRanges(
- zone, match, read_backward, on_success, flags);
+ zone, match, read_backward, on_success);
int stack_register = compiler->UnicodeLookaroundStackRegister();
int position_register = compiler->UnicodeLookaroundPositionRegister();
RegExpLookaround::Builder lookaround(false, match_node, stack_register,
position_register);
RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookbehind, !read_backward, lookaround.on_match_success(), flags);
+ zone, lookbehind, !read_backward, lookaround.on_match_success());
return lookaround.ForMatch(negative_match);
}
RegExpNode* MatchAndNegativeLookaroundInReadDirection(
RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
- bool read_backward, JSRegExp::Flags flags) {
+ bool read_backward) {
Zone* zone = compiler->zone();
int stack_register = compiler->UnicodeLookaroundStackRegister();
int position_register = compiler->UnicodeLookaroundPositionRegister();
RegExpLookaround::Builder lookaround(false, on_success, stack_register,
position_register);
RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookahead, read_backward, lookaround.on_match_success(), flags);
+ zone, lookahead, read_backward, lookaround.on_match_success());
return TextNode::CreateForCharacterRanges(
- zone, match, read_backward, lookaround.ForMatch(negative_match), flags);
+ zone, match, read_backward, lookaround.ForMatch(negative_match));
}
void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success,
- UnicodeRangeSplitter* splitter,
- JSRegExp::Flags flags) {
+ UnicodeRangeSplitter* splitter) {
ZoneList<CharacterRange>* lead_surrogates =
ToCanonicalZoneList(splitter->lead_surrogates(), compiler->zone());
if (lead_surrogates == nullptr) return;
@@ -317,20 +314,19 @@ void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
// Reading backward. Assert that reading forward, there is no trail
// surrogate, and then backward match the lead surrogate.
match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, trail_surrogates, lead_surrogates, on_success, true, flags);
+ compiler, trail_surrogates, lead_surrogates, on_success, true);
} else {
// Reading forward. Forward match the lead surrogate and assert that
// no trail surrogate follows.
match = MatchAndNegativeLookaroundInReadDirection(
- compiler, lead_surrogates, trail_surrogates, on_success, false, flags);
+ compiler, lead_surrogates, trail_surrogates, on_success, false);
}
result->AddAlternative(GuardedAlternative(match));
}
void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
RegExpNode* on_success,
- UnicodeRangeSplitter* splitter,
- JSRegExp::Flags flags) {
+ UnicodeRangeSplitter* splitter) {
ZoneList<CharacterRange>* trail_surrogates =
ToCanonicalZoneList(splitter->trail_surrogates(), compiler->zone());
if (trail_surrogates == nullptr) return;
@@ -344,12 +340,12 @@ void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
// Reading backward. Backward match the trail surrogate and assert that no
// lead surrogate precedes it.
match = MatchAndNegativeLookaroundInReadDirection(
- compiler, trail_surrogates, lead_surrogates, on_success, true, flags);
+ compiler, trail_surrogates, lead_surrogates, on_success, true);
} else {
// Reading forward. Assert that reading backward, there is no lead
// surrogate, and then forward match the trail surrogate.
match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, lead_surrogates, trail_surrogates, on_success, false, flags);
+ compiler, lead_surrogates, trail_surrogates, on_success, false);
}
result->AddAlternative(GuardedAlternative(match));
}
@@ -365,9 +361,7 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
// the associated trail surrogate.
ZoneList<CharacterRange>* range = CharacterRange::List(
zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- return TextNode::CreateForCharacterRanges(zone, range, false, on_success,
- default_flags);
+ return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
}
void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
@@ -410,10 +404,10 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
set_.Canonicalize();
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* ranges = this->ranges(zone);
- if (NeedsUnicodeCaseEquivalents(flags_)) {
+ if (NeedsUnicodeCaseEquivalents(compiler->flags())) {
AddUnicodeCaseEquivalents(ranges, zone);
}
- if (IsUnicode(flags_) && !compiler->one_byte() &&
+ if (IsUnicode(compiler->flags()) && !compiler->one_byte() &&
!contains_split_surrogate()) {
if (is_negated()) {
ZoneList<CharacterRange>* negated =
@@ -422,9 +416,8 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
ranges = negated;
}
if (ranges->length() == 0) {
- JSRegExp::Flags default_flags;
RegExpCharacterClass* fail =
- zone->New<RegExpCharacterClass>(zone, ranges, default_flags);
+ zone->New<RegExpCharacterClass>(zone, ranges);
return zone->New<TextNode>(fail, compiler->read_backward(), on_success);
}
if (standard_type() == '*') {
@@ -432,10 +425,10 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
} else {
ChoiceNode* result = zone->New<ChoiceNode>(2, zone);
UnicodeRangeSplitter splitter(ranges);
- AddBmpCharacters(compiler, result, on_success, &splitter, flags_);
- AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter, flags_);
- AddLoneLeadSurrogates(compiler, result, on_success, &splitter, flags_);
- AddLoneTrailSurrogates(compiler, result, on_success, &splitter, flags_);
+ AddBmpCharacters(compiler, result, on_success, &splitter);
+ AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
+ AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
+ AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
static constexpr int kMaxRangesToInline = 32; // Arbitrary.
if (ranges->length() > kMaxRangesToInline) result->SetDoNotInline();
return result;
@@ -510,12 +503,10 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
// i is length or it is the index of an atom.
if (i == length) break;
int first_atom = i;
- JSRegExp::Flags flags = alternatives->at(i)->AsAtom()->flags();
i++;
while (i < length) {
RegExpTree* alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
- if (alternative->AsAtom()->flags() != flags) break;
i++;
}
// Sort atoms to get ones with common prefixes together.
@@ -527,7 +518,7 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
DCHECK_LT(first_atom, alternatives->length());
DCHECK_LE(i, alternatives->length());
DCHECK_LE(first_atom, i);
- if (IgnoreCase(flags)) {
+ if (IgnoreCase(compiler->flags())) {
#ifdef V8_INTL_SUPPORT
alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
i - first_atom);
@@ -564,7 +555,6 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
continue;
}
RegExpAtom* const atom = alternative->AsAtom();
- JSRegExp::Flags flags = atom->flags();
#ifdef V8_INTL_SUPPORT
icu::UnicodeString common_prefix(atom->data().at(0));
#else
@@ -577,18 +567,17 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
RegExpAtom* const atom = alternative->AsAtom();
- if (atom->flags() != flags) break;
#ifdef V8_INTL_SUPPORT
icu::UnicodeString new_prefix(atom->data().at(0));
if (new_prefix != common_prefix) {
- if (!IgnoreCase(flags)) break;
+ if (!IgnoreCase(compiler->flags())) break;
if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
break;
}
#else
unibrow::uchar new_prefix = atom->data().at(0);
if (new_prefix != common_prefix) {
- if (!IgnoreCase(flags)) break;
+ if (!IgnoreCase(compiler->flags())) break;
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
new_prefix = Canonical(canonicalize, new_prefix);
@@ -617,8 +606,8 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
}
}
}
- RegExpAtom* prefix = zone->New<RegExpAtom>(
- atom->data().SubVector(0, prefix_length), flags);
+ RegExpAtom* prefix =
+ zone->New<RegExpAtom>(atom->data().SubVector(0, prefix_length));
ZoneList<RegExpTree*>* pair = zone->New<ZoneList<RegExpTree*>>(2, zone);
pair->Add(prefix, zone);
ZoneList<RegExpTree*>* suffixes =
@@ -631,8 +620,7 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
suffixes->Add(zone->New<RegExpEmpty>(), zone);
} else {
RegExpTree* suffix = zone->New<RegExpAtom>(
- old_atom->data().SubVector(prefix_length, old_atom->length()),
- flags);
+ old_atom->data().SubVector(prefix_length, old_atom->length()));
suffixes->Add(suffix, zone);
}
}
@@ -670,7 +658,7 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
i++;
continue;
}
- JSRegExp::Flags flags = atom->flags();
+ const JSRegExp::Flags flags = compiler->flags();
DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
bool contains_trail_surrogate =
@@ -684,7 +672,6 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
if (!alternative->IsAtom()) break;
RegExpAtom* const atom = alternative->AsAtom();
if (atom->length() != 1) break;
- if (atom->flags() != flags) break;
DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
contains_trail_surrogate |=
@@ -705,8 +692,8 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
if (IsUnicode(flags) && contains_trail_surrogate) {
character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
}
- alternatives->at(write_posn++) = zone->New<RegExpCharacterClass>(
- zone, ranges, flags, character_class_flags);
+ alternatives->at(write_posn++) =
+ zone->New<RegExpCharacterClass>(zone, ranges, character_class_flags);
} else {
// Just copy any trivial alternatives.
for (int j = first_in_run; j < i; j++) {
@@ -754,7 +741,7 @@ RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
RegExpAssertion::AssertionType type,
JSRegExp::Flags flags) {
- DCHECK(NeedsUnicodeCaseEquivalents(flags));
+ CHECK(NeedsUnicodeCaseEquivalents(flags));
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* word_range =
zone->New<ZoneList<CharacterRange>>(2, zone);
@@ -772,13 +759,13 @@ RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
stack_register, position_register);
RegExpNode* backward = TextNode::CreateForCharacterRanges(
- zone, word_range, true, lookbehind.on_match_success(), flags);
+ zone, word_range, true, lookbehind.on_match_success());
// Look to the right.
RegExpLookaround::Builder lookahead(lookahead_for_word,
lookbehind.ForMatch(backward),
stack_register, position_register);
RegExpNode* forward = TextNode::CreateForCharacterRanges(
- zone, word_range, false, lookahead.on_match_success(), flags);
+ zone, word_range, false, lookahead.on_match_success());
result->AddAlternative(GuardedAlternative(lookahead.ForMatch(forward)));
}
return result;
@@ -796,14 +783,14 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
case START_OF_INPUT:
return AssertionNode::AtStart(on_success);
case BOUNDARY:
- return NeedsUnicodeCaseEquivalents(flags_)
+ return NeedsUnicodeCaseEquivalents(compiler->flags())
? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
- flags_)
+ compiler->flags())
: AssertionNode::AtBoundary(on_success);
case NON_BOUNDARY:
- return NeedsUnicodeCaseEquivalents(flags_)
- ? BoundaryAssertionAsLookaround(compiler, on_success,
- NON_BOUNDARY, flags_)
+ return NeedsUnicodeCaseEquivalents(compiler->flags())
+ ? BoundaryAssertionAsLookaround(
+ compiler, on_success, NON_BOUNDARY, compiler->flags())
: AssertionNode::AtNonBoundary(on_success);
case END_OF_INPUT:
return AssertionNode::AtEnd(on_success);
@@ -819,9 +806,7 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
ZoneList<CharacterRange>* newline_ranges =
zone->New<ZoneList<CharacterRange>>(3, zone);
CharacterRange::AddClassEscape('n', newline_ranges, false, zone);
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- RegExpCharacterClass* newline_atom =
- zone->New<RegExpCharacterClass>('n', default_flags);
+ RegExpCharacterClass* newline_atom = zone->New<RegExpCharacterClass>('n');
TextNode* newline_matcher =
zone->New<TextNode>(newline_atom, false,
ActionNode::PositiveSubmatchSuccess(
@@ -975,16 +960,11 @@ class AssertionSequenceRewriter final {
uint32_t seen_assertions = 0;
STATIC_ASSERT(RegExpAssertion::LAST_TYPE < kUInt32Size * kBitsPerByte);
- // Flags must match for folding.
- JSRegExp::Flags flags = terms_->at(from)->AsAssertion()->flags();
- bool saw_mismatched_flags = false;
-
for (int i = from; i < to; i++) {
RegExpAssertion* t = terms_->at(i)->AsAssertion();
- if (t->flags() != flags) saw_mismatched_flags = true;
const uint32_t bit = 1 << t->assertion_type();
- if ((seen_assertions & bit) && !saw_mismatched_flags) {
+ if (seen_assertions & bit) {
// Fold duplicates.
terms_->Set(i, zone_->New<RegExpEmpty>());
}
@@ -1006,8 +986,7 @@ class AssertionSequenceRewriter final {
// negated '*' (everything) range serves the purpose.
ZoneList<CharacterRange>* ranges =
zone_->New<ZoneList<CharacterRange>>(0, zone_);
- RegExpCharacterClass* cc =
- zone_->New<RegExpCharacterClass>(zone_, ranges, JSRegExp::Flags());
+ RegExpCharacterClass* cc = zone_->New<RegExpCharacterClass>(zone_, ranges);
terms_->Set(from, cc);
// Zero out the rest.
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 04599f6a39..38a3d4447f 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -240,12 +240,13 @@ class RecursionCheck {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool one_byte)
+ JSRegExp::Flags flags, bool one_byte)
: next_register_(JSRegExp::RegistersForCaptureCount(capture_count)),
unicode_lookaround_stack_register_(kNoRegister),
unicode_lookaround_position_register_(kNoRegister),
work_list_(nullptr),
recursion_depth_(0),
+ flags_(flags),
one_byte_(one_byte),
reg_exp_too_big_(false),
limiting_recursion_(false),
@@ -279,6 +280,9 @@ RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
if (!node->label()->is_bound()) node->Emit(this, &new_trace);
}
if (reg_exp_too_big_) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("Aborting on excess zone allocation");
+ }
macro_assembler_->AbortedCodeGeneration();
return CompilationResult::RegExpTooBig();
}
@@ -1585,7 +1589,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
base::uc16 c = quarks[i];
- if (elm.atom()->ignore_case()) {
+ if (IgnoreCase(compiler->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, c, compiler->one_byte(), chars, 4);
@@ -1815,16 +1819,16 @@ class IterationDecrementer {
LoopChoiceNode* node_;
};
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
VisitMarker marker(info());
- return FilterSuccessor(depth - 1);
+ return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
- RegExpNode* next = on_success_->FilterOneByte(depth - 1);
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, JSRegExp::Flags flags) {
+ RegExpNode* next = on_success_->FilterOneByte(depth - 1, flags);
if (next == nullptr) return set_replacement(nullptr);
on_success_ = next;
return set_replacement(this);
@@ -1845,7 +1849,7 @@ static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
return false;
}
-RegExpNode* TextNode::FilterOneByte(int depth) {
+RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1857,7 +1861,7 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
base::uc16 c = quarks[j];
- if (elm.atom()->ignore_case()) {
+ if (IgnoreCase(flags)) {
c = unibrow::Latin1::TryConvertToLatin1(c);
}
if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
@@ -1876,8 +1880,7 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
if (range_count != 0 && ranges->at(0).from() == 0 &&
ranges->at(0).to() >= String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) &&
- RangesContainLatin1Equivalents(ranges)) {
+ if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1886,8 +1889,7 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
if (range_count == 0 ||
ranges->at(0).from() > String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) &&
- RangesContainLatin1Equivalents(ranges)) {
+ if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1895,26 +1897,27 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
}
}
}
- return FilterSuccessor(depth - 1);
+ return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
{
VisitMarker marker(info());
- RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1);
+ RegExpNode* continue_replacement =
+ continue_node_->FilterOneByte(depth - 1, flags);
// If we can't continue after the loop then there is no sense in doing the
// loop.
if (continue_replacement == nullptr) return set_replacement(nullptr);
}
- return ChoiceNode::FilterOneByte(depth - 1);
+ return ChoiceNode::FilterOneByte(depth - 1, flags);
}
-RegExpNode* ChoiceNode::FilterOneByte(int depth) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1934,7 +1937,8 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth) {
RegExpNode* survivor = nullptr;
for (int i = 0; i < choice_count; i++) {
GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1);
+ RegExpNode* replacement =
+ alternative.node()->FilterOneByte(depth - 1, flags);
DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK.
if (replacement != nullptr) {
alternatives_->at(i).set_node(replacement);
@@ -1954,7 +1958,7 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth) {
zone()->New<ZoneList<GuardedAlternative>>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
- alternatives_->at(i).node()->FilterOneByte(depth - 1);
+ alternatives_->at(i).node()->FilterOneByte(depth - 1, flags);
if (replacement != nullptr) {
alternatives_->at(i).set_node(replacement);
new_alternatives->Add(alternatives_->at(i), zone());
@@ -1964,7 +1968,8 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth) {
return this;
}
-RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
+RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
+ JSRegExp::Flags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1972,12 +1977,12 @@ RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = continue_node();
- RegExpNode* replacement = node->FilterOneByte(depth - 1);
+ RegExpNode* replacement = node->FilterOneByte(depth - 1, flags);
if (replacement == nullptr) return set_replacement(nullptr);
alternatives_->at(kContinueIndex).set_node(replacement);
RegExpNode* neg_node = lookaround_node();
- RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
+ RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1, flags);
// If the negative lookahead is always going to fail then
// we don't need to check it.
if (neg_replacement == nullptr) return set_replacement(replacement);
@@ -2316,13 +2321,13 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
TextElement elm = elements()->at(i);
int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
- if (SkipPass(pass, elm.atom()->ignore_case())) continue;
+ if (SkipPass(pass, IgnoreCase(compiler->flags()))) continue;
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
base::uc16 quark = quarks[j];
- if (elm.atom()->ignore_case()) {
+ if (IgnoreCase(compiler->flags())) {
// Everywhere else we assume that a non-Latin-1 character cannot match
          // a Latin-1 character. Avoid the cases where this assumption is
// invalid by using the Latin1 equivalent instead.
@@ -2391,29 +2396,27 @@ bool TextNode::SkipPass(TextEmitPassType pass, bool ignore_case) {
TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
ZoneList<CharacterRange>* ranges,
bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
+ RegExpNode* on_success) {
DCHECK_NOT_NULL(ranges);
ZoneList<TextElement>* elms = zone->New<ZoneList<TextElement>>(1, zone);
- elms->Add(TextElement::CharClass(
- zone->New<RegExpCharacterClass>(zone, ranges, flags)),
- zone);
+ elms->Add(
+ TextElement::CharClass(zone->New<RegExpCharacterClass>(zone, ranges)),
+ zone);
return zone->New<TextNode>(elms, read_backward, on_success);
}
TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
CharacterRange trail,
bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
+ RegExpNode* on_success) {
ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
ZoneList<TextElement>* elms = zone->New<ZoneList<TextElement>>(2, zone);
elms->Add(TextElement::CharClass(
- zone->New<RegExpCharacterClass>(zone, lead_ranges, flags)),
+ zone->New<RegExpCharacterClass>(zone, lead_ranges)),
zone);
elms->Add(TextElement::CharClass(
- zone->New<RegExpCharacterClass>(zone, trail_ranges, flags)),
+ zone->New<RegExpCharacterClass>(zone, trail_ranges)),
zone);
return zone->New<TextNode>(elms, read_backward, on_success);
}
@@ -2487,26 +2490,23 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
bound_checked_up_to_ = std::max(0, bound_checked_up_to_ - by);
}
-void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
+void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
+ JSRegExp::Flags flags) {
+ if (!IgnoreCase(flags)) return;
+#ifdef V8_INTL_SUPPORT
+ if (NeedsUnicodeCaseEquivalents(flags)) return;
+#endif
+
int element_count = elements()->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elements()->at(i);
if (elm.text_type() == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.char_class();
-#ifdef V8_INTL_SUPPORT
- bool case_equivalents_already_added =
- NeedsUnicodeCaseEquivalents(cc->flags());
-#else
- bool case_equivalents_already_added = false;
-#endif
- if (IgnoreCase(cc->flags()) && !case_equivalents_already_added) {
- // None of the standard character classes is different in the case
- // independent case and it slows us down if we don't know that.
- if (cc->is_standard(zone())) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::AddCaseEquivalents(isolate, zone(), ranges,
- is_one_byte);
- }
+ // None of the standard character classes is different in the case
+ // independent case and it slows us down if we don't know that.
+ if (cc->is_standard(zone())) continue;
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+ CharacterRange::AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
}
}
}
@@ -3634,9 +3634,10 @@ class EatsAtLeastPropagator : public AllStatic {
template <typename... Propagators>
class Analysis : public NodeVisitor {
public:
- Analysis(Isolate* isolate, bool is_one_byte)
+ Analysis(Isolate* isolate, bool is_one_byte, JSRegExp::Flags flags)
: isolate_(isolate),
is_one_byte_(is_one_byte),
+ flags_(flags),
error_(RegExpError::kNone) {}
void EnsureAnalyzed(RegExpNode* that) {
@@ -3677,7 +3678,7 @@ class Analysis : public NodeVisitor {
} while (false)
void VisitText(TextNode* that) override {
- that->MakeCaseIndependent(isolate(), is_one_byte_);
+ that->MakeCaseIndependent(isolate(), is_one_byte_, flags_);
EnsureAnalyzed(that->on_success());
if (has_failed()) return;
that->CalculateOffsets();
@@ -3744,16 +3745,17 @@ class Analysis : public NodeVisitor {
private:
Isolate* isolate_;
- bool is_one_byte_;
+ const bool is_one_byte_;
+ const JSRegExp::Flags flags_;
RegExpError error_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- RegExpNode* node) {
- Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(isolate,
- is_one_byte);
+ JSRegExp::Flags flags, RegExpNode* node) {
+ Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(
+ isolate, is_one_byte, flags);
DCHECK_EQ(node->info()->been_analyzed, false);
analysis.EnsureAnalyzed(node);
DCHECK_IMPLIES(analysis.has_failed(), analysis.error() != RegExpError::kNone);
@@ -3807,7 +3809,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
return;
}
base::uc16 character = atom->data()[j];
- if (IgnoreCase(atom->flags())) {
+ if (IgnoreCase(bm->compiler()->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
@@ -3846,7 +3848,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
}
RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
- RegExpNode* on_success, JSRegExp::Flags flags) {
+ RegExpNode* on_success) {
DCHECK(!read_backward());
ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
zone(), CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
@@ -3858,11 +3860,11 @@ RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
int stack_register = UnicodeLookaroundStackRegister();
int position_register = UnicodeLookaroundPositionRegister();
RegExpNode* step_back = TextNode::CreateForCharacterRanges(
- zone(), lead_surrogates, true, on_success, flags);
+ zone(), lead_surrogates, true, on_success);
RegExpLookaround::Builder builder(true, step_back, stack_register,
position_register);
RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
- zone(), trail_surrogates, false, builder.on_match_success(), flags);
+ zone(), trail_surrogates, false, builder.on_match_success());
optional_step_back->AddAlternative(
GuardedAlternative(builder.ForMatch(match_trail)));
@@ -3881,11 +3883,9 @@ RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
if (!data->tree->IsAnchoredAtStart() && !IsSticky(flags)) {
// Add a .*? at the beginning, outside the body capture, unless
// this expression is anchored at the beginning or sticky.
- JSRegExp::Flags default_flags = JSRegExp::Flags();
RegExpNode* loop_node = RegExpQuantifier::ToNode(
- 0, RegExpTree::kInfinity, false,
- zone()->New<RegExpCharacterClass>('*', default_flags), this,
- captured_body, data->contains_anchor);
+ 0, RegExpTree::kInfinity, false, zone()->New<RegExpCharacterClass>('*'),
+ this, captured_body, data->contains_anchor);
if (data->contains_anchor) {
// Unroll loop once, to take care of the case that might start
@@ -3893,22 +3893,21 @@ RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
ChoiceNode* first_step_node = zone()->New<ChoiceNode>(2, zone());
first_step_node->AddAlternative(GuardedAlternative(captured_body));
first_step_node->AddAlternative(GuardedAlternative(zone()->New<TextNode>(
- zone()->New<RegExpCharacterClass>('*', default_flags), false,
- loop_node)));
+ zone()->New<RegExpCharacterClass>('*'), false, loop_node)));
node = first_step_node;
} else {
node = loop_node;
}
}
if (is_one_byte) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, flags);
// Do it again to propagate the new nodes to places where they were not
// put because they had not been calculated yet.
if (node != nullptr) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, flags);
}
} else if (IsUnicode(flags) && (IsGlobal(flags) || IsSticky(flags))) {
- node = OptionallyStepBackToLeadSurrogate(node, flags);
+ node = OptionallyStepBackToLeadSurrogate(node);
}
if (node == nullptr) node = zone()->New<EndNode>(EndNode::BACKTRACK, zone());
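
The hunks above move the ignore-case decision in FilterOneByte from per-atom flags (elm.atom()->ignore_case()) to the pattern-wide flags threaded through the node graph. The following standalone sketch (not V8 source; the fold table is a tiny illustrative stand-in for the real Unicode case-folding data) models the core test: under /i, a character above 0xFF can still survive one-byte filtering if some case-folded form fits into Latin-1, otherwise the node can never match a one-byte subject and is pruned.

#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint16_t kMaxOneByteChar = 0xFF;

// Illustrative stand-in for a case-insensitive Latin-1 mapping; real engines
// consult full Unicode case-folding tables.
uint16_t TryFoldToLatin1(uint16_t c) {
  switch (c) {
    case 0x212A: return 'k';     // KELVIN SIGN folds to 'k'
    case 0x0178: return 0x00FF;  // LATIN CAPITAL Y WITH DIAERESIS folds to U+00FF
    default:     return c;       // no Latin-1 equivalent known here
  }
}

// Returns false if the literal atom can never match a one-byte (Latin-1) subject.
bool AtomCanMatchOneByte(const std::vector<uint16_t>& atom, bool ignore_case) {
  for (uint16_t c : atom) {
    if (ignore_case) c = TryFoldToLatin1(c);
    if (c > kMaxOneByteChar) return false;  // prune this node
  }
  return true;
}

int main() {
  std::vector<uint16_t> kelvin = {0x212A};          // "K" (Kelvin sign)
  std::cout << AtomCanMatchOneByte(kelvin, false)   // 0: pruned without /i
            << AtomCanMatchOneByte(kelvin, true)    // 1: folds to 'k', fits Latin-1
            << '\n';
}
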
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index 4d53e47094..2be7a48e9a 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -424,7 +424,8 @@ struct PreloadState {
// Analysis performs assertion propagation and computes eats_at_least_ values.
// See the comments on AssertionPropagator and EatsAtLeastPropagator for more
// details.
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpNode* node);
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
+ JSRegExp::Flags flags, RegExpNode* node);
class FrequencyCollator {
public:
@@ -474,7 +475,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool is_one_byte);
+ JSRegExp::Flags flags, bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -531,8 +532,7 @@ class RegExpCompiler {
// If the regexp matching starts within a surrogate pair, step back to the
// lead surrogate and start matching from there.
- RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpNode* on_success,
- JSRegExp::Flags flags);
+ RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpNode* on_success);
inline void AddWork(RegExpNode* node) {
if (!node->on_work_list() && !node->label()->is_bound()) {
@@ -553,6 +553,8 @@ class RegExpCompiler {
inline void IncrementRecursionDepth() { recursion_depth_++; }
inline void DecrementRecursionDepth() { recursion_depth_--; }
+ JSRegExp::Flags flags() const { return flags_; }
+
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
inline bool one_byte() { return one_byte_; }
@@ -583,6 +585,7 @@ class RegExpCompiler {
int unicode_lookaround_position_register_;
ZoneVector<RegExpNode*>* work_list_;
int recursion_depth_;
+ const JSRegExp::Flags flags_;
RegExpMacroAssembler* macro_assembler_;
bool one_byte_;
bool reg_exp_too_big_;
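
OptionallyStepBackToLeadSurrogate above only loses its now-redundant flags argument; the underlying idea is unchanged: if matching would start on a trail surrogate, step back one code unit so the whole surrogate pair is considered. A minimal standalone sketch of that classification (surrogate ranges per UTF-16; the helper names are made up for illustration):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uint16_t kLeadSurrogateStart  = 0xD800, kLeadSurrogateEnd  = 0xDBFF;
constexpr uint16_t kTrailSurrogateStart = 0xDC00, kTrailSurrogateEnd = 0xDFFF;

bool IsLeadSurrogate(uint16_t u)  { return u >= kLeadSurrogateStart  && u <= kLeadSurrogateEnd; }
bool IsTrailSurrogate(uint16_t u) { return u >= kTrailSurrogateStart && u <= kTrailSurrogateEnd; }

// If a match attempt would start at pos on a trail surrogate preceded by a
// lead surrogate, start one code unit earlier instead.
size_t AdjustStart(const uint16_t* s, size_t len, size_t pos) {
  if (pos > 0 && pos < len && IsTrailSurrogate(s[pos]) && IsLeadSurrogate(s[pos - 1])) {
    return pos - 1;
  }
  return pos;
}

int main() {
  const uint16_t str[] = {0xD83D, 0xDE00, 'x'};  // U+1F600 followed by 'x'
  assert(AdjustStart(str, 3, 1) == 0);  // starting inside the pair: step back
  assert(AdjustStart(str, 3, 2) == 2);  // 'x' is an ordinary code unit
  return 0;
}
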
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
index 23dbd4cdcb..537cf96201 100644
--- a/deps/v8/src/regexp/regexp-nodes.h
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -205,7 +205,9 @@ class RegExpNode : public ZoneObject {
// If we know that the input is one-byte then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or nullptr if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth) { return this; }
+ virtual RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) {
+ return this;
+ }
// Helper for FilterOneByte.
RegExpNode* replacement() {
DCHECK(info()->replacement_calculated);
@@ -294,7 +296,7 @@ class SeqRegExpNode : public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) {}
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- RegExpNode* FilterOneByte(int depth) override;
+ RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override {
on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
@@ -302,7 +304,7 @@ class SeqRegExpNode : public RegExpNode {
}
protected:
- RegExpNode* FilterSuccessor(int depth);
+ RegExpNode* FilterSuccessor(int depth, JSRegExp::Flags flags);
private:
RegExpNode* on_success_;
@@ -406,15 +408,13 @@ class TextNode : public SeqRegExpNode {
static TextNode* CreateForCharacterRanges(Zone* zone,
ZoneList<CharacterRange>* ranges,
bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
+ RegExpNode* on_success);
// Create TextNode for a surrogate pair with a range given for the
// lead and the trail surrogate each.
static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
CharacterRange trail,
bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
+ RegExpNode* on_success);
void Accept(NodeVisitor* visitor) override;
void Emit(RegExpCompiler* compiler, Trace* trace) override;
void GetQuickCheckDetails(QuickCheckDetails* details,
@@ -422,14 +422,15 @@ class TextNode : public SeqRegExpNode {
bool not_at_start) override;
ZoneList<TextElement>* elements() { return elms_; }
bool read_backward() { return read_backward_; }
- void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
+ void MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
+ JSRegExp::Flags flags);
int GreedyLoopTextLength() override;
RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override;
void CalculateOffsets();
- RegExpNode* FilterOneByte(int depth) override;
+ RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
int Length();
private:
@@ -622,7 +623,7 @@ class ChoiceNode : public RegExpNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return true;
}
- RegExpNode* FilterOneByte(int depth) override;
+ RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
virtual bool read_backward() { return false; }
protected:
@@ -694,7 +695,7 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
return !is_first;
}
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth) override;
+ RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
};
class LoopChoiceNode : public ChoiceNode {
@@ -727,7 +728,7 @@ class LoopChoiceNode : public ChoiceNode {
int min_loop_iterations() const { return min_loop_iterations_; }
bool read_backward() override { return read_backward_; }
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth) override;
+ RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
private:
// AddAlternative is made private for loop nodes because alternatives
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index aaa7b9cf8b..1201e555ad 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -250,14 +250,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kNothingToRepeat);
case '^': {
Advance();
- if (builder->multiline()) {
- builder->AddAssertion(zone()->New<RegExpAssertion>(
- RegExpAssertion::START_OF_LINE, builder->flags()));
- } else {
- builder->AddAssertion(zone()->New<RegExpAssertion>(
- RegExpAssertion::START_OF_INPUT, builder->flags()));
- set_contains_anchor();
- }
+ builder->AddAssertion(zone()->New<RegExpAssertion>(
+ builder->multiline() ? RegExpAssertion::START_OF_LINE
+ : RegExpAssertion::START_OF_INPUT));
+ set_contains_anchor();
continue;
}
case '$': {
@@ -265,8 +261,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::AssertionType assertion_type =
builder->multiline() ? RegExpAssertion::END_OF_LINE
: RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(assertion_type, builder->flags()));
+ builder->AddAssertion(zone()->New<RegExpAssertion>(assertion_type));
continue;
}
case '.': {
@@ -283,7 +278,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges, builder->flags());
+ zone()->New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -305,13 +300,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kEscapeAtEndOfPattern);
case 'b':
Advance(2);
- builder->AddAssertion(zone()->New<RegExpAssertion>(
- RegExpAssertion::BOUNDARY, builder->flags()));
+ builder->AddAssertion(
+ zone()->New<RegExpAssertion>(RegExpAssertion::BOUNDARY));
continue;
case 'B':
Advance(2);
- builder->AddAssertion(zone()->New<RegExpAssertion>(
- RegExpAssertion::NON_BOUNDARY, builder->flags()));
+ builder->AddAssertion(
+ zone()->New<RegExpAssertion>(RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
// CharacterClassEscape
@@ -330,8 +325,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
zone()->New<ZoneList<CharacterRange>>(2, zone());
CharacterRange::AddClassEscape(
c, ranges, unicode() && builder->ignore_case(), zone());
- RegExpCharacterClass* cc = zone()->New<RegExpCharacterClass>(
- zone(), ranges, builder->flags());
+ RegExpCharacterClass* cc =
+ zone()->New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -346,8 +341,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneVector<char> name_2(zone());
if (ParsePropertyClassName(&name_1, &name_2)) {
if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
- RegExpCharacterClass* cc = zone()->New<RegExpCharacterClass>(
- zone(), ranges, builder->flags());
+ RegExpCharacterClass* cc =
+ zone()->New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -605,68 +600,6 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
lookaround_type = RegExpLookaround::LOOKAHEAD;
subexpr_type = NEGATIVE_LOOKAROUND;
break;
- case '-':
- case 'i':
- case 's':
- case 'm': {
- if (!FLAG_regexp_mode_modifiers) {
- ReportError(RegExpError::kInvalidGroup);
- return nullptr;
- }
- Advance();
- bool flags_sense = true; // Switching on flags.
- while (subexpr_type != GROUPING) {
- switch (current()) {
- case '-':
- if (!flags_sense) {
- ReportError(RegExpError::kMultipleFlagDashes);
- return nullptr;
- }
- flags_sense = false;
- Advance();
- continue;
- case 's':
- case 'i':
- case 'm': {
- JSRegExp::Flags bit = JSRegExp::kUnicode;
- if (current() == 'i') bit = JSRegExp::kIgnoreCase;
- if (current() == 'm') bit = JSRegExp::kMultiline;
- if (current() == 's') bit = JSRegExp::kDotAll;
- if (((switch_on | switch_off) & bit) != 0) {
- ReportError(RegExpError::kRepeatedFlag);
- return nullptr;
- }
- if (flags_sense) {
- switch_on |= bit;
- } else {
- switch_off |= bit;
- }
- Advance();
- continue;
- }
- case ')': {
- Advance();
- state->builder()
- ->FlushText(); // Flush pending text using old flags.
- // These (?i)-style flag switches don't put us in a subexpression
- // at all, they just modify the flags in the rest of the current
- // subexpression.
- JSRegExp::Flags flags =
- (state->builder()->flags() | switch_on) & ~switch_off;
- state->builder()->set_flags(flags);
- return state;
- }
- case ':':
- Advance();
- subexpr_type = GROUPING; // Will break us out of the outer loop.
- continue;
- default:
- ReportError(RegExpError::kInvalidFlagGroup);
- return nullptr;
- }
- }
- break;
- }
case '<':
Advance();
if (Next() == '=') {
@@ -1493,7 +1426,7 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
prefix_ranges->Add(CharacterRange::Singleton('#'), zone());
prefix_ranges->Add(CharacterRange::Singleton('*'), zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), prefix_ranges, flags));
+ zone()->New<RegExpCharacterClass>(zone(), prefix_ranges));
builder.AddCharacter(0xFE0F);
builder.AddCharacter(0x20E3);
return builder.ToRegExp();
@@ -1506,13 +1439,13 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
LookupPropertyValueName(UCHAR_EMOJI_MODIFIER_BASE, "Y", false,
modifier_base_ranges, zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_base_ranges, flags));
+ zone()->New<RegExpCharacterClass>(zone(), modifier_base_ranges));
ZoneList<CharacterRange>* modifier_ranges =
zone()->New<ZoneList<CharacterRange>>(2, zone());
LookupPropertyValueName(UCHAR_EMOJI_MODIFIER, "Y", false, modifier_ranges,
zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_ranges, flags));
+ zone()->New<RegExpCharacterClass>(zone(), modifier_ranges));
return builder.ToRegExp();
}
@@ -1780,7 +1713,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
RegExpCharacterClass::CharacterClassFlags character_class_flags;
if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
- return zone()->New<RegExpCharacterClass>(zone(), ranges, builder->flags(),
+ return zone()->New<RegExpCharacterClass>(zone(), ranges,
character_class_flags);
}
@@ -1874,7 +1807,7 @@ void RegExpBuilder::AddTrailSurrogate(base::uc16 trail_surrogate) {
surrogate_pair.Add(lead_surrogate, zone());
surrogate_pair.Add(trail_surrogate, zone());
RegExpAtom* atom =
- zone()->New<RegExpAtom>(surrogate_pair.ToConstVector(), flags_);
+ zone()->New<RegExpAtom>(surrogate_pair.ToConstVector());
AddAtom(atom);
}
} else {
@@ -1897,8 +1830,7 @@ void RegExpBuilder::FlushCharacters() {
FlushPendingSurrogate();
pending_empty_ = false;
if (characters_ != nullptr) {
- RegExpTree* atom =
- zone()->New<RegExpAtom>(characters_->ToConstVector(), flags_);
+ RegExpTree* atom = zone()->New<RegExpAtom>(characters_->ToConstVector());
characters_ = nullptr;
text_.Add(atom, zone());
LAST(ADD_ATOM);
@@ -1972,8 +1904,7 @@ void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
void RegExpBuilder::AddCharacterClassForDesugaring(base::uc32 c) {
AddTerm(zone()->New<RegExpCharacterClass>(
- zone(), CharacterRange::List(zone(), CharacterRange::Singleton(c)),
- flags_));
+ zone(), CharacterRange::List(zone(), CharacterRange::Singleton(c))));
}
void RegExpBuilder::AddAtom(RegExpTree* term) {
@@ -2083,11 +2014,11 @@ bool RegExpBuilder::AddQuantifierToAtom(
if (num_chars > 1) {
base::Vector<const base::uc16> prefix =
char_vector.SubVector(0, num_chars - 1);
- text_.Add(zone()->New<RegExpAtom>(prefix, flags_), zone());
+ text_.Add(zone()->New<RegExpAtom>(prefix), zone());
char_vector = char_vector.SubVector(num_chars - 1, num_chars);
}
characters_ = nullptr;
- atom = zone()->New<RegExpAtom>(char_vector, flags_);
+ atom = zone()->New<RegExpAtom>(char_vector);
FlushText();
} else if (text_.length() > 0) {
DCHECK(last_added_ == ADD_ATOM);
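
The parser changes above stop storing a JSRegExp::Flags copy on every RegExpAtom and RegExpCharacterClass and delete the experimental (?i)/(?m)/(?s) inline-modifier parsing, so the flag set is fixed once per pattern. A simplified standalone model of such an immutable per-pattern flag bitmask (bit values are illustrative, not V8's actual encoding):

#include <cstdint>
#include <iostream>

// Illustrative flag bits; the real JSRegExp::Flags values differ.
enum Flags : uint32_t {
  kNone       = 0,
  kIgnoreCase = 1 << 0,
  kMultiline  = 1 << 1,
  kDotAll     = 1 << 2,
  kUnicode    = 1 << 3,
};

constexpr bool IgnoreCase(uint32_t flags)  { return (flags & kIgnoreCase) != 0; }
constexpr bool IsMultiline(uint32_t flags) { return (flags & kMultiline) != 0; }

int main() {
  // Flags are decided once when the pattern is compiled; every node consults
  // the same value instead of carrying its own copy.
  const uint32_t flags = kIgnoreCase | kUnicode;
  std::cout << IgnoreCase(flags) << IsMultiline(flags) << '\n';  // prints 10
}
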
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index d2e5f1a1f9..9bdebe1918 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -225,7 +225,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, atom_string,
isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
- if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
+ if (!IgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
RegExpImpl::AtomCompile(isolate, re, pattern, flags, atom_string);
has_been_compiled = true;
}
@@ -802,7 +802,8 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
return false;
}
- RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
+ RegExpCompiler compiler(isolate, zone, data->capture_count, flags,
+ is_one_byte);
if (compiler.optimize()) {
compiler.set_optimize(!TooMuchRegExpCode(isolate, pattern));
@@ -821,7 +822,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
}
data->node = compiler.PreprocessRegExp(data, flags, is_one_byte);
- data->error = AnalyzeRegExp(isolate, is_one_byte, data->node);
+ data->error = AnalyzeRegExp(isolate, is_one_byte, flags, data->node);
if (data->error != RegExpError::kNone) {
return false;
}
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index ecdab593b2..1cf4f9f644 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -41,13 +41,20 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
Handle<String> name(constructor->shared().Name(), isolate);
+
+ Handle<Context> context = handle(constructor->native_context(), isolate);
+ DCHECK(context->IsNativeContext());
+ Handle<JSFunction> realm_type_error_function(
+ JSFunction::cast(context->get(Context::TYPE_ERROR_FUNCTION_INDEX)),
+ isolate);
if (name->length() == 0) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kAnonymousConstructorNonCallable));
+ isolate, NewError(realm_type_error_function,
+ MessageTemplate::kAnonymousConstructorNonCallable));
}
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNonCallable, name));
+ isolate, NewError(realm_type_error_function,
+ MessageTemplate::kConstructorNonCallable, name));
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 9d6bfb7a7a..cb92eae13c 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
@@ -193,13 +194,17 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<Object> object) {
auto result = ArrayList::New(isolate, 8 * 2);
if (object->IsJSObject()) {
- PrototypeIterator iter(isolate, Handle<JSObject>::cast(object));
- Handle<Object> prototype = PrototypeIterator::GetCurrent(iter);
- if (!prototype->IsNull(isolate)) {
- result = ArrayList::Add(
- isolate, result,
- isolate->factory()->NewStringFromStaticChars("[[Prototype]]"),
- prototype);
+ PrototypeIterator iter(isolate, Handle<JSObject>::cast(object),
+ kStartAtReceiver);
+ if (iter.HasAccess()) {
+ iter.Advance();
+ Handle<Object> prototype = PrototypeIterator::GetCurrent(iter);
+ if (!prototype->IsNull(isolate)) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromStaticChars("[[Prototype]]"),
+ prototype);
+ }
}
}
if (object->IsJSBoundFunction()) {
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index b3396c753c..f9dce4d271 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -329,10 +329,10 @@ RUNTIME_FUNCTION(Runtime_StackGuardWithGap) {
return isolate->stack_guard()->HandleInterrupts();
}
-RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+namespace {
+
+void BytecodeBudgetInterruptFromBytecode(Isolate* isolate,
+ Handle<JSFunction> function) {
function->SetInterruptBudget();
bool should_mark_for_optimization = function->has_feedback_vector();
if (!function->has_feedback_vector()) {
@@ -343,7 +343,7 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
// Also initialize the invocation count here. This is only really needed for
// OSR. When we OSR functions with lazy feedback allocation we want to have
  // a non-zero invocation count so we can inline functions.
- function->feedback_vector().set_invocation_count(1);
+ function->feedback_vector().set_invocation_count(1, kRelaxedStore);
}
if (CanCompileWithBaseline(isolate, function->shared()) &&
!function->ActiveTierIsBaseline()) {
@@ -361,6 +361,42 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
isolate->counters()->runtime_profiler_ticks()->Increment();
isolate->runtime_profiler()->MarkCandidatesForOptimizationFromBytecode();
}
+}
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheckFromBytecode) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ TRACE_EVENT0("v8.execute", "V8.BytecodeBudgetInterruptWithStackCheck");
+
+ // Check for stack interrupts here so that we can fold the interrupt check
+ // into bytecode budget interrupts.
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ // We ideally wouldn't actually get StackOverflows here, since we stack
+ // check on bytecode entry, but it's possible that this check fires due to
+ // the runtime function call being what overflows the stack.
+ return isolate->StackOverflow();
+ } else if (check.InterruptRequested()) {
+ Object return_value = isolate->stack_guard()->HandleInterrupts();
+ if (!return_value.IsUndefined(isolate)) {
+ return return_value;
+ }
+ }
+
+ BytecodeBudgetInterruptFromBytecode(isolate, function);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ TRACE_EVENT0("v8.execute", "V8.BytecodeBudgetInterrupt");
+
+ BytecodeBudgetInterruptFromBytecode(isolate, function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -369,6 +405,9 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 0);
+ // TODO(leszeks): Consider checking stack interrupts here, and removing
+ // those checks for code that can have budget interrupts.
+
DCHECK(feedback_cell->value().IsFeedbackVector());
FeedbackVector::SetInterruptBudget(*feedback_cell);
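
The new Runtime_BytecodeBudgetInterruptWithStackCheckFromBytecode above layers a stack and interrupt check in front of the shared budget-interrupt body. A standalone sketch of that ordering (types and return values are illustrative, not the V8 runtime API): bail out on stack overflow first, then service any pending interrupt, and only then do the budget bookkeeping.

#include <iostream>
#include <optional>
#include <string>

struct StackCheck {
  bool overflowed = false;
  bool interrupt_pending = false;
};

// Returns an "exception" marker on overflow, a value from the interrupt
// handler if one was pending, and otherwise runs the budget work.
std::optional<std::string> BudgetInterruptWithStackCheck(const StackCheck& check) {
  if (check.overflowed) return "StackOverflow";            // bail out first
  if (check.interrupt_pending) return "HandledInterrupt";  // service interrupts next
  // ... budget bookkeeping (reset budget, maybe mark for optimization) ...
  return std::nullopt;
}

int main() {
  std::cout << BudgetInterruptWithStackCheck({true, false}).value_or("ok") << '\n';
  std::cout << BudgetInterruptWithStackCheck({false, false}).value_or("ok") << '\n';
}
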
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 1862b504fe..52fadb8c8c 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -28,7 +28,9 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
Handle<Script> script(Script::cast(function->shared().script()), isolate);
while (script->has_eval_from_shared()) {
- script = handle(Script::cast(script->eval_from_shared().script()), isolate);
+ Object maybe_script = script->eval_from_shared().script();
+ CHECK(maybe_script.IsScript());
+ script = handle(Script::cast(maybe_script), isolate);
}
RETURN_RESULT_OR_FAILURE(isolate,
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 5fc2b2bd23..c52449a642 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -961,8 +961,6 @@ RUNTIME_FUNCTION(
}
RUNTIME_FUNCTION(Runtime_RegExpBuildIndices) {
- DCHECK(FLAG_harmony_regexp_match_indices);
-
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, match_info, 1);
diff --git a/deps/v8/src/runtime/runtime-test-wasm.cc b/deps/v8/src/runtime/runtime-test-wasm.cc
index 72598c7345..8425b1fa18 100644
--- a/deps/v8/src/runtime/runtime-test-wasm.cc
+++ b/deps/v8/src/runtime/runtime-test-wasm.cc
@@ -293,7 +293,7 @@ RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
return *isolate->factory()->NewNumberFromSize(trap_count);
}
-RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
+RUNTIME_FUNCTION(Runtime_GetWasmExceptionTagId) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0);
@@ -301,9 +301,9 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
Handle<Object> tag =
WasmExceptionPackage::GetExceptionTag(isolate, exception);
CHECK(tag->IsWasmExceptionTag());
- Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
- for (int index = 0; index < exceptions_table->length(); ++index) {
- if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
+ Handle<FixedArray> tags_table(instance->tags_table(), isolate);
+ for (int index = 0; index < tags_table->length(); ++index) {
+ if (tags_table->get(index) == *tag) return Smi::FromInt(index);
}
UNREACHABLE();
}
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index fb5949e2c9..69b0f6241b 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -42,6 +42,11 @@ V8_WARN_UNUSED_RESULT Object CrashUnlessFuzzing(Isolate* isolate) {
return ReadOnlyRoots(isolate).undefined_value();
}
+V8_WARN_UNUSED_RESULT bool CrashUnlessFuzzingReturnFalse(Isolate* isolate) {
+ CHECK(FLAG_fuzzing);
+ return false;
+}
+
// Returns |value| unless correctness-fuzzer-suppressions is enabled,
// otherwise returns undefined_value.
V8_WARN_UNUSED_RESULT Object ReturnFuzzSafe(Object value, Isolate* isolate) {
@@ -212,42 +217,35 @@ namespace {
enum class TierupKind { kTierupBytecode, kTierupBytecodeOrMidTier };
-Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
- TierupKind tierup_kind) {
- if (args.length() != 1 && args.length() != 2) {
- return CrashUnlessFuzzing(isolate);
- }
-
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
- Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
-
+bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
+ TierupKind tierup_kind,
+ IsCompiledScope* is_compiled_scope) {
// The following conditions were lifted (in part) from the DCHECK inside
// JSFunction::MarkForOptimization().
if (!function->shared().allows_lazy_compilation()) {
- return CrashUnlessFuzzing(isolate);
+ return CrashUnlessFuzzingReturnFalse(isolate);
}
// If function isn't compiled, compile it now.
- IsCompiledScope is_compiled_scope(
- function->shared().is_compiled_scope(isolate));
- if (!is_compiled_scope.is_compiled() &&
+ if (!is_compiled_scope->is_compiled() &&
!Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
- return CrashUnlessFuzzing(isolate);
+ is_compiled_scope)) {
+ return CrashUnlessFuzzingReturnFalse(isolate);
}
- if (!FLAG_opt) return ReadOnlyRoots(isolate).undefined_value();
+ if (!FLAG_opt) return false;
if (function->shared().optimization_disabled() &&
function->shared().disable_optimization_reason() ==
BailoutReason::kNeverOptimize) {
- return CrashUnlessFuzzing(isolate);
+ return CrashUnlessFuzzingReturnFalse(isolate);
}
#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
+ if (function->shared().HasAsmWasmData()) {
+ return CrashUnlessFuzzingReturnFalse(isolate);
+ }
#endif // V8_ENABLE_WEBASSEMBLY
if (FLAG_testing_d8_test_runner) {
@@ -263,6 +261,26 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
+ return false;
+ }
+
+ return true;
+}
+
+Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
+ TierupKind tierup_kind) {
+ if (args.length() != 1 && args.length() != 2) {
+ return CrashUnlessFuzzing(isolate);
+ }
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate));
+ if (!CanOptimizeFunction(function, isolate, tierup_kind,
+ &is_compiled_scope)) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -420,6 +438,32 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_OptimizeFunctionForTopTier) {
+ // TODO(rmcilroy): Ideally this should be rolled into
+ // OptimizeFunctionOnNextCall, but there is no way to mark the tier to be
+ // optimized using the regular optimization marking system.
+ HandleScope scope(isolate);
+ if (args.length() != 1) {
+ return CrashUnlessFuzzing(isolate);
+ }
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate));
+ if (!CanOptimizeFunction(function, isolate,
+ TierupKind::kTierupBytecodeOrMidTier,
+ &is_compiled_scope)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ Compiler::CompileOptimized(isolate, function, ConcurrencyMode::kNotConcurrent,
+ CodeKindForTopTier());
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
HandleScope scope(isolate);
DCHECK(args.length() == 0 || args.length() == 1);
@@ -634,6 +678,34 @@ RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DisableOptimizationFinalization) {
+ DCHECK_EQ(0, args.length());
+ DCHECK(!FLAG_block_concurrent_recompilation);
+ CHECK(isolate->concurrent_recompilation_enabled());
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WaitForBackgroundOptimization) {
+ DCHECK_EQ(0, args.length());
+ DCHECK(!FLAG_block_concurrent_recompilation);
+ CHECK(isolate->concurrent_recompilation_enabled());
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_FinalizeOptimization) {
+ DCHECK_EQ(0, args.length());
+ DCHECK(!FLAG_block_concurrent_recompilation);
+ CHECK(isolate->concurrent_recompilation_enabled());
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
static void ReturnNull(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().SetNull();
}
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 1d75cb8f6f..df4ea14164 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -168,18 +168,8 @@ RUNTIME_FUNCTION(Runtime_WasmThrow) {
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<WasmExceptionTag> tag(tag_raw, isolate);
Handle<FixedArray> values(values_raw, isolate);
-
- Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
- MessageTemplate::kWasmExceptionError);
- Object::SetProperty(
- isolate, exception, isolate->factory()->wasm_exception_tag_symbol(), tag,
- StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
- .Check();
- Object::SetProperty(
- isolate, exception, isolate->factory()->wasm_exception_values_symbol(),
- values, StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
- .Check();
-
+ Handle<WasmExceptionPackage> exception =
+ WasmExceptionPackage::New(isolate, tag, values);
wasm::GetWasmEngine()->SampleThrowEvent(isolate);
return isolate->Throw(*exception);
}
@@ -238,7 +228,7 @@ void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
function_index)
.ToHandleChecked();
- exported_function->set_code(*wrapper_code);
+ exported_function->set_code(*wrapper_code, kReleaseStore);
WasmExportedFunctionData function_data =
exported_function->shared().wasm_exported_function_data();
function_data.set_wrapper_code(*wrapper_code);
@@ -559,7 +549,13 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
// Stepping can repeatedly create code, and code GC requires stack guards to
// be executed on all involved isolates. Proactively do this here.
StackLimitCheck check(isolate);
- if (check.InterruptRequested()) isolate->stack_guard()->HandleInterrupts();
+ if (check.InterruptRequested()) {
+ Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
+ // Interrupt handling can create an exception, including the
+ // termination exception.
+ if (interrupt_object.IsException(isolate)) return interrupt_object;
+ DCHECK(interrupt_object.IsUndefined(isolate));
+ }
// Enter the debugger.
DebugScope debug_scope(isolate->debug());
@@ -657,7 +653,8 @@ RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
CONVERT_UINT32_ARG_CHECKED(length, 4);
bool overlapping_ranges =
dst_array->ptr() == src_array->ptr() &&
- (dst_index + length > src_index || src_index + length > dst_index);
+ (dst_index < src_index ? dst_index + length > src_index
+ : src_index + length > dst_index);
wasm::ValueType element_type = src_array->type()->element_type();
if (element_type.is_reference()) {
ObjectSlot dst_slot = dst_array->ElementSlot(dst_index);
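
The Runtime_WasmArrayCopy hunk above tightens the overlap test for same-array copies: the old dst_index + length > src_index || src_index + length > dst_index is true for any non-empty copy within the same array, whereas the replacement is the standard interval-overlap test (a < b + n && b < a + n). A quick standalone check of the corrected condition in plain C++, with no V8 types:

#include <cassert>
#include <cstdint>

// Two ranges [a, a+n) and [b, b+n) overlap exactly when each starts before
// the other ends; the ternary below is equivalent to a < b + n && b < a + n.
bool RangesOverlap(uint32_t a, uint32_t b, uint32_t n) {
  return a < b ? a + n > b : b + n > a;
}

int main() {
  // The old OR-based test reports overlap here even though [0, 5) and
  // [10, 15) are disjoint; the corrected test does not.
  assert(!RangesOverlap(0, 10, 5));
  assert(RangesOverlap(0, 3, 5));   // [0, 5) and [3, 8) share [3, 5)
  assert(RangesOverlap(7, 7, 1));   // identical non-empty ranges overlap
  return 0;
}
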
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 73cb35336e..045ffb3641 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -206,58 +206,59 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTL(F, I)
#endif // V8_INTL_SUPPORT
-#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
- F(AccessCheck, 1, 1) \
- F(AllocateByteArray, 1, 1) \
- F(AllocateInYoungGeneration, 2, 1) \
- F(AllocateInOldGeneration, 2, 1) \
- F(AllocateSeqOneByteString, 1, 1) \
- F(AllocateSeqTwoByteString, 1, 1) \
- F(AllowDynamicFunction, 1, 1) \
- I(CreateAsyncFromSyncIterator, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(DoubleToStringWithRadix, 2, 1) \
- F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
- F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(GetTemplateObject, 3, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(BytecodeBudgetInterruptFromBytecode, 1, 1) \
- F(BytecodeBudgetInterruptFromCode, 1, 1) \
- F(NewError, 2, 1) \
- F(NewReferenceError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewTypeError, -1 /* [1, 4] */, 1) \
- F(OrdinaryHasInstance, 2, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ReportMessageFromMicrotask, 1, 1) \
- F(ReThrow, 1, 1) \
- F(RunMicrotaskCallback, 2, 1) \
- F(PerformMicrotaskCheckpoint, 0, 1) \
- F(StackGuard, 0, 1) \
- F(StackGuardWithGap, 1, 1) \
- F(Throw, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowInvalidStringLength, 0, 1) \
- F(ThrowInvalidTypedArrayAlignment, 2, 1) \
- F(ThrowIteratorError, 1, 1) \
- F(ThrowSpreadArgError, 2, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowNotConstructor, 1, 1) \
- F(ThrowPatternAssignmentNonCoercible, 1, 1) \
- F(ThrowRangeError, -1 /* >= 1 */, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowAccessedUninitializedVariable, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowThrowMethodMissing, 0, 1) \
- F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1) \
- F(Typeof, 1, 1) \
+#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
+ F(AccessCheck, 1, 1) \
+ F(AllocateByteArray, 1, 1) \
+ F(AllocateInYoungGeneration, 2, 1) \
+ F(AllocateInOldGeneration, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
+ F(AllowDynamicFunction, 1, 1) \
+ I(CreateAsyncFromSyncIterator, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(DoubleToStringWithRadix, 2, 1) \
+ F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
+ F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(GetTemplateObject, 3, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(BytecodeBudgetInterruptFromBytecode, 1, 1) \
+ F(BytecodeBudgetInterruptWithStackCheckFromBytecode, 1, 1) \
+ F(BytecodeBudgetInterruptFromCode, 1, 1) \
+ F(NewError, 2, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, -1 /* [1, 4] */, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReportMessageFromMicrotask, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(RunMicrotaskCallback, 2, 1) \
+ F(PerformMicrotaskCheckpoint, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(StackGuardWithGap, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowConstructorReturnedNonObject, 0, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowInvalidTypedArrayAlignment, 2, 1) \
+ F(ThrowIteratorError, 1, 1) \
+ F(ThrowSpreadArgError, 2, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowNotConstructor, 1, 1) \
+ F(ThrowPatternAssignmentNonCoercible, 1, 1) \
+ F(ThrowRangeError, -1 /* >= 1 */, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowAccessedUninitializedVariable, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowThrowMethodMissing, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1) \
+ F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F, I) \
@@ -462,8 +463,9 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TEST(F, I) \
F(Abort, 1, 1) \
- F(AbortJS, 1, 1) \
F(AbortCSAAssert, 1, 1) \
+ F(AbortJS, 1, 1) \
+ F(ArrayIteratorProtector, 0, 1) \
F(ArraySpeciesProtector, 0, 1) \
F(BaselineOsr, -1, 1) \
F(ClearFunctionFeedback, 1, 1) \
@@ -480,11 +482,10 @@ namespace internal {
F(DisallowCodegenFromStrings, 1, 1) \
F(DisassembleFunction, 1, 1) \
F(DynamicCheckMapsEnabled, 0, 1) \
- F(IsTopTierTurboprop, 0, 1) \
- F(IsMidTierTurboprop, 0, 1) \
- F(IsAtomicsWaitAllowed, 0, 1) \
F(EnableCodeLoggingForTesting, 0, 1) \
F(EnsureFeedbackVectorForFunction, 1, 1) \
+ F(DisableOptimizationFinalization, 0, 1) \
+ F(FinalizeOptimization, 0, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
@@ -495,7 +496,6 @@ namespace internal {
F(HasElementsInALargeObjectSpace, 1, 1) \
F(HasFastElements, 1, 1) \
F(HasFastProperties, 1, 1) \
- F(HasOwnConstDataProperty, 2, 1) \
F(HasFixedBigInt64Elements, 1, 1) \
F(HasFixedBigUint64Elements, 1, 1) \
F(HasFixedFloat32Elements, 1, 1) \
@@ -509,6 +509,7 @@ namespace internal {
F(HasFixedUint8Elements, 1, 1) \
F(HasHoleyElements, 1, 1) \
F(HasObjectElements, 1, 1) \
+ F(HasOwnConstDataProperty, 2, 1) \
F(HasPackedElements, 1, 1) \
F(HasSloppyArgumentsElements, 1, 1) \
F(HasSmiElements, 1, 1) \
@@ -518,44 +519,48 @@ namespace internal {
F(ICsAreEnabled, 0, 1) \
F(InLargeObjectSpace, 1, 1) \
F(InYoungGeneration, 1, 1) \
+ F(Is64Bit, 0, 1) \
+ F(IsAtomicsWaitAllowed, 0, 1) \
F(IsBeingInterpreted, 0, 1) \
+ F(IsConcatSpreadableProtector, 0, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
F(IsDictPropertyConstTrackingEnabled, 0, 1) \
- F(RegexpHasBytecode, 2, 1) \
- F(RegexpHasNativeCode, 2, 1) \
- F(RegexpTypeTag, 1, 1) \
- F(RegexpIsUnmodified, 1, 1) \
+ F(IsMidTierTurboprop, 0, 1) \
+ F(IsTopTierTurboprop, 0, 1) \
F(MapIteratorProtector, 0, 1) \
- F(ArrayIteratorProtector, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
+ F(NewRegExpWithBacktrackLimit, 3, 1) \
F(NotifyContextDisposed, 0, 1) \
+ F(OptimizeFunctionForTopTier, 1, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
- F(TierupFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
- F(NewRegExpWithBacktrackLimit, 3, 1) \
F(PrepareFunctionForOptimization, -1, 1) \
F(PretenureAllocationSite, 1, 1) \
F(PrintWithNameForAssert, 2, 1) \
+ F(PromiseSpeciesProtector, 0, 1) \
+ F(RegexpHasBytecode, 2, 1) \
+ F(RegexpHasNativeCode, 2, 1) \
+ F(RegexpIsUnmodified, 1, 1) \
+ F(RegExpSpeciesProtector, 0, 1) \
+ F(RegexpTypeTag, 1, 1) \
F(RunningInSimulator, 0, 1) \
F(RuntimeEvaluateREPL, 1, 1) \
+ F(ScheduleGCInStackCheck, 0, 1) \
F(SerializeDeserializeNow, 0, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetForceSlowPath, 1, 1) \
F(SetIteratorProtector, 0, 1) \
F(SimulateNewspaceFull, 0, 1) \
- F(ScheduleGCInStackCheck, 0, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
+ F(TierupFunctionOnNextCall, -1, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(TurbofanStaticAssert, 1, 1) \
F(TypedArraySpeciesProtector, 0, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
- I(DeoptimizeNow, 0, 1) \
- F(PromiseSpeciesProtector, 0, 1) \
- F(IsConcatSpreadableProtector, 0, 1) \
- F(RegExpSpeciesProtector, 0, 1) \
- F(Is64Bit, 0, 1)
+ F(WaitForBackgroundOptimization, 0, 1) \
+ I(DeoptimizeNow, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
F(ArrayBufferDetach, 1, 1) \
@@ -595,7 +600,7 @@ namespace internal {
F(DeserializeWasmModule, 2, 1) \
F(DisallowWasmCodegen, 1, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
- F(GetWasmExceptionId, 2, 1) \
+ F(GetWasmExceptionTagId, 2, 1) \
F(GetWasmExceptionValues, 1, 1) \
F(GetWasmRecoveredTrapCount, 0, 1) \
F(IsAsmWasmCode, 1, 1) \
@@ -800,8 +805,8 @@ class Runtime : public AllStatic {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> HasProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key);
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetInternalProperties(
- Isolate* isolate, Handle<Object>);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray>
+ GetInternalProperties(Isolate* isolate, Handle<Object>);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ThrowIteratorError(
Isolate* isolate, Handle<Object> object);
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index dc31572c14..3ccee83753 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -4,16 +4,24 @@
#include "src/snapshot/code-serializer.h"
+#include <memory>
+
+#include "src/base/logging.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
+#include "src/handles/maybe-handles.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
-#include "src/logging/counters.h"
+#include "src/heap/parked-scope.h"
+#include "src/logging/counters-scopes.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
#include "src/snapshot/object-deserializer.h"
@@ -24,7 +32,7 @@
namespace v8 {
namespace internal {
-ScriptData::ScriptData(const byte* data, int length)
+AlignedCachedData::AlignedCachedData(const byte* data, int length)
: owns_data_(false), rejected_(false), data_(data), length_(length) {
if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
byte* copy = NewArray<byte>(length);
@@ -44,7 +52,8 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
Handle<SharedFunctionInfo> info) {
Isolate* isolate = info->GetIsolate();
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
- HistogramTimerScope histogram_timer(isolate->counters()->compile_serialize());
+ NestedTimedHistogramScope histogram_timer(
+ isolate->counters()->compile_serialize());
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileSerialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileSerialize");
@@ -69,24 +78,24 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
source, script->origin_options()));
DisallowGarbageCollection no_gc;
cs.reference_map()->AddAttachedReference(*source);
- ScriptData* script_data = cs.SerializeSharedFunctionInfo(info);
+ AlignedCachedData* cached_data = cs.SerializeSharedFunctionInfo(info);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
- int length = script_data->length();
+ int length = cached_data->length();
PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
}
ScriptCompiler::CachedData* result =
- new ScriptCompiler::CachedData(script_data->data(), script_data->length(),
+ new ScriptCompiler::CachedData(cached_data->data(), cached_data->length(),
ScriptCompiler::CachedData::BufferOwned);
- script_data->ReleaseDataOwnership();
- delete script_data;
+ cached_data->ReleaseDataOwnership();
+ delete cached_data;
return result;
}
-ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
+AlignedCachedData* CodeSerializer::SerializeSharedFunctionInfo(
Handle<SharedFunctionInfo> info) {
DisallowGarbageCollection no_gc;
@@ -222,6 +231,8 @@ void CodeSerializer::SerializeGeneric(Handle<HeapObject> heap_object) {
serializer.Serialize();
}
+namespace {
+
#ifndef V8_TARGET_ARCH_ARM
// NOTE(mmarchini): when FLAG_interpreted_frames_native_stack is on, we want to
// create duplicates of InterpreterEntryTrampoline for the deserialized
@@ -263,89 +274,40 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
}
#endif // V8_TARGET_ARCH_ARM
-namespace {
class StressOffThreadDeserializeThread final : public base::Thread {
public:
explicit StressOffThreadDeserializeThread(Isolate* isolate,
- const SerializedCodeData* scd)
+ AlignedCachedData* cached_data)
: Thread(
base::Thread::Options("StressOffThreadDeserializeThread", 2 * MB)),
isolate_(isolate),
- scd_(scd) {}
-
- MaybeHandle<SharedFunctionInfo> maybe_result() const { return maybe_result_; }
+ cached_data_(cached_data) {}
void Run() final {
LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
- MaybeHandle<SharedFunctionInfo> local_maybe_result =
- ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
- &local_isolate, scd_, local_isolate.factory()->empty_string());
+ UnparkedScope unparked_scope(&local_isolate);
+ LocalHandleScope handle_scope(&local_isolate);
+ off_thread_data_ =
+ CodeSerializer::StartDeserializeOffThread(&local_isolate, cached_data_);
+ }
- maybe_result_ =
- local_isolate.heap()->NewPersistentMaybeHandle(local_maybe_result);
+ MaybeHandle<SharedFunctionInfo> Finalize(Isolate* isolate,
+ Handle<String> source,
+ ScriptOriginOptions origin_options) {
+ return CodeSerializer::FinishOffThreadDeserialize(
+ isolate, std::move(off_thread_data_), cached_data_, source,
+ origin_options);
}
private:
Isolate* isolate_;
- const SerializedCodeData* scd_;
- MaybeHandle<SharedFunctionInfo> maybe_result_;
+ AlignedCachedData* cached_data_;
+ CodeSerializer::OffThreadDeserializeData off_thread_data_;
};
-} // namespace
-
-MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
- Isolate* isolate, ScriptData* cached_data, Handle<String> source,
- ScriptOriginOptions origin_options) {
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization || FLAG_log_function_events) timer.Start();
-
- HandleScope scope(isolate);
-
- SerializedCodeData::SanityCheckResult sanity_check_result =
- SerializedCodeData::CHECK_SUCCESS;
- const SerializedCodeData scd = SerializedCodeData::FromCachedData(
- cached_data, SerializedCodeData::SourceHash(source, origin_options),
- &sanity_check_result);
- if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
- if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
- DCHECK(cached_data->rejected());
- isolate->counters()->code_cache_reject_reason()->AddSample(
- sanity_check_result);
- return MaybeHandle<SharedFunctionInfo>();
- }
-
- // Deserialize.
- MaybeHandle<SharedFunctionInfo> maybe_result;
- // TODO(leszeks): Add LocalHeap support to deserializer
- if (false && FLAG_stress_background_compile) {
- StressOffThreadDeserializeThread thread(isolate, &scd);
- CHECK(thread.Start());
- thread.Join();
-
- maybe_result = thread.maybe_result();
-
- // Fix-up result script source.
- Handle<SharedFunctionInfo> result;
- if (maybe_result.ToHandle(&result)) {
- Script::cast(result->script()).set_source(*source);
- }
- } else {
- maybe_result = ObjectDeserializer::DeserializeSharedFunctionInfo(
- isolate, &scd, source);
- }
-
- Handle<SharedFunctionInfo> result;
- if (!maybe_result.ToHandle(&result)) {
- // Deserializing may fail if the reservations cannot be fulfilled.
- if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
- return MaybeHandle<SharedFunctionInfo>();
- }
-
- if (FLAG_profile_deserialization) {
- double ms = timer.Elapsed().InMillisecondsF();
- int length = cached_data->length();
- PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
- }
+void FinalizeDeserialization(Isolate* isolate,
+ Handle<SharedFunctionInfo> result,
+ const base::ElapsedTimer& timer) {
const bool log_code_creation =
isolate->logger()->is_listening_to_code_events() ||
isolate->is_profiling() ||
@@ -405,6 +367,159 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
Handle<Script> script(Script::cast(result->script()), isolate);
Script::InitLineEnds(isolate, script);
}
+}
+
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
+ Isolate* isolate, AlignedCachedData* cached_data, Handle<String> source,
+ ScriptOriginOptions origin_options) {
+ if (FLAG_stress_background_compile) {
+ StressOffThreadDeserializeThread thread(isolate, cached_data);
+ CHECK(thread.Start());
+ thread.Join();
+ return thread.Finalize(isolate, source, origin_options);
+ // TODO(leszeks): Compare off-thread deserialized data to on-thread.
+ }
+
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization || FLAG_log_function_events) timer.Start();
+
+ HandleScope scope(isolate);
+
+ SerializedCodeData::SanityCheckResult sanity_check_result =
+ SerializedCodeData::CHECK_SUCCESS;
+ const SerializedCodeData scd = SerializedCodeData::FromCachedData(
+ cached_data, SerializedCodeData::SourceHash(source, origin_options),
+ &sanity_check_result);
+ if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
+ if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
+ DCHECK(cached_data->rejected());
+ isolate->counters()->code_cache_reject_reason()->AddSample(
+ sanity_check_result);
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ // Deserialize.
+ MaybeHandle<SharedFunctionInfo> maybe_result =
+ ObjectDeserializer::DeserializeSharedFunctionInfo(isolate, &scd, source);
+
+ Handle<SharedFunctionInfo> result;
+ if (!maybe_result.ToHandle(&result)) {
+ // Deserializing may fail if the reservations cannot be fulfilled.
+ if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int length = cached_data->length();
+ PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
+ }
+
+ FinalizeDeserialization(isolate, result, timer);
+
+ return scope.CloseAndEscape(result);
+}
+
+CodeSerializer::OffThreadDeserializeData
+CodeSerializer::StartDeserializeOffThread(LocalIsolate* local_isolate,
+ AlignedCachedData* cached_data) {
+ OffThreadDeserializeData result;
+
+ DCHECK(!local_isolate->heap()->HasPersistentHandles());
+
+ SerializedCodeData::SanityCheckResult sanity_check_result =
+ SerializedCodeData::CHECK_SUCCESS;
+ const SerializedCodeData scd =
+ SerializedCodeData::FromCachedDataWithoutSource(cached_data,
+ &sanity_check_result);
+ if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
+ // Exit early, but don't report the failure yet; we'll re-check this when
+ // finishing on the main thread.
+ DCHECK(cached_data->rejected());
+ return result;
+ }
+
+ MaybeHandle<SharedFunctionInfo> local_maybe_result =
+ OffThreadObjectDeserializer::DeserializeSharedFunctionInfo(
+ local_isolate, &scd, &result.scripts);
+
+ result.maybe_result =
+ local_isolate->heap()->NewPersistentMaybeHandle(local_maybe_result);
+ result.persistent_handles = local_isolate->heap()->DetachPersistentHandles();
+
+ return result;
+}
+
+MaybeHandle<SharedFunctionInfo> CodeSerializer::FinishOffThreadDeserialize(
+ Isolate* isolate, OffThreadDeserializeData&& data,
+ AlignedCachedData* cached_data, Handle<String> source,
+ ScriptOriginOptions origin_options) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization || FLAG_log_function_events) timer.Start();
+
+ HandleScope scope(isolate);
+
+ // Check again now that we have the source.
+ SerializedCodeData::SanityCheckResult sanity_check_result =
+ SerializedCodeData::CHECK_SUCCESS;
+ const SerializedCodeData scd = SerializedCodeData::FromCachedData(
+ cached_data, SerializedCodeData::SourceHash(source, origin_options),
+ &sanity_check_result);
+ if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
+ // The only case where the deserialization result could exist despite a
+ // check failure is on a source mismatch, since we can't test for this
+ // off-thread.
+ DCHECK_IMPLIES(!data.maybe_result.is_null(),
+ sanity_check_result == SerializedCodeData::SOURCE_MISMATCH);
+ if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
+ DCHECK(cached_data->rejected());
+ isolate->counters()->code_cache_reject_reason()->AddSample(
+ sanity_check_result);
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ Handle<SharedFunctionInfo> result;
+ if (!data.maybe_result.ToHandle(&result)) {
+ // Deserializing may fail if the reservations cannot be fulfilled.
+ if (FLAG_profile_deserialization) {
+ PrintF("[Off-thread deserializing failed]\n");
+ }
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ // Change the result persistent handle into a regular handle.
+ DCHECK(data.persistent_handles->Contains(result.location()));
+ result = handle(*result, isolate);
+
+ // Fix up the source on the script. This should be the only deserialized
+ // script, and the off-thread deserializer should have set its source to
+ // the empty string.
+ DCHECK_EQ(data.scripts.size(), 1);
+ DCHECK_EQ(result->script(), *data.scripts[0]);
+ DCHECK_EQ(Script::cast(result->script()).source(),
+ ReadOnlyRoots(isolate).empty_string());
+ Script::cast(result->script()).set_source(*source);
+
+ // Fix up the script list to include the newly deserialized script.
+ Handle<WeakArrayList> list = isolate->factory()->script_list();
+ for (Handle<Script> script : data.scripts) {
+ DCHECK(data.persistent_handles->Contains(script.location()));
+ list =
+ WeakArrayList::AddToEnd(isolate, list, MaybeObjectHandle::Weak(script));
+ }
+ isolate->heap()->SetRootScriptList(*list);
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int length = cached_data->length();
+ PrintF("[Finishing off-thread deserialize from %d bytes took %0.3f ms]\n",
+ length, ms);
+ }
+
+ FinalizeDeserialization(isolate, result, timer);
+
return scope.CloseAndEscape(result);
}
@@ -441,16 +556,23 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
uint32_t expected_source_hash) const {
+ SanityCheckResult result = SanityCheckWithoutSource();
+ if (result != CHECK_SUCCESS) return result;
+ uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
+ if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
+ return CHECK_SUCCESS;
+}
+
+SerializedCodeData::SanityCheckResult
+SerializedCodeData::SanityCheckWithoutSource() const {
if (this->size_ < kHeaderSize) return INVALID_HEADER;
uint32_t magic_number = GetMagicNumber();
if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
- uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
uint32_t c = GetHeaderValue(kChecksumOffset);
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
- if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
uint32_t max_payload_length = this->size_ - kHeaderSize;
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
@@ -470,9 +592,9 @@ uint32_t SerializedCodeData::SourceHash(Handle<String> source,
}
// Return the AlignedCachedData object and relinquish ownership over it to the caller.
-ScriptData* SerializedCodeData::GetScriptData() {
+AlignedCachedData* SerializedCodeData::GetScriptData() {
DCHECK(owns_data_);
- ScriptData* result = new ScriptData(data_, size_);
+ AlignedCachedData* result = new AlignedCachedData(data_, size_);
result->AcquireDataOwnership();
owns_data_ = false;
data_ = nullptr;
@@ -487,11 +609,11 @@ base::Vector<const byte> SerializedCodeData::Payload() const {
return base::Vector<const byte>(payload, length);
}
-SerializedCodeData::SerializedCodeData(ScriptData* data)
+SerializedCodeData::SerializedCodeData(AlignedCachedData* data)
: SerializedData(const_cast<byte*>(data->data()), data->length()) {}
SerializedCodeData SerializedCodeData::FromCachedData(
- ScriptData* cached_data, uint32_t expected_source_hash,
+ AlignedCachedData* cached_data, uint32_t expected_source_hash,
SanityCheckResult* rejection_result) {
DisallowGarbageCollection no_gc;
SerializedCodeData scd(cached_data);
@@ -503,5 +625,17 @@ SerializedCodeData SerializedCodeData::FromCachedData(
return scd;
}
+SerializedCodeData SerializedCodeData::FromCachedDataWithoutSource(
+ AlignedCachedData* cached_data, SanityCheckResult* rejection_result) {
+ DisallowGarbageCollection no_gc;
+ SerializedCodeData scd(cached_data);
+ *rejection_result = scd.SanityCheckWithoutSource();
+ if (*rejection_result != CHECK_SUCCESS) {
+ cached_data->Reject();
+ return SerializedCodeData(nullptr, 0);
+ }
+ return scd;
+}
+
} // namespace internal
} // namespace v8
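The new Deserialize entry point delegates to the StartDeserializeOffThread/FinishOffThreadDeserialize pair when FLAG_stress_background_compile is set, which mirrors how a background consumer of cached code would use the split API. As a rough standalone analogue (not V8 code; every name below is a stand-in), the split boils down to a background phase that produces a movable bundle and a main-thread phase that consumes it exactly once:

#include <future>
#include <string>
#include <utility>

// Stand-in for CodeSerializer::OffThreadDeserializeData: an opaque bundle
// produced on a background thread and finalized on the main thread.
struct OffThreadData {
  std::string payload;
};

// Background phase: work that does not need the main-thread isolate.
OffThreadData StartOffThread(const std::string& cached_bytes) {
  return OffThreadData{cached_bytes + ":decoded"};
}

// Main-thread phase: consumes the bundle by rvalue reference, exactly once.
std::string Finish(OffThreadData&& data) { return std::move(data.payload); }

int main() {
  auto background = std::async(std::launch::async, StartOffThread,
                               std::string("cached"));
  OffThreadData data = background.get();         // join the background phase
  std::string result = Finish(std::move(data));  // finalize on the main thread
  return result.empty() ? 1 : 0;
}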
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 229c62f99a..cc6662e857 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -12,14 +12,16 @@
namespace v8 {
namespace internal {
-class V8_EXPORT_PRIVATE ScriptData {
+class PersistentHandles;
+
+class V8_EXPORT_PRIVATE AlignedCachedData {
public:
- ScriptData(const byte* data, int length);
- ~ScriptData() {
+ AlignedCachedData(const byte* data, int length);
+ ~AlignedCachedData() {
if (owns_data_) DeleteArray(data_);
}
- ScriptData(const ScriptData&) = delete;
- ScriptData& operator=(const ScriptData&) = delete;
+ AlignedCachedData(const AlignedCachedData&) = delete;
+ AlignedCachedData& operator=(const AlignedCachedData&) = delete;
const byte* data() const { return data_; }
int length() const { return length_; }
@@ -27,6 +29,8 @@ class V8_EXPORT_PRIVATE ScriptData {
void Reject() { rejected_ = true; }
+ bool HasDataOwnership() const { return owns_data_; }
+
void AcquireDataOwnership() {
DCHECK(!owns_data_);
owns_data_ = true;
@@ -46,17 +50,36 @@ class V8_EXPORT_PRIVATE ScriptData {
class CodeSerializer : public Serializer {
public:
+ struct OffThreadDeserializeData {
+ private:
+ friend class CodeSerializer;
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ std::vector<Handle<Script>> scripts;
+ std::unique_ptr<PersistentHandles> persistent_handles;
+ };
+
CodeSerializer(const CodeSerializer&) = delete;
CodeSerializer& operator=(const CodeSerializer&) = delete;
V8_EXPORT_PRIVATE static ScriptCompiler::CachedData* Serialize(
Handle<SharedFunctionInfo> info);
- ScriptData* SerializeSharedFunctionInfo(Handle<SharedFunctionInfo> info);
+ AlignedCachedData* SerializeSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
- Isolate* isolate, ScriptData* cached_data, Handle<String> source,
+ Isolate* isolate, AlignedCachedData* cached_data, Handle<String> source,
ScriptOriginOptions origin_options);
+ V8_WARN_UNUSED_RESULT static OffThreadDeserializeData
+ StartDeserializeOffThread(LocalIsolate* isolate,
+ AlignedCachedData* cached_data);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
+ FinishOffThreadDeserialize(Isolate* isolate, OffThreadDeserializeData&& data,
+ AlignedCachedData* cached_data,
+ Handle<String> source,
+ ScriptOriginOptions origin_options);
+
uint32_t source_hash() const { return source_hash_; }
protected:
@@ -106,16 +129,18 @@ class SerializedCodeData : public SerializedData {
static const uint32_t kHeaderSize = POINTER_SIZE_ALIGN(kUnalignedHeaderSize);
// Used when consuming.
- static SerializedCodeData FromCachedData(ScriptData* cached_data,
+ static SerializedCodeData FromCachedData(AlignedCachedData* cached_data,
uint32_t expected_source_hash,
SanityCheckResult* rejection_result);
+ static SerializedCodeData FromCachedDataWithoutSource(
+ AlignedCachedData* cached_data, SanityCheckResult* rejection_result);
// Used when producing.
SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs);
// Return the AlignedCachedData object and relinquish ownership over it to the caller.
- ScriptData* GetScriptData();
+ AlignedCachedData* GetScriptData();
base::Vector<const byte> Payload() const;
@@ -123,7 +148,7 @@ class SerializedCodeData : public SerializedData {
ScriptOriginOptions origin_options);
private:
- explicit SerializedCodeData(ScriptData* data);
+ explicit SerializedCodeData(AlignedCachedData* data);
SerializedCodeData(const byte* data, int size)
: SerializedData(const_cast<byte*>(data), size) {}
@@ -132,6 +157,7 @@ class SerializedCodeData : public SerializedData {
}
SanityCheckResult SanityCheck(uint32_t expected_source_hash) const;
+ SanityCheckResult SanityCheckWithoutSource() const;
};
} // namespace internal
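The AlignedCachedData rename keeps ScriptData's explicit ownership flag: Serialize acquires the buffer via GetScriptData(), then releases ownership before wrapping the same bytes in a BufferOwned CachedData. A compact standalone sketch of that flag idiom follows (a hypothetical class, not the V8 type):

#include <cassert>
#include <cstring>

// Simplified analogue of AlignedCachedData: the owns_data_ flag records which
// side is currently responsible for freeing the buffer.
class OwnedBuffer {
 public:
  OwnedBuffer(const char* data, int length) : data_(data), length_(length) {}
  ~OwnedBuffer() {
    if (owns_data_) delete[] data_;
  }
  void AcquireDataOwnership() {
    assert(!owns_data_);
    owns_data_ = true;
  }
  void ReleaseDataOwnership() {
    assert(owns_data_);
    owns_data_ = false;
  }
  const char* data() const { return data_; }
  int length() const { return length_; }

 private:
  const char* data_;
  int length_;
  bool owns_data_ = false;
};

int main() {
  char* bytes = new char[4];
  std::memcpy(bytes, "v8!", 4);
  OwnedBuffer buffer(bytes, 4);
  buffer.AcquireDataOwnership();  // buffer would free bytes on destruction
  buffer.ReleaseDataOwnership();  // ownership handed back before the handoff
  delete[] bytes;                 // the receiving side frees the buffer
  return buffer.length() == 4 ? 0 : 1;
}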
diff --git a/deps/v8/src/snapshot/context-deserializer.h b/deps/v8/src/snapshot/context-deserializer.h
index 6552a0fe45..8fa570ad86 100644
--- a/deps/v8/src/snapshot/context-deserializer.h
+++ b/deps/v8/src/snapshot/context-deserializer.h
@@ -17,7 +17,8 @@ class Isolate;
// Deserializes the context-dependent object graph rooted at a given object.
// The ContextDeserializer is not expected to deserialize any code objects.
-class V8_EXPORT_PRIVATE ContextDeserializer final : public Deserializer {
+class V8_EXPORT_PRIVATE ContextDeserializer final
+ : public Deserializer<Isolate> {
public:
static MaybeHandle<Context> DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index ec3605f82b..7a02a50caa 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -175,7 +175,7 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// Unconditionally reset the JSFunction to its SFI's code, since we can't
// serialize optimized code anyway.
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
- closure->ResetIfBytecodeFlushed();
+ closure->ResetIfCodeFlushed();
if (closure->is_compiled()) {
if (closure->shared().HasBaselineData()) {
closure->shared().flush_baseline_data();
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 89ab3ce538..fab2f80355 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -14,10 +14,14 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
+#include "src/logging/local-logger.h"
#include "src/logging/log.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/backing-store.h"
#include "src/objects/cell-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table.h"
@@ -149,9 +153,10 @@ class SlotAccessorForRootSlots {
// A SlotAccessor for creating a Handle, which saves a Handle allocation when
// a Handle already exists.
+template <typename IsolateT>
class SlotAccessorForHandle {
public:
- SlotAccessorForHandle(Handle<HeapObject>* handle, Isolate* isolate)
+ SlotAccessorForHandle(Handle<HeapObject>* handle, IsolateT* isolate)
: handle_(handle), isolate_(isolate) {}
MaybeObjectSlot slot() const { UNREACHABLE(); }
@@ -185,36 +190,62 @@ class SlotAccessorForHandle {
private:
Handle<HeapObject>* handle_;
- Isolate* isolate_;
+ IsolateT* isolate_;
};
+template <typename IsolateT>
template <typename TSlot>
-int Deserializer::WriteAddress(TSlot dest, Address value) {
+int Deserializer<IsolateT>::WriteAddress(TSlot dest, Address value) {
DCHECK(!next_reference_is_weak_);
memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
return (kSystemPointerSize / TSlot::kSlotDataSize);
}
+template <typename IsolateT>
template <typename TSlot>
-int Deserializer::WriteExternalPointer(TSlot dest, Address value,
- ExternalPointerTag tag) {
+int Deserializer<IsolateT>::WriteExternalPointer(TSlot dest, Address value,
+ ExternalPointerTag tag) {
DCHECK(!next_reference_is_weak_);
- InitExternalPointerField(dest.address(), isolate(), value, tag);
+ InitExternalPointerField(dest.address(), main_thread_isolate(), value, tag);
STATIC_ASSERT(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
return (kExternalPointerSize / TSlot::kSlotDataSize);
}
-Deserializer::Deserializer(Isolate* isolate, base::Vector<const byte> payload,
- uint32_t magic_number, bool deserializing_user_code,
- bool can_rehash)
+namespace {
+#ifdef DEBUG
+int GetNumApiReferences(Isolate* isolate) {
+ int num_api_references = 0;
+ // The read-only deserializer is run by read-only heap set-up before the
+ // heap is fully set up. External reference table relies on a few parts of
+ // this set-up (like old-space), so it may be uninitialized at this point.
+ if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
+ // Count the number of external references registered through the API.
+ if (isolate->api_external_references() != nullptr) {
+ while (isolate->api_external_references()[num_api_references] != 0) {
+ num_api_references++;
+ }
+ }
+ }
+ return num_api_references;
+}
+int GetNumApiReferences(LocalIsolate* isolate) { return 0; }
+#endif
+} // namespace
+
+template <typename IsolateT>
+Deserializer<IsolateT>::Deserializer(IsolateT* isolate,
+ base::Vector<const byte> payload,
+ uint32_t magic_number,
+ bool deserializing_user_code,
+ bool can_rehash)
: isolate_(isolate),
source_(payload),
magic_number_(magic_number),
deserializing_user_code_(deserializing_user_code),
can_rehash_(can_rehash) {
DCHECK_NOT_NULL(isolate);
- isolate_->RegisterDeserializerStarted();
+ isolate->RegisterDeserializerStarted();
// We start the indices here at 1, so that we can distinguish between an
// actual index and a nullptr (serialized as kNullRefSentinel) in a
@@ -223,30 +254,21 @@ Deserializer::Deserializer(Isolate* isolate, base::Vector<const byte> payload,
backing_stores_.push_back({});
#ifdef DEBUG
- num_api_references_ = 0;
- // The read-only deserializer is run by read-only heap set-up before the
- // heap is fully set up. External reference table relies on a few parts of
- // this set-up (like old-space), so it may be uninitialized at this point.
- if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
- // Count the number of external references registered through the API.
- if (isolate->api_external_references() != nullptr) {
- while (isolate->api_external_references()[num_api_references_] != 0) {
- num_api_references_++;
- }
- }
- }
+ num_api_references_ = GetNumApiReferences(isolate);
#endif // DEBUG
CHECK_EQ(magic_number_, SerializedData::kMagicNumber);
}
-void Deserializer::Rehash() {
+template <typename IsolateT>
+void Deserializer<IsolateT>::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
for (Handle<HeapObject> item : to_rehash_) {
item->RehashBasedOnMap(isolate());
}
}
-Deserializer::~Deserializer() {
+template <typename IsolateT>
+Deserializer<IsolateT>::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
@@ -261,24 +283,30 @@ Deserializer::~Deserializer() {
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
-void Deserializer::VisitRootPointers(Root root, const char* description,
- FullObjectSlot start, FullObjectSlot end) {
+template <typename IsolateT>
+void Deserializer<IsolateT>::VisitRootPointers(Root root,
+ const char* description,
+ FullObjectSlot start,
+ FullObjectSlot end) {
ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end));
}
-void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+template <typename IsolateT>
+void Deserializer<IsolateT>::Synchronize(VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
}
-void Deserializer::DeserializeDeferredObjects() {
+template <typename IsolateT>
+void Deserializer<IsolateT>::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
SnapshotSpace space = NewObject::Decode(code);
ReadObject(space);
}
}
-void Deserializer::LogNewMapEvents() {
+template <typename IsolateT>
+void Deserializer<IsolateT>::LogNewMapEvents() {
DisallowGarbageCollection no_gc;
for (Handle<Map> map : new_maps_) {
DCHECK(FLAG_log_maps);
@@ -287,7 +315,8 @@ void Deserializer::LogNewMapEvents() {
}
}
-void Deserializer::WeakenDescriptorArrays() {
+template <typename IsolateT>
+void Deserializer<IsolateT>::WeakenDescriptorArrays() {
DisallowGarbageCollection no_gc;
for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
DCHECK(descriptor_array->IsStrongDescriptorArray());
@@ -297,36 +326,66 @@ void Deserializer::WeakenDescriptorArrays() {
}
}
-void Deserializer::LogScriptEvents(Script script) {
+template <typename IsolateT>
+void Deserializer<IsolateT>::LogScriptEvents(Script script) {
DisallowGarbageCollection no_gc;
LOG(isolate(),
ScriptEvent(Logger::ScriptEventType::kDeserialize, script.id()));
LOG(isolate(), ScriptDetails(script));
}
-StringTableInsertionKey::StringTableInsertionKey(Handle<String> string)
- : StringTableKey(ComputeRawHashField(*string), string->length()),
+namespace {
+template <typename IsolateT>
+uint32_t ComputeRawHashField(IsolateT* isolate, String string) {
+ // Make sure raw_hash_field() is computed.
+ string.EnsureHash(SharedStringAccessGuardIfNeeded(isolate));
+ return string.raw_hash_field();
+}
+} // namespace
+
+StringTableInsertionKey::StringTableInsertionKey(Isolate* isolate,
+ Handle<String> string)
+ : StringTableKey(ComputeRawHashField(isolate, *string), string->length()),
string_(string) {
DCHECK(string->IsInternalizedString());
}
-bool StringTableInsertionKey::IsMatch(Isolate* isolate, String string) {
- // We want to compare the content of two strings here.
- return string_->SlowEquals(string);
+StringTableInsertionKey::StringTableInsertionKey(LocalIsolate* isolate,
+ Handle<String> string)
+ : StringTableKey(ComputeRawHashField(isolate, *string), string->length()),
+ string_(string) {
+ DCHECK(string->IsInternalizedString());
}
-Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
- return string_;
+template <typename IsolateT>
+bool StringTableInsertionKey::IsMatch(IsolateT* isolate, String string) {
+ // We want to compare the content of two strings here.
+ return string_->SlowEquals(string, SharedStringAccessGuardIfNeeded(isolate));
}
+template bool StringTableInsertionKey::IsMatch(Isolate* isolate, String string);
+template bool StringTableInsertionKey::IsMatch(LocalIsolate* isolate,
+ String string);
-uint32_t StringTableInsertionKey::ComputeRawHashField(String string) {
- // Make sure raw_hash_field() is computed.
- string.EnsureHash();
- return string.raw_hash_field();
+namespace {
+
+void PostProcessExternalString(Handle<ExternalString> string,
+ Isolate* isolate) {
+ uint32_t index = string->GetResourceRefForDeserialization();
+ Address address =
+ static_cast<Address>(isolate->api_external_references()[index]);
+ string->AllocateExternalPointerEntries(isolate);
+ string->set_address_as_resource(isolate, address);
+ isolate->heap()->UpdateExternalString(*string, 0,
+ string->ExternalPayloadSize());
+ isolate->heap()->RegisterExternalString(*string);
}
-void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
- SnapshotSpace space) {
+} // namespace
+
+template <typename IsolateT>
+void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
+ Handle<HeapObject> obj,
+ SnapshotSpace space) {
DCHECK_EQ(*map, obj->map());
DisallowGarbageCollection no_gc;
InstanceType instance_type = map->instance_type();
@@ -349,10 +408,14 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
if (deserializing_user_code()) {
if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
// Canonicalize the internalized string. If it already exists in the
- // string table, set it to forward to the existing one.
+ // string table, set the string to point to the existing one and patch the
+ // deserialized string handle to point to the existing one.
+ // TODO(leszeks): This handle patching is ugly, consider adding an
+ // explicit internalized string bytecode. Also, the new thin string should
+ // be dead, try immediately freeing it.
Handle<String> string = Handle<String>::cast(obj);
- StringTableInsertionKey key(string);
+ StringTableInsertionKey key(isolate(), string);
Handle<String> result =
isolate()->string_table()->LookupKey(isolate(), &key);
@@ -388,8 +451,8 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
} else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
auto code_data_container = Handle<CodeDataContainer>::cast(obj);
- code_data_container->AllocateExternalPointerEntries(isolate());
- code_data_container->UpdateCodeEntryPoint(isolate(),
+ code_data_container->AllocateExternalPointerEntries(main_thread_isolate());
+ code_data_container->UpdateCodeEntryPoint(main_thread_isolate(),
code_data_container->code());
} else if (InstanceTypeChecker::IsMap(instance_type)) {
if (FLAG_log_maps) {
@@ -406,15 +469,8 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
call_handler_infos_.push_back(Handle<CallHandlerInfo>::cast(obj));
#endif
} else if (InstanceTypeChecker::IsExternalString(instance_type)) {
- Handle<ExternalString> string = Handle<ExternalString>::cast(obj);
- uint32_t index = string->GetResourceRefForDeserialization();
- Address address =
- static_cast<Address>(isolate()->api_external_references()[index]);
- string->AllocateExternalPointerEntries(isolate());
- string->set_address_as_resource(isolate(), address);
- isolate()->heap()->UpdateExternalString(*string, 0,
- string->ExternalPayloadSize());
- isolate()->heap()->RegisterExternalString(*string);
+ PostProcessExternalString(Handle<ExternalString>::cast(obj),
+ main_thread_isolate());
} else if (InstanceTypeChecker::IsJSDataView(instance_type)) {
Handle<JSDataView> data_view = Handle<JSDataView>::cast(obj);
JSArrayBuffer buffer = JSArrayBuffer::cast(data_view->buffer());
@@ -426,18 +482,18 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
// a numbered reference to an already deserialized backing store.
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view->AllocateExternalPointerEntries(isolate());
+ data_view->AllocateExternalPointerEntries(main_thread_isolate());
data_view->set_data_pointer(
- isolate(),
+ main_thread_isolate(),
reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
} else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(obj);
// Fixup typed array pointers.
if (typed_array->is_on_heap()) {
Address raw_external_pointer = typed_array->external_pointer_raw();
- typed_array->AllocateExternalPointerEntries(isolate());
+ typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOnHeapDataPtr(
- isolate(), HeapObject::cast(typed_array->base_pointer()),
+ main_thread_isolate(), HeapObject::cast(typed_array->base_pointer()),
raw_external_pointer);
} else {
// Serializer writes backing store ref as a DataPtr() value.
@@ -447,8 +503,8 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array->AllocateExternalPointerEntries(isolate());
- typed_array->SetOffHeapDataPtr(isolate(), start,
+ typed_array->AllocateExternalPointerEntries(main_thread_isolate());
+ typed_array->SetOffHeapDataPtr(main_thread_isolate(), start,
typed_array->byte_offset());
}
} else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
@@ -457,8 +513,8 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
new_off_heap_array_buffers_.push_back(buffer);
} else {
- buffer->AllocateExternalPointerEntries(isolate());
- buffer->set_backing_store(isolate(), nullptr);
+ buffer->AllocateExternalPointerEntries(main_thread_isolate());
+ buffer->set_backing_store(main_thread_isolate(), nullptr);
}
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
@@ -471,7 +527,7 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
new_descriptor_arrays_.push_back(descriptors);
} else if (InstanceTypeChecker::IsNativeContext(instance_type)) {
Handle<NativeContext> context = Handle<NativeContext>::cast(obj);
- context->AllocateExternalPointerEntries(isolate());
+ context->AllocateExternalPointerEntries(main_thread_isolate());
}
// Check alignment.
@@ -479,7 +535,8 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
HeapObject::RequiredAlignment(*map)));
}
-HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
+template <typename IsolateT>
+HeapObjectReferenceType Deserializer<IsolateT>::GetAndResetNextReferenceType() {
HeapObjectReferenceType type = next_reference_is_weak_
? HeapObjectReferenceType::WEAK
: HeapObjectReferenceType::STRONG;
@@ -487,7 +544,8 @@ HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
return type;
}
-Handle<HeapObject> Deserializer::GetBackReferencedObject() {
+template <typename IsolateT>
+Handle<HeapObject> Deserializer<IsolateT>::GetBackReferencedObject() {
Handle<HeapObject> obj = back_refs_[source_.GetInt()];
// We don't allow ThinStrings in backreferences -- if internalization produces
@@ -499,15 +557,17 @@ Handle<HeapObject> Deserializer::GetBackReferencedObject() {
return obj;
}
-Handle<HeapObject> Deserializer::ReadObject() {
+template <typename IsolateT>
+Handle<HeapObject> Deserializer<IsolateT>::ReadObject() {
Handle<HeapObject> ret;
- CHECK_EQ(ReadSingleBytecodeData(source_.Get(),
- SlotAccessorForHandle(&ret, isolate())),
+ CHECK_EQ(ReadSingleBytecodeData(
+ source_.Get(), SlotAccessorForHandle<IsolateT>(&ret, isolate())),
1);
return ret;
}
-Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
+template <typename IsolateT>
+Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
const int size_in_tagged = source_.GetInt();
const int size_in_bytes = size_in_tagged * kTaggedSize;
@@ -555,8 +615,8 @@ Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
JSObject js_obj = JSObject::cast(raw_obj);
for (int i = 0; i < js_obj.GetEmbedderFieldCount(); ++i) {
void* pointer;
- CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointerSafe(isolate(),
- &pointer));
+ CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointerSafe(
+ main_thread_isolate(), &pointer));
CHECK_NULL(pointer);
}
} else if (raw_obj.IsEmbedderDataArray()) {
@@ -565,7 +625,7 @@ Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
EmbedderDataSlot end(array, array.length());
for (EmbedderDataSlot slot = start; slot < end; ++slot) {
void* pointer;
- CHECK(slot.ToAlignedPointerSafe(isolate(), &pointer));
+ CHECK(slot.ToAlignedPointerSafe(main_thread_isolate(), &pointer));
CHECK_NULL(pointer);
}
}
@@ -577,8 +637,6 @@ Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
ReadData(obj, 1, size_in_tagged);
PostProcessNewObject(map, obj, space);
- DCHECK(!obj->IsThinString(isolate()));
-
#ifdef DEBUG
if (obj->IsCode()) {
DCHECK(space == SnapshotSpace::kCode ||
@@ -591,7 +649,8 @@ Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
return obj;
}
-Handle<HeapObject> Deserializer::ReadMetaMap() {
+template <typename IsolateT>
+Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
const SnapshotSpace space = SnapshotSpace::kReadOnlyHeap;
const int size_in_bytes = Map::kSize;
const int size_in_tagged = size_in_bytes / kTaggedSize;
@@ -613,12 +672,20 @@ Handle<HeapObject> Deserializer::ReadMetaMap() {
return obj;
}
-class Deserializer::RelocInfoVisitor {
+class DeserializerRelocInfoVisitor {
public:
- RelocInfoVisitor(Deserializer* deserializer,
- const std::vector<Handle<HeapObject>>* objects)
+ DeserializerRelocInfoVisitor(Deserializer<Isolate>* deserializer,
+ const std::vector<Handle<HeapObject>>* objects)
: deserializer_(deserializer), objects_(objects), current_object_(0) {}
- ~RelocInfoVisitor() { DCHECK_EQ(current_object_, objects_->size()); }
+
+ DeserializerRelocInfoVisitor(Deserializer<LocalIsolate>* deserializer,
+ const std::vector<Handle<HeapObject>>* objects) {
+ UNREACHABLE();
+ }
+
+ ~DeserializerRelocInfoVisitor() {
+ DCHECK_EQ(current_object_, objects_->size());
+ }
void VisitCodeTarget(Code host, RelocInfo* rinfo);
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
@@ -631,34 +698,34 @@ class Deserializer::RelocInfoVisitor {
Isolate* isolate() { return deserializer_->isolate(); }
SnapshotByteSource& source() { return deserializer_->source_; }
- Deserializer* deserializer_;
+ Deserializer<Isolate>* deserializer_;
const std::vector<Handle<HeapObject>>* objects_;
int current_object_;
};
-void Deserializer::RelocInfoVisitor::VisitCodeTarget(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitCodeTarget(Code host,
+ RelocInfo* rinfo) {
HeapObject object = *objects_->at(current_object_++);
rinfo->set_target_address(Code::cast(object).raw_instruction_start());
}
-void Deserializer::RelocInfoVisitor::VisitEmbeddedPointer(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
HeapObject object = *objects_->at(current_object_++);
// Embedded object reference must be a strong one.
rinfo->set_target_object(isolate()->heap(), object);
}
-void Deserializer::RelocInfoVisitor::VisitRuntimeEntry(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitRuntimeEntry(Code host,
+ RelocInfo* rinfo) {
// We no longer serialize code that contains runtime entries.
UNREACHABLE();
}
-void Deserializer::RelocInfoVisitor::VisitExternalReference(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitExternalReference(Code host,
+ RelocInfo* rinfo) {
byte data = source().Get();
- CHECK_EQ(data, kExternalReference);
+ CHECK_EQ(data, Deserializer<Isolate>::kExternalReference);
Address address = deserializer_->ReadExternalReferenceCase();
@@ -671,10 +738,10 @@ void Deserializer::RelocInfoVisitor::VisitExternalReference(Code host,
}
}
-void Deserializer::RelocInfoVisitor::VisitInternalReference(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitInternalReference(Code host,
+ RelocInfo* rinfo) {
byte data = source().Get();
- CHECK_EQ(data, kInternalReference);
+ CHECK_EQ(data, Deserializer<Isolate>::kInternalReference);
// Internal reference target is encoded as an offset from code entry.
int target_offset = source().GetInt();
@@ -689,10 +756,10 @@ void Deserializer::RelocInfoVisitor::VisitInternalReference(Code host,
rinfo->pc(), target, rinfo->rmode());
}
-void Deserializer::RelocInfoVisitor::VisitOffHeapTarget(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitOffHeapTarget(Code host,
+ RelocInfo* rinfo) {
byte data = source().Get();
- CHECK_EQ(data, kOffHeapTarget);
+ CHECK_EQ(data, Deserializer<Isolate>::kOffHeapTarget);
Builtin builtin = Builtins::FromInt(source().GetInt());
@@ -711,9 +778,10 @@ void Deserializer::RelocInfoVisitor::VisitOffHeapTarget(Code host,
}
}
+template <typename IsolateT>
template <typename SlotAccessor>
-int Deserializer::ReadRepeatedObject(SlotAccessor slot_accessor,
- int repeat_count) {
+int Deserializer<IsolateT>::ReadRepeatedObject(SlotAccessor slot_accessor,
+ int repeat_count) {
CHECK_LE(2, repeat_count);
Handle<HeapObject> heap_object = ReadObject();
@@ -765,8 +833,10 @@ constexpr byte VerifyBytecodeCount(byte bytecode) {
: case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap) \
: case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kReadOnlyHeap)
-void Deserializer::ReadData(Handle<HeapObject> object, int start_slot_index,
- int end_slot_index) {
+template <typename IsolateT>
+void Deserializer<IsolateT>::ReadData(Handle<HeapObject> object,
+ int start_slot_index,
+ int end_slot_index) {
int current = start_slot_index;
while (current < end_slot_index) {
byte data = source_.Get();
@@ -776,8 +846,9 @@ void Deserializer::ReadData(Handle<HeapObject> object, int start_slot_index,
CHECK_EQ(current, end_slot_index);
}
-void Deserializer::ReadData(FullMaybeObjectSlot start,
- FullMaybeObjectSlot end) {
+template <typename IsolateT>
+void Deserializer<IsolateT>::ReadData(FullMaybeObjectSlot start,
+ FullMaybeObjectSlot end) {
FullMaybeObjectSlot current = start;
while (current < end) {
byte data = source_.Get();
@@ -786,9 +857,10 @@ void Deserializer::ReadData(FullMaybeObjectSlot start,
CHECK_EQ(current, end);
}
+template <typename IsolateT>
template <typename SlotAccessor>
-int Deserializer::ReadSingleBytecodeData(byte data,
- SlotAccessor slot_accessor) {
+int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
+ SlotAccessor slot_accessor) {
using TSlot = decltype(slot_accessor.slot());
switch (data) {
@@ -841,8 +913,8 @@ int Deserializer::ReadSingleBytecodeData(byte data,
int cache_index = source_.GetInt();
// TODO(leszeks): Could we use the address of the startup_object_cache
// entry as a Handle backing?
- HeapObject heap_object =
- HeapObject::cast(isolate()->startup_object_cache()->at(cache_index));
+ HeapObject heap_object = HeapObject::cast(
+ main_thread_isolate()->startup_object_cache()->at(cache_index));
return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
@@ -989,7 +1061,7 @@ int Deserializer::ReadSingleBytecodeData(byte data,
DisallowGarbageCollection no_gc;
Code code = Code::cast(*slot_accessor.object());
- RelocInfoVisitor visitor(this, &preserialized_objects);
+ DeserializerRelocInfoVisitor visitor(this, &preserialized_objects);
for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
!it.done(); it.next()) {
it.rinfo()->Visit(&visitor);
@@ -1007,11 +1079,10 @@ int Deserializer::ReadSingleBytecodeData(byte data,
}
case kOffHeapBackingStore: {
- AlwaysAllocateScope scope(isolate()->heap());
int byte_length = source_.GetInt();
- std::unique_ptr<BackingStore> backing_store =
- BackingStore::Allocate(isolate(), byte_length, SharedFlag::kNotShared,
- InitializedFlag::kUninitialized);
+ std::unique_ptr<BackingStore> backing_store = BackingStore::Allocate(
+ main_thread_isolate(), byte_length, SharedFlag::kNotShared,
+ InitializedFlag::kUninitialized);
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
backing_stores_.push_back(std::move(backing_store));
@@ -1022,11 +1093,11 @@ int Deserializer::ReadSingleBytecodeData(byte data,
case kApiReference: {
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address;
- if (isolate()->api_external_references()) {
+ if (main_thread_isolate()->api_external_references()) {
DCHECK_WITH_MSG(reference_id < num_api_references_,
"too few external references provided through the API");
address = static_cast<Address>(
- isolate()->api_external_references()[reference_id]);
+ main_thread_isolate()->api_external_references()[reference_id]);
} else {
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
@@ -1117,9 +1188,11 @@ int Deserializer::ReadSingleBytecodeData(byte data,
#undef CASE_R2
#undef CASE_R1
-Address Deserializer::ReadExternalReferenceCase() {
+template <typename IsolateT>
+Address Deserializer<IsolateT>::ReadExternalReferenceCase() {
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- return isolate()->external_reference_table()->address(reference_id);
+ return main_thread_isolate()->external_reference_table()->address(
+ reference_id);
}
namespace {
@@ -1137,8 +1210,9 @@ AllocationType SpaceToType(SnapshotSpace space) {
}
} // namespace
-HeapObject Deserializer::Allocate(SnapshotSpace space, int size,
- AllocationAlignment alignment) {
+template <typename IsolateT>
+HeapObject Deserializer<IsolateT>::Allocate(SnapshotSpace space, int size,
+ AllocationAlignment alignment) {
#ifdef DEBUG
if (!previous_allocation_obj_.is_null()) {
// Make sure that the previous object is initialized sufficiently to
@@ -1148,8 +1222,8 @@ HeapObject Deserializer::Allocate(SnapshotSpace space, int size,
}
#endif
- HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
- size, SpaceToType(space), AllocationOrigin::kRuntime, alignment);
+ HeapObject obj = HeapObject::FromAddress(isolate()->heap()->AllocateRawOrFail(
+ size, SpaceToType(space), AllocationOrigin::kRuntime, alignment));
#ifdef DEBUG
previous_allocation_obj_ = handle(obj, isolate());
@@ -1159,5 +1233,9 @@ HeapObject Deserializer::Allocate(SnapshotSpace space, int size,
return obj;
}
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Deserializer<Isolate>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Deserializer<LocalIsolate>;
+
} // namespace internal
} // namespace v8
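The explicit EXPORT_TEMPLATE_DEFINE instantiations pair with extern template declarations in deserializer.h, so the shared Deserializer logic is compiled once per isolate type. Below is a self-contained sketch of the same template-plus-explicit-instantiation shape, using invented stand-in types rather than the real Isolate/LocalIsolate:

// Stand-ins for the two isolate types; the background one exposes its owning
// main-thread isolate, mirroring main_thread_isolate() in the deserializer.
struct MainIsolate {};
struct BackgroundIsolate {
  explicit BackgroundIsolate(MainIsolate* main) : main_(main) {}
  MainIsolate* AsIsolate() const { return main_; }
  MainIsolate* main_;
};

template <typename IsolateT>
class GraphReader {
 public:
  explicit GraphReader(IsolateT* isolate) : isolate_(isolate) {}
  IsolateT* isolate() const { return isolate_; }

 private:
  IsolateT* isolate_;
};

// The header would declare `extern template class GraphReader<...>;` for both
// types; the single .cc file then instantiates them explicitly, as here.
template class GraphReader<MainIsolate>;
template class GraphReader<BackgroundIsolate>;

int main() {
  MainIsolate main_isolate;
  BackgroundIsolate background_isolate(&main_isolate);
  GraphReader<MainIsolate> on_thread(&main_isolate);
  GraphReader<BackgroundIsolate> off_thread(&background_isolate);
  return off_thread.isolate()->AsIsolate() == &main_isolate ? 0 : 1;
}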
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 6824d755d6..62ccfe3cae 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -8,7 +8,10 @@
#include <utility>
#include <vector>
+#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/execution/local-isolate.h"
#include "src/objects/allocation-site.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/backing-store.h"
@@ -39,7 +42,8 @@ class Object;
#endif
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
+template <typename IsolateT>
+class Deserializer : public SerializerDeserializer {
public:
~Deserializer() override;
Deserializer(const Deserializer&) = delete;
@@ -49,7 +53,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
protected:
// Create a deserializer from a snapshot byte source.
- Deserializer(Isolate* isolate, base::Vector<const byte> payload,
+ Deserializer(IsolateT* isolate, base::Vector<const byte> payload,
uint32_t magic_number, bool deserializing_user_code,
bool can_rehash);
@@ -79,7 +83,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
CHECK_EQ(new_off_heap_array_buffers().size(), 0);
}
- Isolate* isolate() const { return isolate_; }
+ IsolateT* isolate() const { return isolate_; }
+
+ Isolate* main_thread_isolate() const { return isolate_->AsIsolate(); }
SnapshotByteSource* source() { return &source_; }
const std::vector<Handle<AllocationSite>>& new_allocation_sites() const {
@@ -120,7 +126,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
Handle<HeapObject> ReadObject();
private:
- class RelocInfoVisitor;
+ friend class DeserializerRelocInfoVisitor;
// A circular queue of hot objects. This is added to in the same order as in
// Serializer::HotObjectsList, but this stores the objects as a vector of
// existing handles. This allows us to add Handles to the queue without having
@@ -196,7 +202,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
AllocationAlignment alignment);
// Cached current isolate.
- Isolate* isolate_;
+ IsolateT* isolate_;
// Objects from the attached object descriptions in the serialized user code.
std::vector<Handle<HeapObject>> attached_objects_;
@@ -253,19 +259,27 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
#endif // DEBUG
};
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Deserializer<Isolate>;
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Deserializer<LocalIsolate>;
+
// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey final : public StringTableKey {
public:
- explicit StringTableInsertionKey(Handle<String> string);
+ explicit StringTableInsertionKey(Isolate* isolate, Handle<String> string);
+ explicit StringTableInsertionKey(LocalIsolate* isolate,
+ Handle<String> string);
- bool IsMatch(Isolate* isolate, String string);
+ template <typename IsolateT>
+ bool IsMatch(IsolateT* isolate, String string);
- V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate);
- V8_WARN_UNUSED_RESULT Handle<String> AsHandle(LocalIsolate* isolate);
+ template <typename IsolateT>
+ V8_WARN_UNUSED_RESULT Handle<String> AsHandle(IsolateT* isolate) {
+ return string_;
+ }
private:
- uint32_t ComputeRawHashField(String string);
-
Handle<String> string_;
DISALLOW_GARBAGE_COLLECTION(no_gc)
};
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 4065e4a7eb..41cd9dbca0 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -29,7 +29,7 @@ const char* DirectiveAsString(DataDirective directive) {
} // namespace
void PlatformEmbeddedFileWriterAIX::SectionText() {
- fprintf(fp_, ".csect .text[PR]\n");
+ fprintf(fp_, ".csect [GL], 5\n");
}
void PlatformEmbeddedFileWriterAIX::SectionData() {
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index d5ce8cc6e9..dd1d30af15 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -7,6 +7,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/local-factory-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects.h"
@@ -34,14 +35,6 @@ ObjectDeserializer::DeserializeSharedFunctionInfo(
: MaybeHandle<SharedFunctionInfo>();
}
-MaybeHandle<SharedFunctionInfo>
-ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
- LocalIsolate* isolate, const SerializedCodeData* data,
- Handle<String> source) {
- // TODO(leszeks): Add LocalHeap support to deserializer
- UNREACHABLE();
-}
-
MaybeHandle<HeapObject> ObjectDeserializer::Deserialize() {
DCHECK(deserializing_user_code());
HandleScope scope(isolate());
@@ -102,5 +95,56 @@ void ObjectDeserializer::LinkAllocationSites() {
}
}
+OffThreadObjectDeserializer::OffThreadObjectDeserializer(
+ LocalIsolate* isolate, const SerializedCodeData* data)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), true,
+ false) {}
+
+MaybeHandle<SharedFunctionInfo>
+OffThreadObjectDeserializer::DeserializeSharedFunctionInfo(
+ LocalIsolate* isolate, const SerializedCodeData* data,
+ std::vector<Handle<Script>>* deserialized_scripts) {
+ OffThreadObjectDeserializer d(isolate, data);
+
+ // Attach the empty string as the source.
+ d.AddAttachedObject(isolate->factory()->empty_string());
+
+ Handle<HeapObject> result;
+ if (!d.Deserialize(deserialized_scripts).ToHandle(&result)) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ return Handle<SharedFunctionInfo>::cast(result);
+}
+
+MaybeHandle<HeapObject> OffThreadObjectDeserializer::Deserialize(
+ std::vector<Handle<Script>>* deserialized_scripts) {
+ DCHECK(deserializing_user_code());
+ LocalHandleScope scope(isolate());
+ Handle<HeapObject> result;
+ {
+ result = ReadObject();
+ DeserializeDeferredObjects();
+ CHECK(new_code_objects().empty());
+ CHECK(new_allocation_sites().empty());
+ CHECK(new_maps().empty());
+ WeakenDescriptorArrays();
+ }
+
+ Rehash();
+ CHECK(new_off_heap_array_buffers().empty());
+
+ // TODO(leszeks): Figure out a better way of dealing with scripts.
+ CHECK_EQ(new_scripts().size(), 1);
+ for (Handle<Script> script : new_scripts()) {
+ // Assign a new script id to avoid collision.
+ script->set_id(isolate()->GetNextScriptId());
+ LogScriptEvents(*script);
+ deserialized_scripts->push_back(
+ isolate()->heap()->NewPersistentHandle(script));
+ }
+
+ return scope.CloseAndEscape(result);
+}
+
} // namespace internal
} // namespace v8
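OffThreadObjectDeserializer::Deserialize escapes its scripts through NewPersistentHandle so they remain valid after the LocalIsolate's handle scopes unwind; FinishOffThreadDeserialize later adopts them on the main thread. A small standalone analogue of that handoff (all names hypothetical):

#include <memory>
#include <utility>
#include <vector>

// Stand-in for a detached block of persistent handles: the only state that
// outlives the background thread's local handle scopes.
struct PersistentBlock {
  std::vector<int> script_ids;
};

struct OffThreadResult {
  std::unique_ptr<PersistentBlock> persistent;
};

// Background phase: record results only through the persistent block.
OffThreadResult DeserializeOffThread() {
  OffThreadResult result;
  result.persistent = std::make_unique<PersistentBlock>();
  result.persistent->script_ids.push_back(42);  // e.g. a freshly assigned id
  return result;
}

// Main-thread phase: adopt the block, then continue with ordinary state.
int FinishOnMainThread(OffThreadResult&& result) {
  std::unique_ptr<PersistentBlock> adopted = std::move(result.persistent);
  return adopted->script_ids.front();
}

int main() {
  OffThreadResult result = DeserializeOffThread();
  return FinishOnMainThread(std::move(result)) == 42 ? 0 : 1;
}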
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index 6ba79147f5..ad1a1b523d 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -14,13 +14,10 @@ class SerializedCodeData;
class SharedFunctionInfo;
// Deserializes the object graph rooted at a given object.
-class ObjectDeserializer final : public Deserializer {
+class ObjectDeserializer final : public Deserializer<Isolate> {
public:
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
- static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfoOffThread(
- LocalIsolate* isolate, const SerializedCodeData* data,
- Handle<String> source);
private:
explicit ObjectDeserializer(Isolate* isolate, const SerializedCodeData* data);
@@ -32,6 +29,22 @@ class ObjectDeserializer final : public Deserializer {
void CommitPostProcessedObjects();
};
+// Deserializes the object graph rooted at a given object.
+class OffThreadObjectDeserializer final : public Deserializer<LocalIsolate> {
+ public:
+ static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
+ LocalIsolate* isolate, const SerializedCodeData* data,
+ std::vector<Handle<Script>>* deserialized_scripts);
+
+ private:
+ explicit OffThreadObjectDeserializer(LocalIsolate* isolate,
+ const SerializedCodeData* data);
+
+ // Deserialize an object graph. Fail gracefully.
+ MaybeHandle<HeapObject> Deserialize(
+ std::vector<Handle<Script>>* deserialized_scripts);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/read-only-deserializer.h b/deps/v8/src/snapshot/read-only-deserializer.h
index c546c234ff..2d9e8c4d82 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.h
+++ b/deps/v8/src/snapshot/read-only-deserializer.h
@@ -14,7 +14,7 @@ namespace internal {
// Deserializes the read-only blob, creating the read-only roots and the
// Read-only object cache used by the other deserializers.
-class ReadOnlyDeserializer final : public Deserializer {
+class ReadOnlyDeserializer final : public Deserializer<Isolate> {
public:
explicit ReadOnlyDeserializer(Isolate* isolate, const SnapshotData* data,
bool can_rehash)
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 1bd9c0fabe..68fb1a01a6 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -890,6 +890,26 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
}
}
+void Serializer::ObjectSerializer::VisitCodePointer(HeapObject host,
+ CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // A version of VisitPointers() customized for CodeObjectSlot.
+ HandleScope scope(isolate());
+ DisallowGarbageCollection no_gc;
+
+ // TODO(v8:11880): support external code space.
+ PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
+ Object contents = slot.load(code_cage_base);
+ DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(contents.ptr()));
+ DCHECK(contents.IsCode());
+
+ Handle<HeapObject> obj = handle(HeapObject::cast(contents), isolate());
+ if (!serializer_->SerializePendingObject(obj)) {
+ serializer_->SerializeObject(obj);
+ }
+ bytes_processed_so_far_ += kTaggedSize;
+}
+
void Serializer::ObjectSerializer::OutputExternalReference(Address target,
int target_size,
bool sandboxify) {
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index d040b4848f..82b1d8ed1e 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -425,6 +425,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
ObjectSlot end) override;
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override;
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
void VisitEmbeddedPointer(Code host, RelocInfo* target) override;
void VisitExternalReference(Foreign host, Address* p) override;
void VisitExternalReference(Code host, RelocInfo* rinfo) override;
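VisitCodePointer is a new ObjectVisitor hook for the experimental external code space; the serializer's override above treats the slot as a strong tagged reference and advances its cursor by one tagged word. A toy visitor override in the same shape (simplified interface and stand-in types; the word size below is an assumption for the sketch):

struct CodeObjectSlot {
  long raw;
};

// Minimal stand-in for the visitor interface; the real ObjectVisitor has many
// more hooks and operates on V8 heap objects.
class ObjectVisitor {
 public:
  virtual ~ObjectVisitor() = default;
  virtual void VisitCodePointer(CodeObjectSlot slot) = 0;
};

class CountingVisitor final : public ObjectVisitor {
 public:
  void VisitCodePointer(CodeObjectSlot slot) override {
    (void)slot;
    bytes_processed_ += kTaggedSize;  // one tagged word per code pointer slot
  }
  int bytes_processed() const { return bytes_processed_; }

 private:
  static constexpr int kTaggedSize = 8;  // assumed tagged word size
  int bytes_processed_ = 0;
};

int main() {
  CountingVisitor visitor;
  visitor.VisitCodePointer(CodeObjectSlot{0});
  return visitor.bytes_processed() == 8 ? 0 : 1;
}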
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index dcf7905eee..faeacee8ca 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -85,7 +85,7 @@ void StartupDeserializer::DeserializeStringTable() {
// TODO(leszeks): Consider pre-sizing the string table.
for (int i = 0; i < string_table_size; ++i) {
Handle<String> string = Handle<String>::cast(ReadObject());
- StringTableInsertionKey key(string);
+ StringTableInsertionKey key(isolate(), string);
Handle<String> result =
isolate()->string_table()->LookupKey(isolate(), &key);
USE(result);
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index f744efc193..e6443af658 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
-class StartupDeserializer final : public Deserializer {
+class StartupDeserializer final : public Deserializer<Isolate> {
public:
explicit StartupDeserializer(Isolate* isolate,
const SnapshotData* startup_data,
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 67e361963d..66140a1455 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -365,7 +365,7 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
if (object_print_mode_ == kPrintObjectConcise) return;
DebugObjectCache* debug_object_cache =
isolate->string_stream_debug_object_cache();
- Add("==== Key ============================================\n\n");
+ Add("-- ObjectCacheKey --\n\n");
for (size_t i = 0; i < debug_object_cache->size(); i++) {
HeapObject printee = *(*debug_object_cache)[i];
Add(" #%d# %p: ", static_cast<int>(i),
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index bb0ae2b69e..57ff3ef4e1 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -88,7 +88,8 @@ static const char* const ANNOTATION_NO_VERIFIER = "@noVerifier";
static const char* const ANNOTATION_ABSTRACT = "@abstract";
static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT =
"@hasSameInstanceTypeAsParent";
-static const char* const ANNOTATION_GENERATE_CPP_CLASS = "@generateCppClass";
+static const char* const ANNOTATION_DO_NOT_GENERATE_CPP_CLASS =
+ "@doNotGenerateCppClass";
static const char* const ANNOTATION_CUSTOM_MAP = "@customMap";
static const char* const ANNOTATION_CUSTOM_CPP_CLASS = "@customCppClass";
static const char* const ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT =
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 5d0695f2ad..8320b62337 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -889,7 +889,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
child_results,
{ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
- ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
+ ANNOTATION_DO_NOT_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
ANNOTATION_CUSTOM_MAP, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
@@ -907,9 +907,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT)) {
flags |= ClassFlag::kHasSameInstanceTypeAsParent;
}
- if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) {
- flags |= ClassFlag::kGenerateCppClassDefinitions;
- }
+ bool do_not_generate_cpp_class =
+ annotations.Contains(ANNOTATION_DO_NOT_GENERATE_CPP_CLASS);
if (annotations.Contains(ANNOTATION_CUSTOM_CPP_CLASS)) {
flags |= ClassFlag::kCustomCppClass;
}
@@ -964,6 +963,14 @@ base::Optional<ParseResult> MakeClassDeclaration(
flags |= ClassFlag::kUndefinedLayout;
}
+ if (is_extern && body.has_value()) {
+ if (!do_not_generate_cpp_class) {
+ flags |= ClassFlag::kGenerateCppClassDefinitions;
+ }
+ } else if (do_not_generate_cpp_class) {
+ Lint("Annotation @doNotGenerateCppClass has no effect");
+ }
+
// Filter to only include fields that should be present based on decoration.
std::vector<ClassFieldExpression> fields;
std::copy_if(
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.cc b/deps/v8/src/trap-handler/handler-inside-posix.cc
index a75b5143fa..173e0ba3cd 100644
--- a/deps/v8/src/trap-handler/handler-inside-posix.cc
+++ b/deps/v8/src/trap-handler/handler-inside-posix.cc
@@ -39,10 +39,24 @@
#include "src/trap-handler/trap-handler-internal.h"
#include "src/trap-handler/trap-handler.h"
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+#include "src/trap-handler/trap-handler-simulator.h"
+#endif
+
namespace v8 {
namespace internal {
namespace trap_handler {
+#if V8_OS_LINUX
+#define CONTEXT_REG(reg, REG) &uc->uc_mcontext.gregs[REG_##REG]
+#elif V8_OS_MACOSX
+#define CONTEXT_REG(reg, REG) &uc->uc_mcontext->__ss.__##reg
+#elif V8_OS_FREEBSD
+#define CONTEXT_REG(reg, REG) &uc->uc_mcontext.mc_##reg
+#else
+#error "Unsupported platform."
+#endif
+
bool IsKernelGeneratedSignal(siginfo_t* info) {
// On macOS, only `info->si_code > 0` is relevant, because macOS leaves
// si_code at its default of 0 for signals that don’t originate in hardware.
@@ -72,11 +86,17 @@ class UnmaskOobSignalScope {
sigset_t old_mask_;
};
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+// This is the address where we continue on a failed "ProbeMemory". It's defined
+// in "handler-outside-simulators.cc".
+extern "C" char v8_probe_memory_continuation[];
+#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
+
bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
// Ensure the faulting thread was actually running Wasm code. This should be
- // the first check in the trap handler to guarantee that the IsThreadInWasm
- // flag is only set in wasm code. Otherwise a later signal handler is executed
- // with the flag set.
+ // the first check in the trap handler to guarantee that the
+ // g_thread_in_wasm_code flag is only set in wasm code. Otherwise a later
+ // signal handler is executed with the flag set.
if (!g_thread_in_wasm_code) return false;
// Clear g_thread_in_wasm_code, primarily to protect against nested faults.
@@ -102,23 +122,38 @@ bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
UnmaskOobSignalScope unmask_oob_signal;
ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
-#if V8_OS_LINUX && V8_TARGET_ARCH_X64
- auto* context_ip = &uc->uc_mcontext.gregs[REG_RIP];
-#elif V8_OS_MACOSX && V8_TARGET_ARCH_ARM64
- auto* context_ip = &uc->uc_mcontext->__ss.__pc;
-#elif V8_OS_MACOSX && V8_TARGET_ARCH_X64
- auto* context_ip = &uc->uc_mcontext->__ss.__rip;
-#elif V8_OS_FREEBSD && V8_TARGET_ARCH_X64
- auto* context_ip = &uc->uc_mcontext.mc_rip;
+#if V8_HOST_ARCH_X64
+ auto* context_ip = CONTEXT_REG(rip, RIP);
+#elif V8_HOST_ARCH_ARM64
+ auto* context_ip = CONTEXT_REG(pc, PC);
#else
-#error Unsupported platform
+#error "Unsupported architecture."
#endif
+
uintptr_t fault_addr = *context_ip;
uintptr_t landing_pad = 0;
+
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+ // Only handle signals triggered by the load in {ProbeMemory}.
+ if (fault_addr != reinterpret_cast<uintptr_t>(&ProbeMemory)) {
+ return false;
+ }
+
+ // The simulated ip will be in the second parameter register (%rsi).
+ auto* simulated_ip_reg = CONTEXT_REG(rsi, RSI);
+ if (!TryFindLandingPad(*simulated_ip_reg, &landing_pad)) return false;
+ TH_DCHECK(landing_pad != 0);
+
+ auto* return_reg = CONTEXT_REG(rax, RAX);
+ *return_reg = landing_pad;
+ // Continue at the memory probing continuation.
+ *context_ip = reinterpret_cast<uintptr_t>(&v8_probe_memory_continuation);
+#else
if (!TryFindLandingPad(fault_addr, &landing_pad)) return false;
// Tell the caller to return to the landing pad.
*context_ip = landing_pad;
+#endif
}
// We will return to wasm code, so restore the g_thread_in_wasm_code flag.
// This should only be done once the signal is blocked again (outside the
diff --git a/deps/v8/src/trap-handler/handler-outside-simulator.cc b/deps/v8/src/trap-handler/handler-outside-simulator.cc
new file mode 100644
index 0000000000..cc1e20ee21
--- /dev/null
+++ b/deps/v8/src/trap-handler/handler-outside-simulator.cc
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8config.h"
+#include "src/trap-handler/trap-handler-simulator.h"
+
+#if V8_OS_MACOSX
+#define SYMBOL(name) "_" #name
+#else // !V8_OS_MACOSX
+#define SYMBOL(name) #name
+#endif // !V8_OS_MACOSX
+
+// Define the ProbeMemory function declared in trap-handler-simulator.h.
+asm(
+ ".globl " SYMBOL(ProbeMemory) " \n"
+ SYMBOL(ProbeMemory) ": \n"
+ // First parameter (address) passed in %rdi.
+ // The second parameter (pc) is unused here. It is read by the trap handler
+ // instead.
+ " movb (%rdi), %al \n"
+ // Return 0 on success.
+ " xorl %eax, %eax \n"
+ // Place an additional "ret" here instead of falling through to the one
+ // below, because (some) toolchain(s) on Mac set ".subsections_via_symbols",
+ // which can cause the "ret" below to be placed elsewhere. An alternative
+ // prevention would be to add ".alt_entry" (see
+ // https://reviews.llvm.org/D79926), but just adding a "ret" is simpler.
+ " ret \n"
+ ".globl " SYMBOL(v8_probe_memory_continuation) "\n"
+ SYMBOL(v8_probe_memory_continuation) ": \n"
+ // If the trap handler continues here, it wrote the landing pad in %rax.
+ " ret \n");
diff --git a/deps/v8/src/trap-handler/trap-handler-simulator.h b/deps/v8/src/trap-handler/trap-handler-simulator.h
new file mode 100644
index 0000000000..bfceb49697
--- /dev/null
+++ b/deps/v8/src/trap-handler/trap-handler-simulator.h
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRAP_HANDLER_TRAP_HANDLER_SIMULATOR_H_
+#define V8_TRAP_HANDLER_TRAP_HANDLER_SIMULATOR_H_
+
+#include <cstdint>
+
+// This header defines the ProbeMemory function to be used by simulators to
+// trigger a signal at a defined location, before doing an actual memory access.
+
+// This implementation is only usable on an x64 host with non-x64 target (i.e. a
+// simulator build on x64).
+#if (!defined(_M_X64) && !defined(__x86_64__)) || defined(V8_TARGET_ARCH_X64)
+#error "Do only include this file on simulator builds on x64."
+#endif
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+// Probe a memory address by doing a 1-byte read from the given address. If the
+// address is not readable, this will cause a trap as usual, but the trap
+// handler will recognise the address of the instruction doing the access and
+// treat it specially. It will use the given {pc} to look up the respective
+// landing pad and return to this function to return that landing pad. If {pc}
+// is not registered as a protected instruction, the signal will be propagated
+// as usual.
+// If the read at {address} succeeds, this function returns {0} instead.
+extern "C" uintptr_t ProbeMemory(uintptr_t address, uintptr_t pc);
+
+} // namespace trap_handler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TRAP_HANDLER_TRAP_HANDLER_SIMULATOR_H_
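
A minimal sketch of how a simulator is expected to drive this interface, with illustrative helper names ({SimulatorReadWord}, {set_pc}) that are not part of this patch; only ProbeMemory itself comes from the header above:

// Sketch: simulator-side use of ProbeMemory; helper names are illustrative.
#include <cstdint>
#include "src/trap-handler/trap-handler-simulator.h"

void set_pc(uintptr_t pc);  // Hypothetical simulator helper, defined elsewhere.

uintptr_t SimulatorReadWord(uintptr_t address, uintptr_t simulated_pc) {
  // A failing 1-byte read traps inside ProbeMemory; the trap handler looks up
  // the landing pad registered for {simulated_pc} and makes ProbeMemory return
  // it via the continuation defined in handler-outside-simulator.cc.
  uintptr_t landing_pad =
      v8::internal::trap_handler::ProbeMemory(address, simulated_pc);
  if (landing_pad != 0) {
    // Out-of-bounds access: continue simulation at the wasm landing pad.
    set_pc(landing_pad);
    return 0;
  }
  // The address is readable; perform the real access.
  return *reinterpret_cast<uintptr_t*>(address);
}
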
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index a27ea236e7..0b3a6e0a70 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -17,16 +17,19 @@ namespace v8 {
namespace internal {
namespace trap_handler {
-#if V8_TARGET_ARCH_X64 && V8_OS_LINUX && !V8_OS_ANDROID
+// X64 on Linux, Windows, MacOS, FreeBSD.
+#if V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64 && \
+ ((V8_OS_LINUX && !V8_OS_ANDROID) || V8_OS_WIN || V8_OS_MACOSX || \
+ V8_OS_FREEBSD)
#define V8_TRAP_HANDLER_SUPPORTED true
-#elif V8_TARGET_ARCH_X64 && V8_OS_WIN
+// Arm64 (non-simulator) on Mac.
+#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_ARM64 && V8_OS_MACOSX
#define V8_TRAP_HANDLER_SUPPORTED true
-#elif V8_TARGET_ARCH_X64 && V8_OS_MACOSX
-#define V8_TRAP_HANDLER_SUPPORTED true
-#elif V8_TARGET_ARCH_X64 && V8_OS_FREEBSD
-#define V8_TRAP_HANDLER_SUPPORTED true
-#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64 && V8_OS_MACOSX
+// Arm64 simulator on x64 on Linux or Mac.
+#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && (V8_OS_LINUX || V8_OS_MACOSX)
+#define V8_TRAP_HANDLER_VIA_SIMULATOR
#define V8_TRAP_HANDLER_SUPPORTED true
+// Everything else is unsupported.
#else
#define V8_TRAP_HANDLER_SUPPORTED false
#endif
@@ -150,7 +153,7 @@ inline void ClearThreadInWasm() {
bool RegisterDefaultTrapHandler();
TH_EXPORT_PRIVATE void RemoveTrapHandler();
-size_t GetRecoveredTrapCount();
+TH_EXPORT_PRIVATE size_t GetRecoveredTrapCount();
} // namespace trap_handler
} // namespace internal
diff --git a/deps/v8/src/utils/v8dll-main.cc b/deps/v8/src/utils/v8dll-main.cc
index 6d7f390c8f..6b484cfc8e 100644
--- a/deps/v8/src/utils/v8dll-main.cc
+++ b/deps/v8/src/utils/v8dll-main.cc
@@ -13,7 +13,7 @@
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL, DWORD dwReason, LPVOID lpvReserved) {
// Do nothing.
- return TRUE;
+ return 1;
}
}
#endif // V8_OS_WIN
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index e2bd64c88f..6e2bacc043 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/baseline/liftoff-register.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -478,51 +479,76 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- // The frame_size includes the frame marker. The frame marker has already been
- // pushed on the stack though, so we don't need to allocate memory for it
- // anymore.
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
-
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- // Note: This check is only needed for simulator runs, but we run it
- // unconditionally to make sure that the simulator executes the same code
- // that's also executed on native hardware (see https://crbug.com/v8/11041).
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
- return;
- }
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset,
liftoff::kPatchInstructionsRequired);
-#if V8_OS_WIN
- if (frame_size > kStackPageSize) {
- // Generate OOL code (at the end of the function, where the current
- // assembler is pointing) to do the explicit stack limit check (see
- // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
- // visual-studio-6.0/aa227153(v=vs.60)).
- // At the function start, emit a jump to that OOL code (from {offset} to
- // {pc_offset()}).
- int ool_offset = pc_offset() - offset;
- patching_assembler.b(ool_offset - Instruction::kPcLoadDelta);
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.sub(sp, sp, Operand(frame_size));
patching_assembler.PadWithNops();
-
- // Now generate the OOL code.
- AllocateStackSpace(frame_size);
- // Jump back to the start of the function (from {pc_offset()} to {offset +
- // liftoff::kPatchInstructionsRequired * kInstrSize}).
- int func_start_offset =
- offset + liftoff::kPatchInstructionsRequired * kInstrSize - pc_offset();
- b(func_start_offset - Instruction::kPcLoadDelta);
return;
}
-#endif
- patching_assembler.sub(sp, sp, Operand(frame_size));
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+ patching_assembler.b(pc_offset() - offset - Instruction::kPcLoadDelta);
patching_assembler.PadWithNops();
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ UseScratchRegisterScope temps(this);
+ Register stack_limit = temps.Acquire();
+ ldr(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ ldr(stack_limit, MemOperand(stack_limit));
+ add(stack_limit, stack_limit, Operand(frame_size));
+ cmp(sp, stack_limit);
+ b(cs /* higher or same */, &continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ AllocateStackSpace(frame_size);
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ int func_start_offset =
+ offset + liftoff::kPatchInstructionsRequired * kInstrSize;
+ b(func_start_offset - pc_offset() - Instruction::kPcLoadDelta);
}
void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
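
The out-of-line check introduced above (and repeated with per-architecture variations in the files below) boils down to one comparison; a plain C++ restatement with illustrative names, not code from this patch:

#include <cstdint>

// Sketch: the large-frame stack check emitted out of line, restated in C++.
// {real_stack_limit} stands for the value read through the instance's
// kRealStackLimitAddressOffset slot; {configured_stack_size} corresponds to
// FLAG_stack_size * 1024.
bool LargeFrameFits(uintptr_t sp, uintptr_t real_stack_limit,
                    uintptr_t frame_size, uintptr_t configured_stack_size) {
  // Frames at least as large as the whole stack overflow unconditionally;
  // skipping the comparison also keeps the addition below from overflowing.
  if (frame_size >= configured_stack_size) return false;
  return sp >= real_stack_limit + frame_size;
}
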
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a80c0d3c30..a52370f293 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -8,6 +8,7 @@
#include "src/base/platform/wrappers.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -303,10 +304,11 @@ void LiftoffAssembler::AlignFrameSize() {
}
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- // The frame_size includes the frame marker. The frame marker has already been
- // pushed on the stack though, so we don't need to allocate memory for it
- // anymore.
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The stack pointer is required to be quadword aligned.
@@ -314,39 +316,66 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
DCHECK_EQ(frame_size, RoundUp(frame_size, kQuadWordSizeInBytes));
DCHECK(IsImmAddSub(frame_size));
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
- return;
- }
-#endif
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset, 1);
-#if V8_TARGET_OS_WIN
- if (frame_size > kStackPageSize) {
- // Generate OOL code (at the end of the function, where the current
- // assembler is pointing) to do the explicit stack limit check (see
- // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
- // visual-studio-6.0/aa227153(v=vs.60)).
- // At the function start, emit a jump to that OOL code (from {offset} to
- // {pc_offset()}).
- int ool_offset = pc_offset() - offset;
- patching_assembler.b(ool_offset >> kInstrSizeLog2);
-
- // Now generate the OOL code.
- Claim(frame_size, 1);
- // Jump back to the start of the function (from {pc_offset()} to {offset +
- // kInstrSize}).
- int func_start_offset = offset + kInstrSize - pc_offset();
- b(func_start_offset >> kInstrSizeLog2);
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.PatchSubSp(frame_size);
return;
}
-#endif
- patching_assembler.PatchSubSp(frame_size);
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+ patching_assembler.b((pc_offset() - offset) >> kInstrSizeLog2);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ UseScratchRegisterScope temps(this);
+ Register stack_limit = temps.AcquireX();
+ Ldr(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ldr(stack_limit, MemOperand(stack_limit));
+ Add(stack_limit, stack_limit, Operand(frame_size));
+ Cmp(sp, stack_limit);
+ B(hs /* higher or same */, &continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) Brk(0);
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::Claim}.
+ Claim(frame_size, 1);
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ int func_start_offset = offset + kInstrSize;
+ b((func_start_offset - pc_offset()) >> kInstrSizeLog2);
}
void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index d29963dea1..bb2fed83c6 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -9,8 +9,10 @@
#include "src/codegen/assembler.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/simd-shuffle.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -220,43 +222,83 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- // The frame_size includes the frame marker. The frame marker has already been
- // pushed on the stack though, so we don't need to allocate memory for it
- // anymore.
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
- DCHECK_EQ(frame_size % kSystemPointerSize, 0);
- // We can't run out of space, just pass anything big enough to not cause the
- // assembler to try to grow the buffer.
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ DCHECK_EQ(0, frame_size % kSystemPointerSize);
+
+ // We can't run out of space when patching, just pass anything big enough to
+ // not cause the assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
-#if V8_OS_WIN
- if (frame_size > kStackPageSize) {
- // Generate OOL code (at the end of the function, where the current
- // assembler is pointing) to do the explicit stack limit check (see
- // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
- // visual-studio-6.0/aa227153(v=vs.60)).
- // At the function start, emit a jump to that OOL code (from {offset} to
- // {pc_offset()}).
- int ool_offset = pc_offset() - offset;
- patching_assembler.jmp_rel(ool_offset);
- DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
- patching_assembler.Nop(liftoff::kSubSpSize -
- patching_assembler.pc_offset());
-
- // Now generate the OOL code.
- AllocateStackSpace(frame_size);
- // Jump back to the start of the function (from {pc_offset()} to {offset +
- // kSubSpSize}).
- int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
- jmp_rel(func_start_offset);
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.sub_sp_32(frame_size);
+ DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
return;
}
-#endif
- patching_assembler.sub_sp_32(frame_size);
- DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, framesize)} with a jump to OOL code that does this
+ // "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+ patching_assembler.jmp_rel(pc_offset() - offset);
+ DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
+ patching_assembler.Nop(liftoff::kSubSpSize - patching_assembler.pc_offset());
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ // We do not have a scratch register, so pick any and push it first.
+ Register stack_limit = eax;
+ push(stack_limit);
+ mov(stack_limit,
+ FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ mov(stack_limit, Operand(stack_limit, 0));
+ add(stack_limit, Immediate(frame_size));
+ cmp(esp, stack_limit);
+ pop(stack_limit);
+ j(above_equal, &continuation, Label::kNear);
+ }
+
+ wasm_call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ AllocateStackSpace(frame_size);
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ int func_start_offset = offset + liftoff::kSubSpSize;
+ jmp_rel(func_start_offset - pc_offset());
}
void LiftoffAssembler::FinishCode() {}
@@ -3862,47 +3904,19 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- XMMRegister shift = liftoff::kScratchDoubleReg;
XMMRegister tmp =
GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, lhs))
.fp();
+ Register scratch =
+ GetUnusedRegister(RegClass::kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
- // Take shift value modulo 64.
- and_(rhs.gp(), Immediate(63));
- Movd(shift, rhs.gp());
-
- // Set up a mask [0x80000000,0,0x80000000,0].
- Pcmpeqb(tmp, tmp);
- Psllq(tmp, tmp, byte{63});
-
- Psrlq(tmp, tmp, shift);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlq(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- psrlq(dst.fp(), shift);
- }
- Pxor(dst.fp(), tmp);
- Psubq(dst.fp(), tmp);
+ I64x2ShrS(dst.fp(), lhs.fp(), rhs.gp(), liftoff::kScratchDoubleReg, tmp,
+ scratch);
}
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- XMMRegister tmp = liftoff::kScratchDoubleReg;
- byte shift = rhs & 63;
-
- // Set up a mask [0x80000000,0,0x80000000,0].
- Pcmpeqb(tmp, tmp);
- Psllq(tmp, tmp, byte{63});
-
- Psrlq(tmp, tmp, shift);
- liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
- this, dst, lhs, rhs);
- Pxor(dst.fp(), tmp);
- Psubq(dst.fp(), tmp);
+ I64x2ShrS(dst.fp(), lhs.fp(), rhs & 0x3F, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 9ed45932b3..d445655dca 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -80,7 +80,7 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
#elif V8_TARGET_ARCH_PPC64
constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11);
+ Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11, cp);
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 724bd6f90f..581737979e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -561,6 +561,9 @@ void LiftoffAssembler::CacheState::DefineSafepointWithCalleeSavedRegisters(
safepoint.DefineRegister(slot.reg().gp().code());
}
}
+ if (cached_instance != no_reg) {
+ safepoint.DefineRegister(cached_instance.code());
+ }
}
int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index c27653bb95..19611fb0ee 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -654,7 +654,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void PrepareTailCall(int num_callee_stack_params,
int stack_param_delta);
inline void AlignFrameSize();
- inline void PatchPrepareStackFrame(int offset);
+ inline void PatchPrepareStackFrame(int offset, SafepointTableBuilder*);
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 57b6457c77..eeed531cf8 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -310,12 +310,6 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
return;
#endif
- // TODO(11235): On arm and arm64 there is still a limit on the size of
- // supported stack frames.
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
- if (strstr(detail, "Stack limited to 512 bytes")) return;
-#endif
-
#define LIST_FEATURE(name, ...) kFeature_##name,
constexpr WasmFeatures kExperimentalFeatures{
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
@@ -747,8 +741,6 @@ class LiftoffCompiler {
Register::from_code(
descriptor_->GetInputLocation(kInstanceParameterIndex)
.AsRegister()));
- // Store the instance parameter to a special stack slot.
- __ SpillInstance(kWasmInstanceRegister);
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
if (for_debugging_) __ ResetOSRTarget();
@@ -855,7 +847,7 @@ class LiftoffCompiler {
void GenerateOutOfLineCode(OutOfLineCode* ool) {
CODE_COMMENT(
- (std::string("out of line: ") + GetRuntimeStubName(ool->stub)).c_str());
+ (std::string("OOL: ") + GetRuntimeStubName(ool->stub)).c_str());
__ bind(ool->label.get());
const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
@@ -954,7 +946,8 @@ class LiftoffCompiler {
GenerateOutOfLineCode(&ool);
}
DCHECK_EQ(frame_size, __ GetTotalFrameSize());
- __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_);
+ __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
+ &safepoint_table_builder_);
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
// Emit the handler table.
@@ -1148,8 +1141,8 @@ class LiftoffCompiler {
}
void CatchException(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm,
- Control* block, base::Vector<Value> values) {
+ const TagIndexImmediate<validate>& imm, Control* block,
+ base::Vector<Value> values) {
DCHECK(block->is_try_catch());
__ emit_jump(block->label.get());
@@ -1178,7 +1171,7 @@ class LiftoffCompiler {
CODE_COMMENT("load expected exception tag");
Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, ExceptionsTable, pinned);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, TagsTable, pinned);
__ LoadTaggedPointer(
imm_tag, imm_tag, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
@@ -1196,8 +1189,7 @@ class LiftoffCompiler {
block->try_info->in_handler = true;
num_exceptions_++;
}
- GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
- imm.exception);
+ GetExceptionValues(decoder, __ cache_state()->stack_state.back(), imm.tag);
}
void Rethrow(FullDecoder* decoder,
@@ -4234,18 +4226,18 @@ class LiftoffCompiler {
void GetExceptionValues(FullDecoder* decoder,
LiftoffAssembler::VarState& exception_var,
- const WasmException* exception) {
+ const WasmTag* tag) {
LiftoffRegList pinned;
CODE_COMMENT("get exception values");
LiftoffRegister values_array = GetExceptionProperty(
exception_var, RootIndex::kwasm_exception_values_symbol);
pinned.set(values_array);
uint32_t index = 0;
- const WasmExceptionSig* sig = exception->sig;
+ const WasmTagSig* sig = tag->sig;
for (ValueType param : sig->parameters()) {
LoadExceptionValue(param.kind(), values_array, &index, pinned);
}
- DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
+ DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(tag));
}
void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
@@ -4280,12 +4272,12 @@ class LiftoffCompiler {
__ DropValues(1);
}
- void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
+ void Throw(FullDecoder* decoder, const TagIndexImmediate<validate>& imm,
const base::Vector<Value>& /* args */) {
LiftoffRegList pinned;
// Load the encoded size in a register for the builtin call.
- int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.exception);
+ int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.tag);
LiftoffRegister encoded_size_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(encoded_size_reg, WasmValue(encoded_size));
@@ -4307,7 +4299,7 @@ class LiftoffCompiler {
// first value, such that we can just pop them from the value stack.
CODE_COMMENT("fill values array");
int index = encoded_size;
- auto* sig = imm.exception->sig;
+ auto* sig = imm.tag->sig;
for (size_t param_idx = sig->parameter_count(); param_idx > 0;
--param_idx) {
ValueType type = sig->GetParam(param_idx - 1);
@@ -4319,7 +4311,7 @@ class LiftoffCompiler {
CODE_COMMENT("load exception tag");
LiftoffRegister exception_tag =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), ExceptionsTable, pinned);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), TagsTable, pinned);
__ LoadTaggedPointer(
exception_tag.gp(), exception_tag.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
@@ -6283,10 +6275,7 @@ constexpr base::EnumSet<ValueKind> LiftoffCompiler::kUnconditionallySupported;
WasmCompilationResult ExecuteLiftoffCompilation(
CompilationEnv* env, const FunctionBody& func_body, int func_index,
- ForDebugging for_debugging, Counters* counters, WasmFeatures* detected,
- base::Vector<const int> breakpoints,
- std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint,
- int32_t* max_steps, int32_t* nondeterminism) {
+ ForDebugging for_debugging, const LiftoffOptions& compiler_options) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
@@ -6302,20 +6291,25 @@ WasmCompilationResult ExecuteLiftoffCompilation(
// have to grow more often.
int initial_buffer_size = static_cast<int>(128 + code_size_estimate * 4 / 3);
std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
- if (debug_sidetable) {
+ if (compiler_options.debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
- DCHECK_IMPLIES(max_steps, for_debugging == kForDebugging);
+ DCHECK_IMPLIES(compiler_options.max_steps, for_debugging == kForDebugging);
+ WasmFeatures unused_detected_features;
WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
- &zone, env->module, env->enabled_features, detected, func_body,
- call_descriptor, env, &zone, NewAssemblerBuffer(initial_buffer_size),
- debug_sidetable_builder.get(), for_debugging, func_index, breakpoints,
- dead_breakpoint, max_steps, nondeterminism);
+ &zone, env->module, env->enabled_features,
+ compiler_options.detected_features ? compiler_options.detected_features
+ : &unused_detected_features,
+ func_body, call_descriptor, env, &zone,
+ NewAssemblerBuffer(initial_buffer_size), debug_sidetable_builder.get(),
+ for_debugging, func_index, compiler_options.breakpoints,
+ compiler_options.dead_breakpoint, compiler_options.max_steps,
+ compiler_options.nondeterminism);
decoder.Decode();
LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) compiler->OnFirstError(&decoder);
- if (counters) {
+ if (auto* counters = compiler_options.counters) {
// Check that the histogram for the bailout reasons has the correct size.
DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
DCHECK_EQ(kNumBailoutReasons - 1,
@@ -6339,7 +6333,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
result.func_index = func_index;
result.result_tier = ExecutionTier::kLiftoff;
result.for_debugging = for_debugging;
- if (debug_sidetable) {
+ if (auto* debug_sidetable = compiler_options.debug_sidetable) {
*debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index e01d617ea4..1e4f9215eb 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -53,12 +53,46 @@ enum LiftoffBailoutReason : int8_t {
kNumBailoutReasons
};
-V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
- CompilationEnv*, const FunctionBody&, int func_index, ForDebugging,
- Counters*, WasmFeatures* detected_features,
- base::Vector<const int> breakpoints = {},
- std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0,
- int32_t* max_steps = nullptr, int32_t* nondeterminism = nullptr);
+struct LiftoffOptions {
+ Counters* counters = nullptr;
+ WasmFeatures* detected_features = nullptr;
+ base::Vector<const int> breakpoints = {};
+ std::unique_ptr<DebugSideTable>* debug_sidetable = nullptr;
+ int dead_breakpoint = 0;
+ int32_t* max_steps = nullptr;
+ int32_t* nondeterminism = nullptr;
+
+ // We keep the macro as small as possible by offloading the actual DCHECK and
+ // assignment to another function. This makes debugging easier.
+#define SETTER(field) \
+ template <typename T> \
+ LiftoffOptions& set_##field(T new_value) { \
+ return Set<decltype(field)>(&field, new_value); \
+ }
+
+ SETTER(counters)
+ SETTER(detected_features)
+ SETTER(breakpoints)
+ SETTER(debug_sidetable)
+ SETTER(dead_breakpoint)
+ SETTER(max_steps)
+ SETTER(nondeterminism)
+
+#undef SETTER
+
+ private:
+ template <typename T>
+ LiftoffOptions& Set(T* ptr, T new_value) {
+ // The field must still have its default value.
+ DCHECK_EQ(*ptr, T{});
+ *ptr = new_value;
+ return *this;
+ }
+};
+
+V8_EXPORT_PRIVATE WasmCompilationResult
+ExecuteLiftoffCompilation(CompilationEnv*, const FunctionBody&, int func_index,
+ ForDebugging, const LiftoffOptions& = {});
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
const WasmCode*);
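
A call site migrated to this signature would chain the generated setters on a temporary and pass it by const reference; a sketch, assuming {env}, {func_body}, {func_index}, {for_debugging}, {counters} and {detected} already exist at the caller:

// Sketch of a caller using the new LiftoffOptions-based signature.
WasmCompilationResult result = ExecuteLiftoffCompilation(
    env, func_body, func_index, for_debugging,
    LiftoffOptions{}
        .set_counters(counters)
        .set_detected_features(detected));
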
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 48891ab08b..4ab036da8e 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -8,6 +8,7 @@
#include "src/base/platform/wrappers.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -304,12 +305,15 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
- // When constant that represents size of stack frame can't be represented
- // as 16bit we need three instructions to add it to sp, so we reserve space
- // for this case.
+ // When the frame size is bigger than 4KB, we need seven instructions for
+ // stack checking, so we reserve space for this case.
addiu(sp, sp, 0);
nop();
nop();
+ nop();
+ nop();
+ nop();
+ nop();
return offset;
}
@@ -338,11 +342,12 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- // The frame_size includes the frame marker. The frame marker has already been
- // pushed on the stack though, so we don't need to allocate memory for it
- // anymore.
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
@@ -350,10 +355,64 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
TurboAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
- // If bytes can be represented as 16bit, addiu will be generated and two
- // nops will stay untouched. Otherwise, lui-ori sequence will load it to
- // register and, as third instruction, addu will be generated.
- patching_assembler.Addu(sp, sp, Operand(-frame_size));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Addu(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Addu(sp, sp, -framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset - 3 * kInstrSize;
+ patching_assembler.BranchLong(imm32);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Lw(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Lw(stack_limit, MemOperand(stack_limit));
+ Addu(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP.
+ Addu(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Addu(sp, sp, -framesize)} (which
+ // is a jump now).
+ int func_start_offset = offset + 7 * kInstrSize;
+ imm32 = func_start_offset - pc_offset() - 3 * kInstrSize;
+ BranchLong(imm32);
}
void LiftoffAssembler::FinishCode() {}
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index c7a66ca754..0a23c190e9 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -9,6 +9,7 @@
#include "src/codegen/machine-type.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -291,12 +292,15 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
- // When constant that represents size of stack frame can't be represented
- // as 16bit we need three instructions to add it to sp, so we reserve space
- // for this case.
+ // When the frame size is bigger than 4KB, we need seven instructions for
+ // stack checking, so we reserve space for this case.
daddiu(sp, sp, 0);
nop();
nop();
+ nop();
+ nop();
+ nop();
+ nop();
return offset;
}
@@ -325,11 +329,12 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- // The frame_size includes the frame marker. The frame marker has already been
- // pushed on the stack though, so we don't need to allocate memory for it
- // anymore.
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
@@ -337,10 +342,64 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
TurboAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
- // If bytes can be represented as 16bit, daddiu will be generated and two
- // nops will stay untouched. Otherwise, lui-ori sequence will load it to
- // register and, as third instruction, daddu will be generated.
- patching_assembler.Daddu(sp, sp, Operand(-frame_size));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Daddu(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset - 3 * kInstrSize;
+ patching_assembler.BranchLong(imm32);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld(stack_limit, MemOperand(stack_limit));
+ Daddu(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP.
+ Daddu(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 7 * kInstrSize;
+ imm32 = func_start_offset - pc_offset() - 3 * kInstrSize;
+ BranchLong(imm32);
}
void LiftoffAssembler::FinishCode() {}
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index ff83b614e1..8e3808d259 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -99,8 +99,9 @@ inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
- bailout(kUnsupportedArchitecture, "PrepareStackFrame");
- return 0;
+ int offset = pc_offset();
+ addi(sp, sp, Operand::Zero());
+ return offset;
}
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
@@ -110,8 +111,28 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
+ SafepointTableBuilder*) {
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+#ifdef USE_SIMULATOR
+ // When using the simulator, deal with Liftoff which allocates the stack
+ // before checking it.
+ // TODO(arm): Remove this when the stack check mechanism is updated.
+ if (frame_size > KB / 2) {
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
+ return;
+ }
+#endif
+ if (!is_int16(-frame_size)) {
+ bailout(kOtherReason, "PPC subi overflow");
+ return;
+ }
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
+ patching_assembler.addi(sp, sp, Operand(-frame_size));
}
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
@@ -155,7 +176,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f32_boxed().get_scalar()));
+ mov(scratch, Operand(value.to_f64_boxed().get_scalar()));
MovInt64ToDouble(reg.fp(), scratch);
break;
}
@@ -227,7 +248,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Label write_barrier;
Label exit;
- CheckPageFlag(dst_addr, r0, MemoryChunk::kPointersFromHereAreInterestingMask,
+ CheckPageFlag(dst_addr, ip, MemoryChunk::kPointersFromHereAreInterestingMask,
ne, &write_barrier);
b(&exit);
bind(&write_barrier);
@@ -235,7 +256,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(src.gp(), src.gp());
}
- CheckPageFlag(src.gp(), r0, MemoryChunk::kPointersToHereAreInterestingMask,
+ CheckPageFlag(src.gp(), ip, MemoryChunk::kPointersToHereAreInterestingMask,
eq, &exit);
mov(ip, Operand(offset_imm));
add(ip, ip, dst_addr);
@@ -321,6 +342,8 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
break;
case LoadType::kS128Load:
+ bailout(kUnsupportedArchitecture, "SIMD");
+ break;
default:
UNREACHABLE();
}
@@ -688,158 +711,44 @@ void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size);
- DCHECK_EQ(0, size % 4);
+ DCHECK_EQ(0, size % 8);
RecordUsedSpillOffset(start + size);
// We need a zero reg. Always use r0 for that, and push it before to restore
// its value afterwards.
- push(r0);
- mov(r0, Operand(0));
if (size <= 36) {
// Special straight-line code for up to nine words. Generates one
// instruction per word.
- for (int offset = 4; offset <= size; offset += 4) {
- StoreU64(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
+ mov(ip, Operand::Zero());
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ StoreU64(ip, liftoff::GetStackSlot(start + remainder), r0);
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ StoreU32(ip, liftoff::GetStackSlot(start + remainder), r0);
}
} else {
- // General case for bigger counts (9 instructions).
- // Use r4 for start address (inclusive), r5 for end address (exclusive).
+ Label loop;
push(r4);
- push(r5);
- SubS64(r4, fp, Operand(start + size), r0);
- SubS64(r5, fp, Operand(start), r0);
- Label loop;
+ mov(r4, Operand(size / kSystemPointerSize));
+ mtctr(r4);
+
+ SubS64(r4, fp, Operand(start + size + kSystemPointerSize), r0);
+ mov(r0, Operand::Zero());
+
bind(&loop);
- StoreU64(r0, MemOperand(r0));
- addi(r0, r0, Operand(kSystemPointerSize));
- CmpS64(r4, r5);
- bne(&loop);
+ StoreU64WithUpdate(r0, MemOperand(r4, kSystemPointerSize));
+ bdnz(&loop);
pop(r4);
- pop(r5);
}
-
- pop(r0);
}
-#define UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- bailout(kUnsupportedArchitecture, "i32 binop:: " #name); \
- }
-#define UNIMPLEMENTED_I32_BINOP_I(name) \
- UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
- }
-#define UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- LiftoffRegister rhs) { \
- bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
- }
-#define UNIMPLEMENTED_I64_BINOP_I(name) \
- UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
- LiftoffRegister lhs, int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i64_i binop: " #name); \
- }
-#define UNIMPLEMENTED_GP_UNOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src) { \
- bailout(kUnsupportedArchitecture, "gp unop: " #name); \
- }
-#define UNIMPLEMENTED_FP_BINOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
- DoubleRegister rhs) { \
- bailout(kUnsupportedArchitecture, "fp binop: " #name); \
- }
-#define UNIMPLEMENTED_FP_UNOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- bailout(kUnsupportedArchitecture, "fp unop: " #name); \
- }
-#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
- bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- bailout(kUnsupportedArchitecture, "fp unop: " #name); \
- return true; \
- }
-#define UNIMPLEMENTED_I32_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
- int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- }
-#define UNIMPLEMENTED_I64_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
- LiftoffRegister src, int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
- }
-
-UNIMPLEMENTED_I32_BINOP_I(i32_add)
-UNIMPLEMENTED_I32_BINOP_I(i32_sub)
-UNIMPLEMENTED_I32_BINOP(i32_mul)
-UNIMPLEMENTED_I32_BINOP_I(i32_and)
-UNIMPLEMENTED_I32_BINOP_I(i32_or)
-UNIMPLEMENTED_I32_BINOP_I(i32_xor)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
-UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_mul)
-#ifdef V8_TARGET_ARCH_PPC64
-UNIMPLEMENTED_I64_BINOP_I(i64_and)
-UNIMPLEMENTED_I64_BINOP_I(i64_or)
-UNIMPLEMENTED_I64_BINOP_I(i64_xor)
-#endif
-UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
-UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
-UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_copysign)
-UNIMPLEMENTED_FP_UNOP(f32_abs)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f32_sqrt)
-UNIMPLEMENTED_FP_BINOP(f64_add)
-UNIMPLEMENTED_FP_BINOP(f64_sub)
-UNIMPLEMENTED_FP_BINOP(f64_mul)
-UNIMPLEMENTED_FP_BINOP(f64_div)
-UNIMPLEMENTED_FP_BINOP(f64_copysign)
-UNIMPLEMENTED_FP_UNOP(f64_abs)
-UNIMPLEMENTED_FP_UNOP(f64_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f64_sqrt)
-
-#undef UNIMPLEMENTED_I32_BINOP
-#undef UNIMPLEMENTED_I32_BINOP_I
-#undef UNIMPLEMENTED_I64_BINOP
-#undef UNIMPLEMENTED_I64_BINOP_I
-#undef UNIMPLEMENTED_GP_UNOP
-#undef UNIMPLEMENTED_FP_BINOP
-#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
-#undef UNIMPLEMENTED_I32_SHIFTOP
-#undef UNIMPLEMENTED_I64_SHIFTOP
-
#define SIGN_EXT(r) extsw(r, r)
+#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
#define REGISTER_AND_WITH_1F \
([&](Register rhs) { \
@@ -850,9 +759,46 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#define LFR_TO_REG(reg) reg.gp()
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
-#define UNOP_LIST(V) \
- V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
- V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+#define UNOP_LIST(V) \
+ V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
+ void) \
+ V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
+ void) \
+ V(f32_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
+ void) \
+ V(f32_floor, frim, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
+ true, bool) \
+ V(f32_ceil, frip, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
+ true, bool) \
+ V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
+ true, bool) \
+ V(f32_nearest_int, frin, DoubleRegister, DoubleRegister, , , \
+ ROUND_F64_TO_F32, true, bool) \
+ V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_nearest_int, frin, DoubleRegister, DoubleRegister, , , USE, true, \
+ bool) \
+ V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
+ V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
+ V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(u32_to_intptr, ZeroExtWord32, Register, Register, , , USE, , void) \
+ V(i32_signextend_i8, extsb, Register, Register, , , USE, , void) \
+ V(i32_signextend_i16, extsh, Register, Register, , , USE, , void) \
+ V(i64_signextend_i8, extsb, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, , void) \
+ V(i64_signextend_i16, extsh, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, , void) \
+ V(i64_signextend_i32, extsw, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, , void) \
+ V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
+ V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
LFR_TO_REG, USE, true, bool)
#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
@@ -870,21 +816,90 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
-#define BINOP_LIST(V) \
- V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void)
+#define BINOP_LIST(V) \
+ V(f32_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
+ , ROUND_F64_TO_F32, , void) \
+ V(f64_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
+ , USE, , void) \
+ V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
+ V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
scast2, rcast, ret, return_type) \
@@ -956,22 +971,6 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
-void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_clz");
-}
-
-void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_ctz");
-}
-
-void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
-#ifdef V8_TARGET_ARCH_PPC64
- bailout(kUnsupportedArchitecture, "emit_u32_to_intptr");
-#else
-// This is a nop on ppc32.
-#endif
-}
-
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
@@ -979,29 +978,6 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
-void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
-}
-
-void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
-}
-
-void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
-}
-
-void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
-}
-
-void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
-}
-
void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
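
The ppc hunks above replace the per-opcode bailout stubs with list macros (UNOP_LIST/BINOP_LIST) that are expanded once by EMIT_UNOP_FUNCTION/EMIT_BINOP_FUNCTION. A compact, self-contained sketch of that X-macro pattern follows; the struct and instruction names (DemoAssembler, DemoAdd32, DemoSub32) are made up for illustration and are not V8 identifiers.

struct DemoAssembler {
  void DemoAdd32(int dst, int lhs, int rhs) { /* would emit an add */ }
  void DemoSub32(int dst, int lhs, int rhs) { /* would emit a sub */ }

  // One row per wasm opcode: V(name, macro-assembler instruction). Adding an
  // opcode means adding a row, not writing another function body by hand.
#define DEMO_BINOP_LIST(V) \
  V(i32_add, DemoAdd32)    \
  V(i32_sub, DemoSub32)

#define DEMO_EMIT_BINOP(name, instr)            \
  void emit_##name(int dst, int lhs, int rhs) { \
    instr(dst, lhs, rhs);                       \
  }
  DEMO_BINOP_LIST(DEMO_EMIT_BINOP)
#undef DEMO_EMIT_BINOP
#undef DEMO_BINOP_LIST
};
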
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 8b7b0b83e1..fef59471c1 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -8,6 +8,7 @@
#include "src/base/platform/wrappers.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -277,10 +278,9 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
- // When constant that represents size of stack frame can't be represented
- // as 16bit we need three instructions to add it to sp, so we reserve space
- // for this case.
- Add64(sp, sp, Operand(0L));
+ // When the frame size is bigger than 4KB, we need two instructions for
+ // stack checking, so we reserve space for this case.
+ addi(sp, sp, 0);
nop();
nop();
return offset;
@@ -311,18 +311,76 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
TurboAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
- // If bytes can be represented as 16bit, addi will be generated and two
- // nops will stay untouched. Otherwise, lui-ori sequence will load it to
- // register and, as third instruction, daddu will be generated.
- patching_assembler.Add64(sp, sp, Operand(-frame_size));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Add64(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset;
+ patching_assembler.GenPCRelativeJump(kScratchReg, imm32);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld(stack_limit, MemOperand(stack_limit));
+ Add64(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+  // Now allocate the stack space by decrementing the SP.
+ Add64(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 2 * kInstrSize;
+ imm32 = func_start_offset - pc_offset();
+ GenPCRelativeJump(kScratchReg, imm32);
}
void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
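
The new riscv64 PatchPrepareStackFrame above only patches a direct SP adjustment for small frames; for frames of 4KB or more it branches to out-of-line code that checks the remaining stack before allocating, so there is still room left to throw the stack-overflow exception. The following is a rough plain-C++ mirror of that decision, assuming a downward-growing stack; the names (AllocateWasmFrameDemo, real_stack_limit, TrapStackOverflow) are illustrative and do not appear in V8.

#include <cstdint>

void AllocateWasmFrameDemo(uintptr_t& stack_pointer,
                           uintptr_t real_stack_limit, uintptr_t frame_size,
                           uintptr_t max_stack_size,
                           void (*TrapStackOverflow)()) {
  if (frame_size < 4 * 1024) {
    stack_pointer -= frame_size;  // small frame: the prologue just bumps SP
    return;
  }
  // Large frame: if it cannot fit at all, trap unconditionally (which also
  // sidesteps an integer-overflow check); otherwise make sure enough stack
  // remains *before* allocating, so an overflow can still be thrown.
  if (frame_size >= max_stack_size ||
      stack_pointer < real_stack_limit + frame_size) {
    TrapStackOverflow();  // corresponds to the WasmStackOverflow stub call
    return;
  }
  stack_pointer -= frame_size;
}
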
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index e78b9c5f61..722b0b074b 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -122,8 +122,9 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
+ SafepointTableBuilder*) {
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
constexpr int LayInstrSize = 6;
@@ -2142,6 +2143,136 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
+#define SIMD_BINOP_LIST(V) \
+ V(f64x2_add, F64x2Add) \
+ V(f64x2_sub, F64x2Sub) \
+ V(f64x2_mul, F64x2Mul) \
+ V(f64x2_div, F64x2Div) \
+ V(f64x2_min, F64x2Min) \
+ V(f64x2_max, F64x2Max) \
+ V(f64x2_eq, F64x2Eq) \
+ V(f64x2_ne, F64x2Ne) \
+ V(f64x2_lt, F64x2Lt) \
+ V(f64x2_le, F64x2Le) \
+ V(f32x4_add, F32x4Add) \
+ V(f32x4_sub, F32x4Sub) \
+ V(f32x4_mul, F32x4Mul) \
+ V(f32x4_div, F32x4Div) \
+ V(f32x4_min, F32x4Min) \
+ V(f32x4_max, F32x4Max) \
+ V(f32x4_eq, F32x4Eq) \
+ V(f32x4_ne, F32x4Ne) \
+ V(f32x4_lt, F32x4Lt) \
+ V(f32x4_le, F32x4Le) \
+ V(i64x2_add, I64x2Add) \
+ V(i64x2_sub, I64x2Sub) \
+ V(i64x2_mul, I64x2Mul) \
+ V(i64x2_eq, I64x2Eq) \
+ V(i64x2_ne, I64x2Ne) \
+ V(i64x2_gt_s, I64x2GtS) \
+ V(i64x2_ge_s, I64x2GeS) \
+ V(i32x4_add, I32x4Add) \
+ V(i32x4_sub, I32x4Sub) \
+ V(i32x4_mul, I32x4Mul) \
+ V(i32x4_eq, I32x4Eq) \
+ V(i32x4_ne, I32x4Ne) \
+ V(i32x4_gt_s, I32x4GtS) \
+ V(i32x4_ge_s, I32x4GeS) \
+ V(i32x4_gt_u, I32x4GtU) \
+ V(i32x4_ge_u, I32x4GeU) \
+ V(i32x4_min_s, I32x4MinS) \
+ V(i32x4_min_u, I32x4MinU) \
+ V(i32x4_max_s, I32x4MaxS) \
+ V(i32x4_max_u, I32x4MaxU) \
+ V(i16x8_add, I16x8Add) \
+ V(i16x8_sub, I16x8Sub) \
+ V(i16x8_mul, I16x8Mul) \
+ V(i16x8_eq, I16x8Eq) \
+ V(i16x8_ne, I16x8Ne) \
+ V(i16x8_gt_s, I16x8GtS) \
+ V(i16x8_ge_s, I16x8GeS) \
+ V(i16x8_gt_u, I16x8GtU) \
+ V(i16x8_ge_u, I16x8GeU) \
+ V(i16x8_min_s, I16x8MinS) \
+ V(i16x8_min_u, I16x8MinU) \
+ V(i16x8_max_s, I16x8MaxS) \
+ V(i16x8_max_u, I16x8MaxU) \
+ V(i8x16_add, I8x16Add) \
+ V(i8x16_sub, I8x16Sub) \
+ V(i8x16_eq, I8x16Eq) \
+ V(i8x16_ne, I8x16Ne) \
+ V(i8x16_gt_s, I8x16GtS) \
+ V(i8x16_ge_s, I8x16GeS) \
+ V(i8x16_gt_u, I8x16GtU) \
+ V(i8x16_ge_u, I8x16GeU) \
+ V(i8x16_min_s, I8x16MinS) \
+ V(i8x16_min_u, I8x16MinU) \
+ V(i8x16_max_s, I8x16MaxS) \
+ V(i8x16_max_u, I8x16MaxU)
+
+#define EMIT_SIMD_BINOP(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ op(dst.fp(), lhs.fp(), rhs.fp()); \
+ }
+SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
+#undef EMIT_SIMD_BINOP
+#undef SIMD_BINOP_LIST
+
+#define SIMD_UNOP_LIST(V) \
+ V(f64x2_splat, F64x2Splat, fp, fp) \
+ V(f32x4_splat, F32x4Splat, fp, fp) \
+ V(i64x2_splat, I64x2Splat, fp, gp) \
+ V(i32x4_splat, I32x4Splat, fp, gp) \
+ V(i16x8_splat, I16x8Splat, fp, gp) \
+ V(i8x16_splat, I8x16Splat, fp, gp)
+
+#define EMIT_SIMD_UNOP(name, op, dtype, stype) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ op(dst.dtype(), src.stype()); \
+ }
+SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
+#undef EMIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
+
+#define SIMD_EXTRACT_LANE_LIST(V) \
+ V(f64x2_extract_lane, F64x2ExtractLane, fp) \
+ V(f32x4_extract_lane, F32x4ExtractLane, fp) \
+ V(i64x2_extract_lane, I64x2ExtractLane, gp) \
+ V(i32x4_extract_lane, I32x4ExtractLane, gp) \
+ V(i16x8_extract_lane_u, I16x8ExtractLaneU, gp) \
+ V(i16x8_extract_lane_s, I16x8ExtractLaneS, gp) \
+ V(i8x16_extract_lane_u, I8x16ExtractLaneU, gp) \
+ V(i8x16_extract_lane_s, I8x16ExtractLaneS, gp)
+
+#define EMIT_SIMD_EXTRACT_LANE(name, op, dtype) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
+ uint8_t imm_lane_idx) { \
+ op(dst.dtype(), src.fp(), imm_lane_idx); \
+ }
+SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
+#undef EMIT_SIMD_EXTRACT_LANE
+#undef SIMD_EXTRACT_LANE_LIST
+
+#define SIMD_REPLACE_LANE_LIST(V) \
+ V(f64x2_replace_lane, F64x2ReplaceLane, fp) \
+ V(f32x4_replace_lane, F32x4ReplaceLane, fp) \
+ V(i64x2_replace_lane, I64x2ReplaceLane, gp) \
+ V(i32x4_replace_lane, I32x4ReplaceLane, gp) \
+ V(i16x8_replace_lane, I16x8ReplaceLane, gp) \
+ V(i8x16_replace_lane, I8x16ReplaceLane, gp)
+
+#define EMIT_SIMD_REPLACE_LANE(name, op, stype) \
+ void LiftoffAssembler::emit_##name( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
+ uint8_t imm_lane_idx) { \
+ op(dst.fp(), src1.fp(), src2.stype(), imm_lane_idx); \
+ }
+SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
+#undef EMIT_SIMD_REPLACE_LANE
+#undef SIMD_REPLACE_LANE_LIST
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
@@ -2170,24 +2301,6 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16_swizzle");
}
-void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f64x2splat");
-}
-
-void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_f64x2extractlane");
-}
-
-void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_f64x2replacelane");
-}
-
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2_abs");
@@ -2227,36 +2340,6 @@ bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
return true;
}
-void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2add");
-}
-
-void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2sub");
-}
-
-void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2mul");
-}
-
-void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2div");
-}
-
-void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2min");
-}
-
-void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2max");
-}
-
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "pmin unimplemented");
@@ -2282,24 +2365,6 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
bailout(kSimd, "f64x2.promote_low_f32x4");
}
-void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
-}
-
-void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_f32x4extractlane");
-}
-
-void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_f32x4replacelane");
-}
-
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_abs");
@@ -2339,36 +2404,6 @@ bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
return true;
}
-void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4add");
-}
-
-void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4sub");
-}
-
-void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4mul");
-}
-
-void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4div");
-}
-
-void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4min");
-}
-
-void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4max");
-}
-
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "pmin unimplemented");
@@ -2379,24 +2414,6 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "pmax unimplemented");
}
-void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64x2splat");
-}
-
-void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i64x2extractlane");
-}
-
-void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i64x2replacelane");
-}
-
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
@@ -2439,21 +2456,6 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
bailout(kSimd, "i64x2_shri_u");
}
-void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i64x2add");
-}
-
-void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i64x2sub");
-}
-
-void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i64x2mul");
-}
-
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2503,24 +2505,6 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
}
-void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_splat");
-}
-
-void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i32x4extractlane");
-}
-
-void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i32x4replacelane");
-}
-
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
@@ -2568,45 +2552,6 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
bailout(kSimd, "i32x4_shri_u");
}
-void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4add");
-}
-
-void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4sub");
-}
-
-void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4mul");
-}
-
-void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_min_s");
-}
-
-void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_min_u");
-}
-
-void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_max_s");
-}
-
-void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
-}
-
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2647,11 +2592,6 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
}
-void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8splat");
-}
-
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
@@ -2699,22 +2639,12 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
bailout(kSimd, "i16x8_shri_u");
}
-void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8add");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8sub");
-}
-
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2727,54 +2657,12 @@ void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
-void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8mul");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
-void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_min_s");
-}
-
-void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_min_u");
-}
-
-void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_max_s");
-}
-
-void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_max_u");
-}
-
-void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_u");
-}
-
-void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
-}
-
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
@@ -2785,12 +2673,6 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
}
-void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
-}
-
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2834,30 +2716,6 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
bailout(kSimd, "i8x16.popcnt");
}
-void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i8x16splat");
-}
-
-void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i8x16extractlane_u");
-}
-
-void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i8x16extractlane_s");
-}
-
-void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- bailout(kUnsupportedArchitecture, "emit_i8x16replacelane");
-}
-
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
@@ -2910,22 +2768,12 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
bailout(kSimd, "i8x16_shri_u");
}
-void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16add");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16sub");
-}
-
void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2944,180 +2792,6 @@ void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
-void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_min_s");
-}
-
-void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_min_u");
-}
-
-void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_max_s");
-}
-
-void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_max_u");
-}
-
-void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_eq");
-}
-
-void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_ne");
-}
-
-void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16gt_s");
-}
-
-void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16gt_u");
-}
-
-void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16ge_s");
-}
-
-void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16ge_u");
-}
-
-void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_eq");
-}
-
-void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_ne");
-}
-
-void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8gt_s");
-}
-
-void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8gt_u");
-}
-
-void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8ge_s");
-}
-
-void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8ge_u");
-}
-
-void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_eq");
-}
-
-void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_ne");
-}
-
-void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4gt_s");
-}
-
-void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4gt_u");
-}
-
-void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4ge_s");
-}
-
-void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
-}
-
-void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.eq");
-}
-
-void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
-}
-
-void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.gt_s");
-}
-
-void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.ge_s");
-}
-
-void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
-}
-
-void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_ne");
-}
-
-void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_lt");
-}
-
-void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_le");
-}
-
-void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2_eq");
-}
-
-void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2_ne");
-}
-
-void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2_lt");
-}
-
-void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64x2_le");
-}
-
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
bailout(kUnsupportedArchitecture, "emit_s128_const");
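
The s390 SIMD hunks above use the same list-macro technique, with extra columns that pick the register accessor per row (fp for vector operands, gp for the scalar source of an integer splat or the destination of an integer lane extract). The sketch below shows how such a column can select the accessor through token pasting; DemoLiftoffRegister and its members are illustrative stand-ins, not the real V8 types.

// Illustrative only: a two-accessor register wrapper plus a list macro whose
// last column picks which accessor the generated emitter calls.
struct DemoLiftoffRegister {
  int gp_code = 0;
  int fp_code = 0;
  int gp() const { return gp_code; }
  int fp() const { return fp_code; }
};

struct DemoSimdAssembler {
  void DemoF64x2Splat(int dst, int src) { /* splat from an fp register */ }
  void DemoI32x4Splat(int dst, int src) { /* splat from a gp register */ }

#define DEMO_SIMD_UNOP_LIST(V)       \
  V(f64x2_splat, DemoF64x2Splat, fp) \
  V(i32x4_splat, DemoI32x4Splat, gp)

#define DEMO_EMIT_SIMD_UNOP(name, op, stype)                           \
  void emit_##name(DemoLiftoffRegister dst, DemoLiftoffRegister src) { \
    op(dst.fp(), src.stype()); /* stype expands to fp or gp */         \
  }
  DEMO_SIMD_UNOP_LIST(DEMO_EMIT_SIMD_UNOP)
#undef DEMO_EMIT_SIMD_UNOP
#undef DEMO_SIMD_UNOP_LIST
};
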
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 0744d2e09b..d5cda7b3c4 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -13,6 +13,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -206,43 +207,79 @@ void LiftoffAssembler::AlignFrameSize() {
max_used_spill_offset_ = RoundUp(max_used_spill_offset_, kSystemPointerSize);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
- // The frame_size includes the frame marker. The frame marker has already been
- // pushed on the stack though, so we don't need to allocate memory for it
- // anymore.
- int frame_size = GetTotalFrameSize() - kSystemPointerSize;
- // Need to align sp to system pointer size.
- DCHECK_EQ(frame_size, RoundUp(frame_size, kSystemPointerSize));
- // We can't run out of space, just pass anything big enough to not cause the
- // assembler to try to grow the buffer.
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ DCHECK_EQ(0, frame_size % kSystemPointerSize);
+
+ // We can't run out of space when patching, just pass anything big enough to
+ // not cause the assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
-#if V8_TARGET_OS_WIN
- if (frame_size > kStackPageSize) {
- // Generate OOL code (at the end of the function, where the current
- // assembler is pointing) to do the explicit stack limit check (see
- // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-6.0/aa227153(v=vs.60)).
- // At the function start, emit a jump to that OOL code (from {offset} to
- // {pc_offset()}).
- int ool_offset = pc_offset() - offset;
- patching_assembler.jmp_rel(ool_offset);
- DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
- patching_assembler.Nop(liftoff::kSubSpSize -
- patching_assembler.pc_offset());
-
- // Now generate the OOL code.
- AllocateStackSpace(frame_size);
- // Jump back to the start of the function (from {pc_offset()} to {offset +
- // kSubSpSize}).
- int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
- jmp_rel(func_start_offset);
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.sub_sp_32(frame_size);
+ DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
return;
}
-#endif
- patching_assembler.sub_sp_32(frame_size);
- DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, framesize)} with a jump to OOL code that does this
+ // "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+ patching_assembler.jmp_rel(pc_offset() - offset);
+ DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
+ patching_assembler.Nop(liftoff::kSubSpSize - patching_assembler.pc_offset());
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ movq(kScratchRegister,
+ FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ addq(kScratchRegister, Immediate(frame_size));
+ cmpq(rsp, kScratchRegister);
+ j(above_equal, &continuation, Label::kNear);
+ }
+
+ near_call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ AllocateStackSpace(frame_size);
+
+ // Jump back to the start of the function, from {pc_offset()} to
+  // right after the reserved space for the {__ sub(sp, framesize)} (which
+ // is a branch now).
+ int func_start_offset = offset + liftoff::kSubSpSize;
+ jmp_rel(func_start_offset - pc_offset());
}
void LiftoffAssembler::FinishCode() {}
@@ -2321,38 +2358,6 @@ void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-// Can be used by both the immediate and register version of the shifts. psraq
-// is only available in AVX512, so we can't use it yet.
-template <typename ShiftOperand>
-void EmitI64x2ShrS(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, ShiftOperand rhs,
- bool shift_is_rcx = false) {
- bool restore_rcx = false;
- Register backup = kScratchRegister2;
- if (!shift_is_rcx) {
- if (assm->cache_state()->is_used(LiftoffRegister(rcx))) {
- restore_rcx = true;
- assm->movq(backup, rcx);
- }
- assm->movl(rcx, rhs);
- }
-
- Register tmp = kScratchRegister;
-
- assm->Pextrq(tmp, lhs.fp(), int8_t{0x0});
- assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, uint8_t{0x0});
-
- assm->Pextrq(tmp, lhs.fp(), int8_t{0x1});
- assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, uint8_t{0x1});
-
- // restore rcx.
- if (restore_rcx) {
- assm->movq(rcx, backup);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
assm->xorq(dst.gp(), dst.gp());
@@ -3458,13 +3463,13 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI64x2ShrS(this, dst, lhs, rhs.gp(),
- /*shift_is_rcx=*/rhs.gp() == rcx);
+ I64x2ShrS(dst.fp(), lhs.fp(), rhs.gp(), kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2, kScratchRegister);
}
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- liftoff::EmitI64x2ShrS(this, dst, lhs, Immediate(rhs));
+ I64x2ShrS(dst.fp(), lhs.fp(), rhs & 0x3F, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
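
On x64 the prologue space for the SP adjustment is reserved up front (PrepareStackFrame emits a fixed-size placeholder) and overwritten later, either with the real sub-sp or with a jump to the out-of-line stack check, padding any remainder with nops. A rough sketch of that reserve-then-patch idea, using a plain byte buffer instead of the real Assembler, is below; kReservedPrologueBytesDemo and the 0x90 nop byte are illustrative assumptions.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Sketch over a plain byte buffer (the real code uses a second Assembler
// writing into the reserved bytes of the main code buffer).
constexpr size_t kReservedPrologueBytesDemo = 7;  // illustrative size

size_t PrepareStackFrameDemo(std::vector<uint8_t>& code) {
  size_t offset = code.size();
  // Reserve fixed space in the prologue, filled with nops for now.
  code.insert(code.end(), kReservedPrologueBytesDemo, uint8_t{0x90});
  return offset;  // remembered so the prologue can be patched later
}

void PatchPrepareStackFrameDemo(std::vector<uint8_t>& code, size_t offset,
                                const uint8_t* patch, size_t patch_size) {
  // The patch is either the real "sub sp, frame_size" or a jump to the
  // out-of-line large-frame stack check; it must fit into the reserved
  // space, and any remaining bytes simply stay nops.
  std::memcpy(code.data() + offset, patch, patch_size);
}
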
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index de9904ae4a..5a1ab579e7 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -167,7 +167,7 @@ own<ExternType> GetImportExportType(const i::wasm::WasmModule* module,
Mutability mutability = global.mutability ? VAR : CONST;
return GlobalType::make(std::move(content), mutability);
}
- case i::wasm::kExternalException:
+ case i::wasm::kExternalTag:
UNREACHABLE();
}
}
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
index 2705edb634..0f71c9a224 100644
--- a/deps/v8/src/wasm/code-space-access.cc
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -11,63 +11,64 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
-
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
-// The {NativeModule} argument is unused; it is just here for a common API with
-// the non-M1 implementation.
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
- if (code_space_write_nesting_level_ == 0) {
- SwitchMemoryPermissionsToWritable();
- }
+#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
+ : native_module_(native_module) {
+#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+ if (code_space_write_nesting_level_ == 0) SetWritable();
code_space_write_nesting_level_++;
}
CodeSpaceWriteScope::~CodeSpaceWriteScope() {
code_space_write_nesting_level_--;
- if (code_space_write_nesting_level_ == 0) {
- SwitchMemoryPermissionsToExecutable();
- }
+ if (code_space_write_nesting_level_ == 0) SetExecutable();
}
-#else // Not on MacOS on ARM64 (M1 hardware): Use Intel PKU and/or mprotect.
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
-CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
- : native_module_(native_module) {
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+void CodeSpaceWriteScope::SetWritable() const {
+ pthread_jit_write_protect_np(0);
+}
+
+void CodeSpaceWriteScope::SetExecutable() const {
+ pthread_jit_write_protect_np(1);
+}
+#pragma clang diagnostic pop
+
+#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+
+void CodeSpaceWriteScope::SetWritable() const {
DCHECK_NOT_NULL(native_module_);
- if (FLAG_wasm_memory_protection_keys) {
- auto* code_manager = GetWasmCodeManager();
- if (code_manager->HasMemoryProtectionKeySupport()) {
- code_manager->SetThreadWritable(true);
- return;
- }
- // Fallback to mprotect-based write protection, if enabled.
- }
- if (FLAG_wasm_write_protect_code_memory) {
- bool success = native_module_->SetWritable(true);
- CHECK(success);
+ auto* code_manager = GetWasmCodeManager();
+ if (code_manager->HasMemoryProtectionKeySupport()) {
+ DCHECK(FLAG_wasm_memory_protection_keys);
+ code_manager->SetThreadWritable(true);
+ } else if (FLAG_wasm_write_protect_code_memory) {
+ native_module_->AddWriter();
}
}
-CodeSpaceWriteScope::~CodeSpaceWriteScope() {
- if (FLAG_wasm_memory_protection_keys) {
- auto* code_manager = GetWasmCodeManager();
- if (code_manager->HasMemoryProtectionKeySupport()) {
- code_manager->SetThreadWritable(false);
- return;
- }
- // Fallback to mprotect-based write protection, if enabled.
- }
- if (FLAG_wasm_write_protect_code_memory) {
- bool success = native_module_->SetWritable(false);
- CHECK(success);
+void CodeSpaceWriteScope::SetExecutable() const {
+ auto* code_manager = GetWasmCodeManager();
+ if (code_manager->HasMemoryProtectionKeySupport()) {
+ DCHECK(FLAG_wasm_memory_protection_keys);
+ code_manager->SetThreadWritable(false);
+ } else if (FLAG_wasm_write_protect_code_memory) {
+ native_module_->RemoveWriter();
}
}
-#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
} // namespace wasm
} // namespace internal
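
The reworked CodeSpaceWriteScope above keeps a thread-local nesting counter so that only the outermost scope on a thread actually flips code-page permissions (via pthread_jit_write_protect_np on Apple Silicon, or protection keys / mprotect elsewhere). A stripped-down sketch of that RAII pattern follows; SetWritableDemo/SetExecutableDemo are placeholders for the real permission switches, not V8 functions.

#include <cstdio>

// Placeholders standing in for the platform-specific permission switches.
void SetWritableDemo() { std::puts("code space -> writable"); }
void SetExecutableDemo() { std::puts("code space -> executable"); }

class CodeWriteScopeDemo {
 public:
  CodeWriteScopeDemo() {
    if (nesting_level_++ == 0) SetWritableDemo();
  }
  ~CodeWriteScopeDemo() {
    if (--nesting_level_ == 0) SetExecutableDemo();
  }

 private:
  static thread_local int nesting_level_;
};

thread_local int CodeWriteScopeDemo::nesting_level_ = 0;

int main() {
  CodeWriteScopeDemo outer;      // outermost scope flips to writable
  { CodeWriteScopeDemo inner; }  // nested scope: no permission change
  return 0;                      // outer destructor flips back to executable
}
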
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index 62a252caf9..96f852e63b 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -45,8 +45,8 @@ class NativeModule;
// permissions for all code pages.
class V8_NODISCARD CodeSpaceWriteScope final {
public:
- explicit CodeSpaceWriteScope(NativeModule* native_module);
- ~CodeSpaceWriteScope();
+ explicit V8_EXPORT_PRIVATE CodeSpaceWriteScope(NativeModule* native_module);
+ V8_EXPORT_PRIVATE ~CodeSpaceWriteScope();
// Disable copy constructor and copy-assignment operator, since this manages
// a resource and implicit copying of the scope can yield surprising errors.
@@ -54,44 +54,21 @@ class V8_NODISCARD CodeSpaceWriteScope final {
CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
private:
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
static thread_local int code_space_write_nesting_level_;
-#else // On non-M1 hardware:
+
+ void SetWritable() const;
+ void SetExecutable() const;
+
// The M1 implementation knows implicitly from the {MAP_JIT} flag during
- // allocation which region to switch permissions for. On non-M1 hardware,
- // however, we either need the protection key or code space from the
+ // allocation which region to switch permissions for. On non-M1 hardware
+ // without memory protection key support, we need the code space from the
// {native_module_}.
- NativeModule* native_module_;
-#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+#if !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+ NativeModule* const native_module_;
+#endif
};
} // namespace wasm
-
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
-
-// Low-level API for switching MAP_JIT pages between writable and executable.
-// TODO(wasm): Access to these functions is only needed in tests. Remove?
-
-// Ignoring this warning is considered better than relying on
-// __builtin_available.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-inline void SwitchMemoryPermissionsToWritable() {
- pthread_jit_write_protect_np(0);
-}
-inline void SwitchMemoryPermissionsToExecutable() {
- pthread_jit_write_protect_np(1);
-}
-#pragma clang diagnostic pop
-
-#else // Not Mac-on-arm64.
-
-// Nothing to do, we map code memory with rwx permissions.
-inline void SwitchMemoryPermissionsToWritable() {}
-inline void SwitchMemoryPermissionsToExecutable() {}
-
-#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 96b9bbb2a5..773090c4e5 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -132,7 +132,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void AddCallback(callback_t);
- void InitializeAfterDeserialization();
+ void InitializeAfterDeserialization(
+ base::Vector<const int> missing_functions);
// Wait until top tier compilation finished, or compilation failed.
void WaitForTopTierFinished();
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index c175acd8a0..20c6b30ffc 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -33,7 +33,7 @@ namespace internal {
namespace wasm {
struct WasmGlobal;
-struct WasmException;
+struct WasmTag;
#define TRACE(...) \
do { \
@@ -486,11 +486,11 @@ struct IndexImmediate {
};
template <Decoder::ValidateFlag validate>
-struct ExceptionIndexImmediate : public IndexImmediate<validate> {
- const WasmException* exception = nullptr;
+struct TagIndexImmediate : public IndexImmediate<validate> {
+ const WasmTag* tag = nullptr;
- ExceptionIndexImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "exception index") {}
+ TagIndexImmediate(Decoder* decoder, const byte* pc)
+ : IndexImmediate<validate>(decoder, pc, "tag index") {}
};
template <Decoder::ValidateFlag validate>
@@ -1020,11 +1020,11 @@ struct ControlBase : public PcForErrors<validate> {
F(S128Const, const Simd128Immediate<validate>& imm, Value* result) \
F(Simd8x16ShuffleOp, const Simd128Immediate<validate>& imm, \
const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexImmediate<validate>& imm, \
+ F(Throw, const TagIndexImmediate<validate>& imm, \
const base::Vector<Value>& args) \
F(Rethrow, Control* block) \
- F(CatchException, const ExceptionIndexImmediate<validate>& imm, \
- Control* block, base::Vector<Value> caught_values) \
+ F(CatchException, const TagIndexImmediate<validate>& imm, Control* block, \
+ base::Vector<Value> caught_values) \
F(Delegate, uint32_t depth, Control* block) \
F(CatchAll, Control* block) \
F(AtomicOp, WasmOpcode opcode, base::Vector<Value> args, \
@@ -1266,12 +1266,12 @@ class WasmDecoder : public Decoder {
return VALIDATE(decoder->ok()) ? assigned : nullptr;
}
- bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < module_->exceptions.size())) {
- DecodeError(pc, "Invalid exception index: %u", imm.index);
+ bool Validate(const byte* pc, TagIndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->tags.size())) {
+ DecodeError(pc, "Invalid tag index: %u", imm.index);
return false;
}
- imm.exception = &module_->exceptions[imm.index];
+ imm.tag = &module_->tags[imm.index];
return true;
}
@@ -1635,7 +1635,7 @@ class WasmDecoder : public Decoder {
}
case kExprThrow:
case kExprCatch: {
- ExceptionIndexImmediate<validate> imm(decoder, pc + 1);
+ TagIndexImmediate<validate> imm(decoder, pc + 1);
return 1 + imm.length;
}
case kExprLet: {
@@ -1991,10 +1991,10 @@ class WasmDecoder : public Decoder {
imm.sig->return_count()};
}
case kExprThrow: {
- ExceptionIndexImmediate<validate> imm(this, pc + 1);
+ TagIndexImmediate<validate> imm(this, pc + 1);
CHECK(Validate(pc + 1, imm));
- DCHECK_EQ(0, imm.exception->sig->return_count());
- return {imm.exception->sig->parameter_count(), 0};
+ DCHECK_EQ(0, imm.tag->sig->return_count());
+ return {imm.tag->sig->parameter_count(), 0};
}
case kExprBr:
case kExprBlock:
@@ -2565,11 +2565,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(Throw) {
CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_ + 1);
+ TagIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PeekArgs(imm.exception->ToFunctionSig());
+ ArgVector args = PeekArgs(imm.tag->ToFunctionSig());
CALL_INTERFACE_IF_OK_AND_REACHABLE(Throw, imm, base::VectorOf(args));
- DropArgs(imm.exception->ToFunctionSig());
+ DropArgs(imm.tag->ToFunctionSig());
EndControl();
return 1 + imm.length;
}
@@ -2592,7 +2592,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(Catch) {
CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_ + 1);
+ TagIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
DCHECK(!control_.empty());
Control* c = &control_.back();
@@ -2611,7 +2611,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DCHECK_LE(stack_ + c->stack_depth, stack_end_);
stack_end_ = stack_ + c->stack_depth;
c->reachability = control_at(1)->innerReachability();
- const WasmExceptionSig* sig = imm.exception->sig;
+ const WasmTagSig* sig = imm.tag->sig;
EnsureStackSpace(static_cast<int>(sig->parameter_count()));
for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
Push(CreateValue(sig->GetParam(i)));
@@ -3582,27 +3582,15 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
InitMerge(&c->end_merge, imm.out_arity(), [pc, &imm](uint32_t i) {
return Value{pc, imm.out_type(i)};
});
- InitMerge(&c->start_merge, imm.in_arity(),
-#ifdef DEBUG
- [this, pc, &imm, args](uint32_t i) {
-#else
- [pc, &imm, args](uint32_t i) {
-#endif
- // The merge needs to be instantiated with Values of the correct
- // type even in the presence of bottom values (i.e. in
- // unreachable code). Since bottom Values will never be used for
- // code generation, we can safely instantiate new ones in that
- // case.
- DCHECK_IMPLIES(current_code_reachable_and_ok_,
- args[i].type != kWasmBottom);
- // Warning: Do not use a ternary operator here, as gcc bugs out
- // (as of version 10.2.1).
- if (args[i].type != kWasmBottom) {
- return args[i];
- } else {
- return Value{pc, imm.in_type(i)};
- }
- });
+ InitMerge(&c->start_merge, imm.in_arity(), [&imm, args](uint32_t i) {
+ // The merge needs to be instantiated with Values of the correct
+ // type, even if the actual Value is bottom/unreachable or has
+ // a subtype of the static type.
+ // So we copy-construct a new Value, and update its type.
+ Value value = args[i];
+ value.type = imm.in_type(i);
+ return value;
+ });
}
// In reachable code, check if there are at least {count} values on the stack.
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index ae7962f86f..cd9d941a00 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -11,7 +11,7 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/wasm-compiler.h"
#include "src/diagnostics/code-tracer.h"
-#include "src/logging/counters.h"
+#include "src/logging/counters-scopes.h"
#include "src/logging/log.h"
#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
@@ -77,12 +77,15 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
code.end()};
base::Optional<TimedHistogramScope> wasm_compile_function_time_scope;
+ base::Optional<TimedHistogramScope> wasm_compile_huge_function_time_scope;
if (counters) {
- if ((func_body.end - func_body.start) >= 100 * KB) {
+ if (func_body.end - func_body.start >= 100 * KB) {
auto huge_size_histogram = SELECT_WASM_COUNTER(
counters, env->module->origin, wasm, huge_function_size_bytes);
huge_size_histogram->AddSample(
static_cast<int>(func_body.end - func_body.start));
+ wasm_compile_huge_function_time_scope.emplace(
+ counters->wasm_compile_huge_function_time());
}
auto timed_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
wasm_compile, function_time);
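// Sketch of the conditionally-armed RAII timer pattern used above, with
// std::optional and a hand-rolled timer standing in for base::Optional and
// TimedHistogramScope (all names below are illustrative):
#include <chrono>
#include <cstddef>
#include <cstdio>
#include <optional>

class ScopedTimer {
 public:
  explicit ScopedTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

void CompileFunction(size_t body_size) {
  ScopedTimer total_timer("compile");     // always measured
  std::optional<ScopedTimer> huge_timer;  // only armed for very large bodies
  if (body_size >= 100 * 1024) huge_timer.emplace("huge-function-compile");
  // ... actual compilation work ...
}  // huge_timer (if armed) and total_timer both report here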
@@ -107,18 +110,20 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
if (V8_LIKELY(FLAG_wasm_tier_mask_for_testing == 0) ||
func_index_ >= 32 ||
((FLAG_wasm_tier_mask_for_testing & (1 << func_index_)) == 0)) {
- if (V8_LIKELY(func_index_ >= 32 || (FLAG_wasm_debug_mask_for_testing &
- (1 << func_index_)) == 0)) {
- result = ExecuteLiftoffCompilation(
- env, func_body, func_index_, for_debugging_, counters, detected);
- } else {
- // We don't use the debug side table, we only pass it to cover
- // different code paths in Liftoff for testing.
- std::unique_ptr<DebugSideTable> debug_sidetable;
- result = ExecuteLiftoffCompilation(env, func_body, func_index_,
- kForDebugging, counters, detected,
- {}, &debug_sidetable);
+ // We do not use the debug side table; we only (optionally) pass it to
+ // cover different code paths in Liftoff for testing.
+ std::unique_ptr<DebugSideTable> unused_debug_sidetable;
+ std::unique_ptr<DebugSideTable>* debug_sidetable_ptr = nullptr;
+ if (V8_UNLIKELY(func_index_ < 32 && (FLAG_wasm_debug_mask_for_testing &
+ (1 << func_index_)) != 0)) {
+ debug_sidetable_ptr = &unused_debug_sidetable;
}
+ result = ExecuteLiftoffCompilation(
+ env, func_body, func_index_, for_debugging_,
+ LiftoffOptions{}
+ .set_counters(counters)
+ .set_detected_features(detected)
+ .set_debug_sidetable(debug_sidetable_ptr));
if (result.succeeded()) break;
}
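// Illustrative sketch of the fluent options-struct pattern that the new
// LiftoffOptions call uses: each setter fills one field and returns *this, so
// optional arguments can be chained at the call site instead of growing a
// long positional parameter list. Field and type names here are assumptions,
// not V8's exact LiftoffOptions layout.
#include <memory>

struct DebugTable {};  // opaque stand-in for the debug side table

struct CompileOptions {
  int* counters = nullptr;
  unsigned* detected_features = nullptr;
  std::unique_ptr<DebugTable>* debug_sidetable = nullptr;

  CompileOptions& set_counters(int* c) {
    counters = c;
    return *this;
  }
  CompileOptions& set_detected_features(unsigned* f) {
    detected_features = f;
    return *this;
  }
  CompileOptions& set_debug_sidetable(std::unique_ptr<DebugTable>* d) {
    debug_sidetable = d;
    return *this;
  }
};

// Usage mirrors the call above; unset options simply keep their defaults:
//   Compile(env, body, CompileOptions{}.set_counters(&counters)
//                                      .set_detected_features(&features));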
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index e53269d72e..84f34cc0ed 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -715,16 +715,16 @@ class WasmGraphBuildingInterface {
result->node = builder_->Simd8x16ShuffleOp(imm.value, input_nodes);
}
- void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
+ void Throw(FullDecoder* decoder, const TagIndexImmediate<validate>& imm,
const base::Vector<Value>& value_args) {
int count = value_args.length();
ZoneVector<TFNode*> args(count, decoder->zone());
for (int i = 0; i < count; ++i) {
args[i] = value_args[i].node;
}
- CheckForException(
- decoder, builder_->Throw(imm.index, imm.exception, base::VectorOf(args),
- decoder->position()));
+ CheckForException(decoder,
+ builder_->Throw(imm.index, imm.tag, base::VectorOf(args),
+ decoder->position()));
TerminateThrow(decoder);
}
@@ -737,8 +737,8 @@ class WasmGraphBuildingInterface {
}
void CatchException(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm,
- Control* block, base::Vector<Value> values) {
+ const TagIndexImmediate<validate>& imm, Control* block,
+ base::Vector<Value> values) {
DCHECK(block->is_try_catch());
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
@@ -756,7 +756,7 @@ class WasmGraphBuildingInterface {
// Get the exception tag and see if it matches the expected one.
TFNode* caught_tag = builder_->GetExceptionTag(exception);
- TFNode* exception_tag = builder_->LoadExceptionTagFromTable(imm.index);
+ TFNode* exception_tag = builder_->LoadTagFromTable(imm.index);
TFNode* compare = builder_->ExceptionTagEqual(caught_tag, exception_tag);
builder_->BranchNoHint(compare, &if_catch, &if_no_catch);
@@ -773,7 +773,7 @@ class WasmGraphBuildingInterface {
SetEnv(if_catch_env);
NodeVector caught_values(values.size());
base::Vector<TFNode*> caught_vector = base::VectorOf(caught_values);
- builder_->GetExceptionValues(exception, imm.exception, caught_vector);
+ builder_->GetExceptionValues(exception, imm.tag, caught_vector);
for (size_t i = 0, e = values.size(); i < e; ++i) {
values[i].node = caught_values[i];
}
@@ -948,6 +948,9 @@ class WasmGraphBuildingInterface {
result->node = builder_->ArrayNewWithRtt(imm.index, imm.array_type,
length.node, initial_value.node,
rtt.node, decoder->position());
+ // array.new_with_rtt introduces a loop. Therefore, we have to mark the
+ // immediately nesting loop (if any) as non-innermost.
+ if (!loop_infos_.empty()) loop_infos_.back().is_innermost = false;
}
void ArrayNewDefault(FullDecoder* decoder,
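// Simplified sketch of the tag dispatch that CatchException builds as graph
// nodes: compare the tag of the in-flight exception against the tag this
// catch clause declares, unpack the payload on a match, and otherwise let the
// exception travel on to the next handler. Plain C++ stands in for TurboFan
// nodes; all names are illustrative.
#include <cstdint>
#include <vector>

struct PendingException {
  const void* tag;                // identity of the thrown tag
  std::vector<uint64_t> payload;  // encoded tag parameters
};

// Returns true if this catch clause handles the exception.
bool TryCatch(const PendingException& ex, const void* expected_tag,
              std::vector<uint64_t>* caught_values) {
  if (ex.tag != expected_tag) return false;  // no match: rethrow/delegate
  *caught_values = ex.payload;               // bind the caught values
  return true;
}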
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 2b3ad8fd8b..ea714cbe4c 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -17,7 +17,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
-#include "src/logging/counters.h"
+#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/objects/property-descriptor.h"
#include "src/tasks/task-utils.h"
@@ -553,7 +553,8 @@ class CompilationStateImpl {
// Initialize the compilation progress after deserialization. This is needed
// for recompilation (e.g. for tier down) to work later.
- void InitializeCompilationProgressAfterDeserialization();
+ void InitializeCompilationProgressAfterDeserialization(
+ base::Vector<const int> missing_functions);
// Initializes compilation units based on the information encoded in the
// {compilation_progress_}.
@@ -660,9 +661,14 @@ class CompilationStateImpl {
}
private:
- void AddCompilationUnitInternal(CompilationUnitBuilder* builder,
- int function_index,
- uint8_t function_progress);
+ uint8_t SetupCompilationProgressForFunction(
+ bool lazy_module, const WasmModule* module,
+ const WasmFeatures& enabled_features, int func_index);
+
+ // Returns the potentially-updated {function_progress}.
+ uint8_t AddCompilationUnitInternal(CompilationUnitBuilder* builder,
+ int function_index,
+ uint8_t function_progress);
// Trigger callbacks according to the internal counters below
// (outstanding_...), plus the given events.
@@ -830,8 +836,10 @@ void CompilationState::WaitForTopTierFinished() {
void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); }
-void CompilationState::InitializeAfterDeserialization() {
- Impl(this)->InitializeCompilationProgressAfterDeserialization();
+void CompilationState::InitializeAfterDeserialization(
+ base::Vector<const int> missing_functions) {
+ Impl(this)->InitializeCompilationProgressAfterDeserialization(
+ missing_functions);
}
bool CompilationState::failed() const { return Impl(this)->failed(); }
@@ -1021,6 +1029,8 @@ class CompilationUnitBuilder {
js_to_wasm_wrapper_units_.clear();
}
+ const WasmModule* module() { return native_module_->module(); }
+
private:
CompilationStateImpl* compilation_state() const {
return Impl(native_module_->compilation_state());
@@ -1121,7 +1131,6 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
}
DCHECK(!native_module->lazy_compile_frozen());
- CodeSpaceWriteScope code_space_write_scope(native_module);
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
@@ -1170,8 +1179,12 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
}
WasmCodeRefScope code_ref_scope;
- WasmCode* code = native_module->PublishCode(
- native_module->AddCompiledCode(std::move(result)));
+ WasmCode* code;
+ {
+ CodeSpaceWriteScope code_space_write_scope(native_module);
+ code = native_module->PublishCode(
+ native_module->AddCompiledCode(std::move(result)));
+ }
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) {
@@ -2851,6 +2864,38 @@ bool CompilationStateImpl::cancelled() const {
return compile_cancelled_.load(std::memory_order_relaxed);
}
+uint8_t CompilationStateImpl::SetupCompilationProgressForFunction(
+ bool lazy_module, const WasmModule* module,
+ const WasmFeatures& enabled_features, int func_index) {
+ ExecutionTierPair requested_tiers =
+ GetRequestedExecutionTiers(module, enabled_features, func_index);
+ CompileStrategy strategy =
+ GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+
+ bool required_for_baseline = strategy == CompileStrategy::kEager;
+ bool required_for_top_tier = strategy != CompileStrategy::kLazy;
+ DCHECK_EQ(required_for_top_tier,
+ strategy == CompileStrategy::kEager ||
+ strategy == CompileStrategy::kLazyBaselineEagerTopTier);
+
+ // Count functions to complete baseline and top tier compilation.
+ if (required_for_baseline) outstanding_baseline_units_++;
+ if (required_for_top_tier) outstanding_top_tier_functions_++;
+
+ // Initialize function's compilation progress.
+ ExecutionTier required_baseline_tier = required_for_baseline
+ ? requested_tiers.baseline_tier
+ : ExecutionTier::kNone;
+ ExecutionTier required_top_tier =
+ required_for_top_tier ? requested_tiers.top_tier : ExecutionTier::kNone;
+ uint8_t function_progress =
+ ReachedTierField::encode(ExecutionTier::kNone) |
+ RequiredBaselineTierField::encode(required_baseline_tier) |
+ RequiredTopTierField::encode(required_top_tier);
+
+ return function_progress;
+}
+
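// Illustrative sketch of how the returned progress byte is packed. V8 uses
// base::BitField accessors (RequiredBaselineTierField etc.); the explicit
// shifts, field widths and enum values below are assumptions made for this
// example only.
#include <cstdint>

enum class Tier : uint8_t { kNone = 0, kLiftoff = 1, kTurbofan = 2 };

constexpr uint8_t EncodeProgress(Tier required_baseline, Tier required_top,
                                 Tier reached) {
  return static_cast<uint8_t>(
      static_cast<uint8_t>(required_baseline) |      // bits 0-1
      (static_cast<uint8_t>(required_top) << 2) |    // bits 2-3
      (static_cast<uint8_t>(reached) << 4));         // bits 4-5
}

constexpr Tier DecodeReachedTier(uint8_t progress) {
  return static_cast<Tier>((progress >> 4) & 0b11);
}

static_assert(DecodeReachedTier(EncodeProgress(Tier::kLiftoff, Tier::kTurbofan,
                                               Tier::kNone)) == Tier::kNone,
              "progress byte round-trips");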
void CompilationStateImpl::InitializeCompilationProgress(
bool lazy_module, int num_import_wrappers, int num_export_wrappers) {
DCHECK(!failed());
@@ -2877,32 +2922,8 @@ void CompilationStateImpl::InitializeCompilationProgress(
outstanding_top_tier_functions_++;
continue;
}
- ExecutionTierPair requested_tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
- CompileStrategy strategy =
- GetCompileStrategy(module, enabled_features, func_index, lazy_module);
-
- bool required_for_baseline = strategy == CompileStrategy::kEager;
- bool required_for_top_tier = strategy != CompileStrategy::kLazy;
- DCHECK_EQ(required_for_top_tier,
- strategy == CompileStrategy::kEager ||
- strategy == CompileStrategy::kLazyBaselineEagerTopTier);
-
- // Count functions to complete baseline and top tier compilation.
- if (required_for_baseline) outstanding_baseline_units_++;
- if (required_for_top_tier) outstanding_top_tier_functions_++;
-
- // Initialize function's compilation progress.
- ExecutionTier required_baseline_tier = required_for_baseline
- ? requested_tiers.baseline_tier
- : ExecutionTier::kNone;
- ExecutionTier required_top_tier =
- required_for_top_tier ? requested_tiers.top_tier : ExecutionTier::kNone;
- uint8_t function_progress = ReachedTierField::encode(ExecutionTier::kNone);
- function_progress = RequiredBaselineTierField::update(
- function_progress, required_baseline_tier);
- function_progress =
- RequiredTopTierField::update(function_progress, required_top_tier);
+ uint8_t function_progress = SetupCompilationProgressForFunction(
+ lazy_module, module, enabled_features, func_index);
compilation_progress_.push_back(function_progress);
}
DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
@@ -2917,7 +2938,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
TriggerCallbacks();
}
-void CompilationStateImpl::AddCompilationUnitInternal(
+uint8_t CompilationStateImpl::AddCompilationUnitInternal(
CompilationUnitBuilder* builder, int function_index,
uint8_t function_progress) {
ExecutionTier required_baseline_tier =
@@ -2928,6 +2949,27 @@ void CompilationStateImpl::AddCompilationUnitInternal(
ExecutionTier reached_tier =
CompilationStateImpl::ReachedTierField::decode(function_progress);
+ if (FLAG_experimental_wasm_gc) {
+ // The Turbofan optimizations we enable for WasmGC code can (for now)
+ // take a very long time, so skip Turbofan compilation for super-large
+ // functions.
+ // Besides, module serialization currently requires that all functions
+ // have been TF-compiled. By enabling this limit only for WasmGC, we
+ // make sure that non-experimental modules can be serialized as usual.
+ // TODO(jkummerow): This is a stop-gap solution to avoid excessive
+ // compile times. We would like to replace this hard threshold with
+ // a better solution (TBD) eventually.
+ constexpr uint32_t kMaxWasmFunctionSizeForTurbofan = 500 * KB;
+ uint32_t size = builder->module()->functions[function_index].code.length();
+ if (size > kMaxWasmFunctionSizeForTurbofan) {
+ required_baseline_tier = ExecutionTier::kLiftoff;
+ if (required_top_tier == ExecutionTier::kTurbofan) {
+ required_top_tier = ExecutionTier::kLiftoff;
+ outstanding_top_tier_functions_--;
+ }
+ }
+ }
+
if (reached_tier < required_baseline_tier) {
builder->AddBaselineUnit(function_index, required_baseline_tier);
}
@@ -2935,6 +2977,10 @@ void CompilationStateImpl::AddCompilationUnitInternal(
required_baseline_tier != required_top_tier) {
builder->AddTopTierUnit(function_index, required_top_tier);
}
+ return CompilationStateImpl::RequiredBaselineTierField::encode(
+ required_baseline_tier) |
+ CompilationStateImpl::RequiredTopTierField::encode(required_top_tier) |
+ CompilationStateImpl::ReachedTierField::encode(reached_tier);
}
void CompilationStateImpl::InitializeCompilationUnits(
@@ -2951,7 +2997,8 @@ void CompilationStateImpl::InitializeCompilationUnits(
for (size_t i = 0; i < compilation_progress_.size(); ++i) {
uint8_t function_progress = compilation_progress_[i];
int func_index = offset + static_cast<int>(i);
- AddCompilationUnitInternal(builder.get(), func_index, function_progress);
+ compilation_progress_[i] = AddCompilationUnitInternal(
+ builder.get(), func_index, function_progress);
}
}
builder->Commit();
@@ -2976,22 +3023,47 @@ void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
base::MutexGuard guard(&callbacks_mutex_);
function_progress = compilation_progress_[progress_index];
}
- AddCompilationUnitInternal(builder, func_index, function_progress);
+ uint8_t updated_function_progress =
+ AddCompilationUnitInternal(builder, func_index, function_progress);
+ if (updated_function_progress != function_progress) {
+ // This should happen very rarely (only for super-large functions), so we're
+ // not worried about overhead.
+ base::MutexGuard guard(&callbacks_mutex_);
+ compilation_progress_[progress_index] = updated_function_progress;
+ }
}
-void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization() {
+void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
+ base::Vector<const int> missing_functions) {
auto* module = native_module_->module();
- base::MutexGuard guard(&callbacks_mutex_);
- DCHECK(compilation_progress_.empty());
- constexpr uint8_t kProgressAfterDeserialization =
- RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
- RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
- ReachedTierField::encode(ExecutionTier::kTurbofan);
- finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
- finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
- finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
- compilation_progress_.assign(module->num_declared_functions,
- kProgressAfterDeserialization);
+ auto enabled_features = native_module_->enabled_features();
+ const bool lazy_module = IsLazyModule(module);
+ {
+ base::MutexGuard guard(&callbacks_mutex_);
+ DCHECK(compilation_progress_.empty());
+ constexpr uint8_t kProgressAfterDeserialization =
+ RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
+ RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
+ ReachedTierField::encode(ExecutionTier::kTurbofan);
+ finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
+ if (missing_functions.empty() || FLAG_wasm_lazy_compilation) {
+ finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
+ finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ compilation_progress_.assign(module->num_declared_functions,
+ kProgressAfterDeserialization);
+ uint32_t num_imported_functions = module->num_imported_functions;
+ for (auto func_index : missing_functions) {
+ if (FLAG_wasm_lazy_compilation) {
+ native_module_->UseLazyStub(num_imported_functions + func_index);
+ }
+ compilation_progress_[func_index] = SetupCompilationProgressForFunction(
+ lazy_module, module, enabled_features, func_index);
+ }
+ }
+ auto builder = std::make_unique<CompilationUnitBuilder>(native_module_);
+ InitializeCompilationUnits(std::move(builder));
+ WaitForCompilationEvent(CompilationEvent::kFinishedBaselineCompilation);
}
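// Hypothetical caller-side sketch (names other than
// InitializeAfterDeserialization and base::VectorOf are assumptions): after
// reading a serialized module, the deserializer collects the declared-function
// indices whose code was missing from the blob and hands them over, so they
// are either recompiled eagerly or routed through lazy-compile stubs.
void ResumeCompilationAfterDeserialization(
    wasm::NativeModule* native_module, int num_declared_functions,
    const std::vector<bool>& has_code /* per declared function */) {
  std::vector<int> missing_functions;
  for (int i = 0; i < num_declared_functions; ++i) {
    if (!has_code[i]) missing_functions.push_back(i);
  }
  native_module->compilation_state()->InitializeAfterDeserialization(
      base::VectorOf(missing_functions));
}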
void CompilationStateImpl::InitializeRecompilation(
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 5e0c76025b..b014f8a8c7 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -50,8 +50,8 @@ const char* ExternalKindName(ImportExportKindCode kind) {
return "memory";
case kExternalGlobal:
return "global";
- case kExternalException:
- return "exception";
+ case kExternalTag:
+ return "tag";
}
return "unknown";
}
@@ -84,8 +84,8 @@ const char* SectionName(SectionCode code) {
return "Element";
case kDataSectionCode:
return "Data";
- case kExceptionSectionCode:
- return "Exception";
+ case kTagSectionCode:
+ return "Tag";
case kDataCountSectionCode:
return "DataCount";
case kNameSectionCode:
@@ -413,7 +413,7 @@ class ModuleDecoderImpl : public Decoder {
kCodeSectionCode))
return;
break;
- case kExceptionSectionCode:
+ case kTagSectionCode:
if (!CheckUnorderedSection(section_code)) return;
if (!CheckSectionOrder(section_code, kMemorySectionCode,
kGlobalSectionCode))
@@ -526,9 +526,9 @@ class ModuleDecoderImpl : public Decoder {
case kDataCountSectionCode:
DecodeDataCountSection();
break;
- case kExceptionSectionCode:
+ case kTagSectionCode:
if (enabled_features_.has_eh()) {
- DecodeExceptionSection();
+ DecodeTagSection();
} else {
errorf(pc(),
"unexpected section <%s> (enable with --experimental-wasm-eh)",
@@ -562,6 +562,21 @@ class ModuleDecoderImpl : public Decoder {
module_->add_signature(s);
break;
}
+ case kWasmFunctionExtendingTypeCode: {
+ if (!enabled_features_.has_gc_experiments()) {
+ errorf(pc(),
+ "nominal types need --experimental-wasm-gc-experiments");
+ break;
+ }
+ const FunctionSig* s = consume_sig(module_->signature_zone.get());
+ module_->add_signature(s);
+ uint32_t super_index = consume_u32v("supertype");
+ if (!module_->has_signature(super_index)) {
+ errorf(pc(), "invalid function supertype index: %d", super_index);
+ break;
+ }
+ break;
+ }
case kWasmStructTypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
@@ -575,6 +590,21 @@ class ModuleDecoderImpl : public Decoder {
// {signature_map} does for function signatures?
break;
}
+ case kWasmStructExtendingTypeCode: {
+ if (!enabled_features_.has_gc_experiments()) {
+ errorf(pc(),
+ "nominal types need --experimental-wasm-gc-experiments");
+ break;
+ }
+ const StructType* s = consume_struct(module_->signature_zone.get());
+ module_->add_struct_type(s);
+ uint32_t super_index = consume_u32v("supertype");
+ if (!module_->has_struct(super_index)) {
+ errorf(pc(), "invalid struct supertype: %d", super_index);
+ break;
+ }
+ break;
+ }
case kWasmArrayTypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
@@ -586,6 +616,21 @@ class ModuleDecoderImpl : public Decoder {
module_->add_array_type(type);
break;
}
+ case kWasmArrayExtendingTypeCode: {
+ if (!enabled_features_.has_gc_experiments()) {
+ errorf(pc(),
+ "nominal types need --experimental-wasm-gc-experiments");
+ break;
+ }
+ const ArrayType* type = consume_array(module_->signature_zone.get());
+ module_->add_array_type(type);
+ uint32_t super_index = consume_u32v("supertype");
+ if (!module_->has_array(super_index)) {
+ errorf(pc(), "invalid array supertype: %d", super_index);
+ break;
+ }
+ break;
+ }
default:
errorf(pc(), "unknown type form: %d", kind);
break;
@@ -680,17 +725,17 @@ class ModuleDecoderImpl : public Decoder {
}
break;
}
- case kExternalException: {
- // ===== Imported exception ==========================================
+ case kExternalTag: {
+ // ===== Imported tag ================================================
if (!enabled_features_.has_eh()) {
errorf(pos, "unknown import kind 0x%02x", import->kind);
break;
}
- import->index = static_cast<uint32_t>(module_->exceptions.size());
- const WasmExceptionSig* exception_sig = nullptr;
+ import->index = static_cast<uint32_t>(module_->tags.size());
+ const WasmTagSig* tag_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
- consume_exception_sig_index(module_.get(), &exception_sig);
- module_->exceptions.emplace_back(exception_sig);
+ consume_tag_sig_index(module_.get(), &tag_sig);
+ module_->tags.emplace_back(tag_sig);
break;
}
default:
@@ -845,13 +890,13 @@ class ModuleDecoderImpl : public Decoder {
}
break;
}
- case kExternalException: {
+ case kExternalTag: {
if (!enabled_features_.has_eh()) {
errorf(pos, "invalid export kind 0x%02x", exp->kind);
break;
}
- WasmException* exception = nullptr;
- exp->index = consume_exception_index(module_.get(), &exception);
+ WasmTag* tag = nullptr;
+ exp->index = consume_tag_index(module_.get(), &tag);
break;
}
default:
@@ -1259,16 +1304,14 @@ class ModuleDecoderImpl : public Decoder {
consume_count("data segments count", kV8MaxWasmDataSegments);
}
- void DecodeExceptionSection() {
- uint32_t exception_count =
- consume_count("exception count", kV8MaxWasmExceptions);
- for (uint32_t i = 0; ok() && i < exception_count; ++i) {
- TRACE("DecodeException[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- const WasmExceptionSig* exception_sig = nullptr;
+ void DecodeTagSection() {
+ uint32_t tag_count = consume_count("tag count", kV8MaxWasmTags);
+ for (uint32_t i = 0; ok() && i < tag_count; ++i) {
+ TRACE("DecodeTag[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
+ const WasmTagSig* tag_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
- consume_exception_sig_index(module_.get(), &exception_sig);
- module_->exceptions.emplace_back(exception_sig);
+ consume_tag_sig_index(module_.get(), &tag_sig);
+ module_->tags.emplace_back(tag_sig);
}
}
@@ -1544,12 +1587,11 @@ class ModuleDecoderImpl : public Decoder {
return sig_index;
}
- uint32_t consume_exception_sig_index(WasmModule* module,
- const FunctionSig** sig) {
+ uint32_t consume_tag_sig_index(WasmModule* module, const FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_sig_index(module, sig);
if (*sig && (*sig)->return_count() != 0) {
- errorf(pos, "exception signature %u has non-void return", sig_index);
+ errorf(pos, "tag signature %u has non-void return", sig_index);
*sig = nullptr;
return 0;
}
@@ -1579,8 +1621,8 @@ class ModuleDecoderImpl : public Decoder {
return consume_index("table index", &module->tables, table);
}
- uint32_t consume_exception_index(WasmModule* module, WasmException** except) {
- return consume_index("exception index", &module->exceptions, except);
+ uint32_t consume_tag_index(WasmModule* module, WasmTag** tag) {
+ return consume_index("tag index", &module->tags, tag);
}
template <typename T>
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index e1409952b2..f56ab55cd7 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -7,7 +7,7 @@
#include "src/api/api.h"
#include "src/asmjs/asm-js.h"
#include "src/base/platform/wrappers.h"
-#include "src/logging/counters.h"
+#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/descriptor-array-inl.h"
@@ -65,6 +65,7 @@ class CompileImportWrapperJob final : public JobTask {
}
void Run(JobDelegate* delegate) override {
+ CodeSpaceWriteScope code_space_write_scope(native_module_);
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
@@ -139,7 +140,8 @@ Handle<DescriptorArray> CreateArrayDescriptorArray(
// TODO(jkummerow): Move these elsewhere.
Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
- int struct_index, Handle<Map> opt_rtt_parent) {
+ int struct_index, Handle<Map> opt_rtt_parent,
+ Handle<WasmInstanceObject> instance) {
const wasm::StructType* type = module->struct_type(struct_index);
const int inobject_properties = 0;
// We have to use the variable size sentinel because the instance size
@@ -150,7 +152,8 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
// TODO(jkummerow): If NO_ELEMENTS were supported, we could use that here.
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), opt_rtt_parent, real_instance_size);
+ reinterpret_cast<Address>(type), opt_rtt_parent, real_instance_size,
+ instance);
Handle<DescriptorArray> descriptors =
CreateStructDescriptorArray(isolate, type);
Handle<Map> map = isolate->factory()->NewMap(
@@ -163,7 +166,8 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
}
Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
- int array_index, Handle<Map> opt_rtt_parent) {
+ int array_index, Handle<Map> opt_rtt_parent,
+ Handle<WasmInstanceObject> instance) {
const wasm::ArrayType* type = module->array_type(array_index);
const int inobject_properties = 0;
const int instance_size = kVariableSizeSentinel;
@@ -172,7 +176,8 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
const InstanceType instance_type = WASM_ARRAY_TYPE;
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), opt_rtt_parent, cached_instance_size);
+ reinterpret_cast<Address>(type), opt_rtt_parent, cached_instance_size,
+ instance);
// TODO(ishell): get canonical descriptor array for WasmArrays from roots.
Handle<DescriptorArray> descriptors =
CreateArrayDescriptorArray(isolate, type);
@@ -242,10 +247,10 @@ Handle<Map> AllocateSubRtt(Isolate* isolate,
// Allocate a fresh RTT otherwise.
Handle<Map> rtt;
if (module->has_struct(type)) {
- rtt = wasm::CreateStructMap(isolate, module, type, parent);
+ rtt = wasm::CreateStructMap(isolate, module, type, parent, instance);
} else {
DCHECK(module->has_array(type));
- rtt = wasm::CreateArrayMap(isolate, module, type, parent);
+ rtt = wasm::CreateArrayMap(isolate, module, type, parent, instance);
}
if (mode == WasmRttSubMode::kCanonicalize) {
@@ -288,7 +293,7 @@ class InstanceBuilder {
Handle<WasmMemoryObject> memory_object_;
Handle<JSArrayBuffer> untagged_globals_;
Handle<FixedArray> tagged_globals_;
- std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
+ std::vector<Handle<WasmTagObject>> tags_wrappers_;
Handle<WasmExportedFunction> start_function_;
std::vector<SanitizedImport> sanitized_imports_;
Zone init_expr_zone_;
@@ -381,7 +386,7 @@ class InstanceBuilder {
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
- // functions.
+ // functions, or {-1} on error.
int ProcessImports(Handle<WasmInstanceObject> instance);
template <typename T>
@@ -401,9 +406,9 @@ class InstanceBuilder {
void LoadTableSegments(Handle<WasmInstanceObject> instance);
- // Creates new exception tags for all exceptions. Note that some tags might
- // already exist if they were imported, those tags will be re-used.
- void InitializeExceptions(Handle<WasmInstanceObject> instance);
+ // Creates new tags. Note that some tags might already exist if they were
+ // imported; those tags will be re-used.
+ void InitializeTags(Handle<WasmInstanceObject> instance);
};
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
@@ -580,14 +585,14 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Set up the exception table used for exception tag checks.
+ // Set up the tag table used for exception tag checks.
//--------------------------------------------------------------------------
- int exceptions_count = static_cast<int>(module_->exceptions.size());
- if (exceptions_count > 0) {
- Handle<FixedArray> exception_table = isolate_->factory()->NewFixedArray(
- exceptions_count, AllocationType::kOld);
- instance->set_exceptions_table(*exception_table);
- exception_wrappers_.resize(exceptions_count);
+ int tags_count = static_cast<int>(module_->tags.size());
+ if (tags_count > 0) {
+ Handle<FixedArray> tag_table =
+ isolate_->factory()->NewFixedArray(tags_count, AllocationType::kOld);
+ instance->set_tags_table(*tag_table);
+ tags_wrappers_.resize(tags_count);
}
//--------------------------------------------------------------------------
@@ -634,14 +639,14 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
instance->set_indirect_function_tables(*tables);
}
- CodeSpaceWriteScope native_modification_scope(native_module);
-
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
- int num_imported_functions = ProcessImports(instance);
- if (num_imported_functions < 0) return {};
- wasm_module_instantiated.imported_function_count = num_imported_functions;
+ if (!module_->import_table.empty()) {
+ int num_imported_functions = ProcessImports(instance);
+ if (num_imported_functions < 0) return {};
+ wasm_module_instantiated.imported_function_count = num_imported_functions;
+ }
//--------------------------------------------------------------------------
// Create maps for managed objects (GC proposal).
@@ -658,10 +663,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<Map> map;
switch (module_->type_kinds[map_index]) {
case kWasmStructTypeCode:
- map = CreateStructMap(isolate_, module_, map_index, Handle<Map>());
+ map = CreateStructMap(isolate_, module_, map_index, Handle<Map>(),
+ instance);
break;
case kWasmArrayTypeCode:
- map = CreateArrayMap(isolate_, module_, map_index, Handle<Map>());
+ map = CreateArrayMap(isolate_, module_, map_index, Handle<Map>(),
+ instance);
break;
case kWasmFunctionTypeCode:
// TODO(7748): Think about canonicalizing rtts to make them work for
@@ -690,10 +697,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Initialize the exceptions table.
+ // Initialize the tags table.
//--------------------------------------------------------------------------
- if (exceptions_count > 0) {
- InitializeExceptions(instance);
+ if (tags_count > 0) {
+ InitializeTags(instance);
}
//--------------------------------------------------------------------------
@@ -1505,24 +1512,22 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
break;
}
- case kExternalException: {
- if (!value->IsWasmExceptionObject()) {
- ReportLinkError("exception import requires a WebAssembly.Exception",
- index, module_name, import_name);
+ case kExternalTag: {
+ if (!value->IsWasmTagObject()) {
+ ReportLinkError("tag import requires a WebAssembly.Tag", index,
+ module_name, import_name);
return -1;
}
- Handle<WasmExceptionObject> imported_exception =
- Handle<WasmExceptionObject>::cast(value);
- if (!imported_exception->MatchesSignature(
- module_->exceptions[import.index].sig)) {
- ReportLinkError("imported exception does not match the expected type",
+ Handle<WasmTagObject> imported_tag = Handle<WasmTagObject>::cast(value);
+ if (!imported_tag->MatchesSignature(module_->tags[import.index].sig)) {
+ ReportLinkError("imported tag does not match the expected type",
index, module_name, import_name);
return -1;
}
- Object exception_tag = imported_exception->exception_tag();
- DCHECK(instance->exceptions_table().get(import.index).IsUndefined());
- instance->exceptions_table().set(import.index, exception_tag);
- exception_wrappers_[import.index] = imported_exception;
+ Object tag = imported_tag->tag();
+ DCHECK(instance->tags_table().get(import.index).IsUndefined());
+ instance->tags_table().set(import.index, tag);
+ tags_wrappers_[import.index] = imported_tag;
break;
}
default:
@@ -1734,16 +1739,15 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
desc.set_value(global_obj);
break;
}
- case kExternalException: {
- const WasmException& exception = module_->exceptions[exp.index];
- Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
+ case kExternalTag: {
+ const WasmTag& tag = module_->tags[exp.index];
+ Handle<WasmTagObject> wrapper = tags_wrappers_[exp.index];
if (wrapper.is_null()) {
- Handle<HeapObject> exception_tag(
- HeapObject::cast(instance->exceptions_table().get(exp.index)),
+ Handle<HeapObject> tag_object(
+ HeapObject::cast(instance->tags_table().get(exp.index)),
isolate_);
- wrapper =
- WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
- exception_wrappers_[exp.index] = wrapper;
+ wrapper = WasmTagObject::New(isolate_, tag.sig, tag_object);
+ tags_wrappers_[exp.index] = wrapper;
}
desc.set_value(wrapper);
break;
@@ -1974,14 +1978,12 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
}
}
-void InstanceBuilder::InitializeExceptions(
- Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
- for (int index = 0; index < exceptions_table->length(); ++index) {
- if (!exceptions_table->get(index).IsUndefined(isolate_)) continue;
- Handle<WasmExceptionTag> exception_tag =
- WasmExceptionTag::New(isolate_, index);
- exceptions_table->set(index, *exception_tag);
+void InstanceBuilder::InitializeTags(Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> tags_table(instance->tags_table(), isolate_);
+ for (int index = 0; index < tags_table->length(); ++index) {
+ if (!tags_table->get(index).IsUndefined(isolate_)) continue;
+ Handle<WasmExceptionTag> tag = WasmExceptionTag::New(isolate_, index);
+ tags_table->set(index, *tag);
}
}
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index fe30f804b7..d080d1285e 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -4,7 +4,9 @@
#include "src/wasm/wasm-code-manager.h"
+#include <algorithm>
#include <iomanip>
+#include <numeric>
#include "src/base/build_config.h"
#include "src/base/iterator.h"
@@ -265,13 +267,9 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
"wasm-function[%d]", index()));
name = base::VectorOf(name_buffer);
}
- // TODO(clemensb): Remove this #if once this compilation unit is excluded in
- // no-wasm builds.
-#if V8_ENABLE_WEBASSEMBLY
int code_offset = module->functions[index_].code.offset();
PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
source_url, code_offset, script_id));
-#endif // V8_ENABLE_WEBASSEMBLY
if (!source_positions().empty()) {
LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
@@ -516,7 +514,11 @@ int WasmCode::GetSourcePositionBefore(int offset) {
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
- : async_counters_(std::move(async_counters)) {
+ : protect_code_memory_(
+ !V8_HAS_PTHREAD_JIT_WRITE_PROTECT &&
+ FLAG_wasm_write_protect_code_memory &&
+ !GetWasmCodeManager()->HasMemoryProtectionKeySupport()),
+ async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
}
@@ -625,6 +627,66 @@ std::pair<size_t, size_t> ReservationSize(size_t code_size_estimate,
return {minimum_size, reserve_size};
}
+#ifdef DEBUG
+// Check postconditions when returning from this method:
+// 1) {region} must be fully contained in {writable_memory_};
+// 2) {writable_memory_} must be a maximally merged ordered set of disjoint
+// non-empty regions.
+class CheckWritableMemoryRegions {
+ public:
+ CheckWritableMemoryRegions(
+ std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
+ writable_memory,
+ base::AddressRegion new_region, size_t& new_writable_memory)
+ : writable_memory_(writable_memory),
+ new_region_(new_region),
+ new_writable_memory_(new_writable_memory),
+ old_writable_size_(std::accumulate(
+ writable_memory_.begin(), writable_memory_.end(), size_t{0},
+ [](size_t old, base::AddressRegion region) {
+ return old + region.size();
+ })) {}
+
+ ~CheckWritableMemoryRegions() {
+ // {new_region} must be contained in {writable_memory_}.
+ DCHECK(std::any_of(
+ writable_memory_.begin(), writable_memory_.end(),
+ [this](auto region) { return region.contains(new_region_); }));
+
+ // The new total size of writable memory must have increased by
+ // {new_writable_memory}.
+ size_t total_writable_size = std::accumulate(
+ writable_memory_.begin(), writable_memory_.end(), size_t{0},
+ [](size_t old, auto region) { return old + region.size(); });
+ DCHECK_EQ(old_writable_size_ + new_writable_memory_, total_writable_size);
+
+ // There are no empty regions.
+ DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
+ [](auto region) { return region.is_empty(); }));
+
+ // Regions are sorted and disjoint.
+ std::accumulate(writable_memory_.begin(), writable_memory_.end(),
+ Address{0}, [](Address previous_end, auto region) {
+ DCHECK_LT(previous_end, region.begin());
+ return region.end();
+ });
+ }
+
+ private:
+ const std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
+ writable_memory_;
+ const base::AddressRegion new_region_;
+ const size_t& new_writable_memory_;
+ const size_t old_writable_size_;
+};
+#else // !DEBUG
+class CheckWritableMemoryRegions {
+ public:
+ template <typename... Args>
+ explicit CheckWritableMemoryRegions(Args...) {}
+};
+#endif // !DEBUG
+
} // namespace
base::Vector<byte> WasmCodeAllocator::AllocateForCode(
@@ -674,6 +736,10 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
const Address commit_page_size = CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
+ if (commit_start != code_space.begin()) {
+ MakeWritable({commit_start - commit_page_size, commit_page_size});
+ }
+
Address commit_end = RoundUp(code_space.end(), commit_page_size);
// {commit_start} will be either code_space.start or the start of the next
// page. {commit_end} will be the start of the page after the one in which
@@ -691,6 +757,11 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
committed_code_space_.fetch_add(commit_end - commit_start);
// Committed code cannot grow bigger than maximum code space size.
DCHECK_LE(committed_code_space_.load(), FLAG_wasm_max_code_space * MB);
+ if (protect_code_memory_) {
+ DCHECK_LT(0, writers_count_);
+ InsertIntoWritableRegions({commit_start, commit_end - commit_start},
+ false);
+ }
}
DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
allocated_code_space_.Merge(code_space);
@@ -701,70 +772,50 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
-// TODO(dlehmann): Do not return the success as a bool, but instead fail hard.
-// That is, pull the CHECK from {CodeSpaceWriteScope} in here and return void.
-// TODO(dlehmann): Ensure {SetWritable(true)} is always paired up with a
-// {SetWritable(false)}, such that eventually the code space is write protected.
+// TODO(dlehmann): Ensure that {AddWriter()} is always paired up with a
+// {RemoveWriter}, such that eventually the code space is write protected.
// One solution is to make the API foolproof by hiding {SetWritable()} and
// allowing change of permissions only through {CodeSpaceWriteScope}.
// TODO(dlehmann): Add tests that ensure the code space is eventually write-
// protected.
-bool WasmCodeAllocator::SetWritable(bool writable) {
- // Invariant: `this.writers_count_ > 0` iff `code space has W permission`.
- // TODO(dlehmann): This is currently not fulfilled before the first call
- // to SetWritable(false), because initial permissions are RWX.
- // Fix by setting initial permissions to RX and adding writable permission
- // where appropriate. See also {WasmCodeManager::Commit()}.
- if (writable) {
- if (++writers_count_ > 1) return true;
- } else {
- DCHECK_GT(writers_count_, 0);
- if (--writers_count_ > 0) return true;
- }
- writable = writers_count_ > 0;
- TRACE_HEAP("Setting module %p as writable: %d.\n", this, writable);
-
- if (FLAG_wasm_write_protect_code_memory) {
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-
- // Due to concurrent compilation and execution, we always need the execute
- // permission, however during codegen we additionally need to write.
- // Hence this does not actually achieve write-xor-execute, but merely
- // "always-execute" with "no-write-eventually".
- PageAllocator::Permission permission =
- writable ? PageAllocator::kReadWriteExecute
- : PageAllocator::kReadExecute;
-#if V8_OS_WIN
- // On windows, we need to switch permissions per separate virtual memory
- // reservation.
- // For now, in that case, we commit at reserved memory granularity.
- // Technically, that may be a waste, because we may reserve more than we
- // use. On 32-bit though, the scarce resource is the address space -
- // committed or not.
- for (auto& vmem : owned_code_space_) {
- if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
- permission)) {
- return false;
- }
- TRACE_HEAP("Set %p:%p to writable:%d\n", vmem.address(), vmem.end(),
- writable);
- }
-#else // V8_OS_WIN
- size_t commit_page_size = page_allocator->CommitPageSize();
- for (auto& region : allocated_code_space_.regions()) {
- // allocated_code_space_ is fine-grained, so we need to
- // page-align it.
- size_t region_size = RoundUp(region.size(), commit_page_size);
- if (!SetPermissions(page_allocator, region.begin(), region_size,
- permission)) {
- return false;
- }
- TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to writable:%d\n",
- region.begin(), region.end(), writable);
+void WasmCodeAllocator::AddWriter() {
+ DCHECK(protect_code_memory_);
+ ++writers_count_;
+}
+
+void WasmCodeAllocator::RemoveWriter() {
+ DCHECK(protect_code_memory_);
+ DCHECK_GT(writers_count_, 0);
+ if (--writers_count_ > 0) return;
+
+ // Switch all memory to non-writable.
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ for (base::AddressRegion writable : writable_memory_) {
+ for (base::AddressRegion split_range :
+ SplitRangeByReservationsIfNeeded(writable, owned_code_space_)) {
+ TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RX\n",
+ split_range.begin(), split_range.end());
+ CHECK(SetPermissions(page_allocator, split_range.begin(),
+ split_range.size(), PageAllocator::kReadExecute));
}
-#endif // V8_OS_WIN
}
- return true;
+ writable_memory_.clear();
+}
+
+void WasmCodeAllocator::MakeWritable(base::AddressRegion region) {
+ if (!protect_code_memory_) return;
+ DCHECK_LT(0, writers_count_);
+ DCHECK(!region.is_empty());
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
+ // Align to commit page size.
+ size_t commit_page_size = page_allocator->CommitPageSize();
+ DCHECK(base::bits::IsPowerOfTwo(commit_page_size));
+ Address begin = RoundDown(region.begin(), commit_page_size);
+ Address end = RoundUp(region.end(), commit_page_size);
+ region = base::AddressRegion(begin, end - begin);
+
+ InsertIntoWritableRegions(region, true);
}
void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
@@ -772,9 +823,6 @@ void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
DisjointAllocationPool freed_regions;
size_t code_size = 0;
for (WasmCode* code : codes) {
- ZapCode(code->instruction_start(), code->instructions().size());
- FlushInstructionCache(code->instruction_start(),
- code->instructions().size());
code_size += code->instructions().size();
freed_regions.Merge(base::AddressRegion{code->instruction_start(),
code->instructions().size()});
@@ -814,6 +862,83 @@ size_t WasmCodeAllocator::GetNumCodeSpaces() const {
return owned_code_space_.size();
}
+void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region,
+ bool switch_to_writable) {
+ size_t new_writable_memory = 0;
+
+ CheckWritableMemoryRegions check_on_return{writable_memory_, region,
+ new_writable_memory};
+
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ // Subroutine to make a non-writable region writable (if {switch_to_writable}
+ // is {true}) and insert it into {writable_memory_}.
+ auto make_writable = [&](decltype(writable_memory_)::iterator insert_pos,
+ base::AddressRegion region) {
+ new_writable_memory += region.size();
+ if (switch_to_writable) {
+ for (base::AddressRegion split_range :
+ SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
+ TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RWX\n",
+ split_range.begin(), split_range.end());
+ CHECK(SetPermissions(page_allocator, split_range.begin(),
+ split_range.size(),
+ PageAllocator::kReadWriteExecute));
+ }
+ }
+
+ // Insert {region} into {writable_memory_} before {insert_pos}, potentially
+ // merging it with the surrounding regions.
+ if (insert_pos != writable_memory_.begin()) {
+ auto previous = insert_pos;
+ --previous;
+ if (previous->end() == region.begin()) {
+ region = {previous->begin(), previous->size() + region.size()};
+ writable_memory_.erase(previous);
+ }
+ }
+ if (region.end() == insert_pos->begin()) {
+ region = {region.begin(), insert_pos->size() + region.size()};
+ insert_pos = writable_memory_.erase(insert_pos);
+ }
+ writable_memory_.insert(insert_pos, region);
+ };
+
+ DCHECK(!region.is_empty());
+ // Find a possible insertion position by identifying the first region whose
+ // start address is not less than that of {region}, and then starting the
+ // merge from the existing region before that.
+ auto it = writable_memory_.lower_bound(region);
+ if (it != writable_memory_.begin()) --it;
+ for (;; ++it) {
+ if (it == writable_memory_.end() || it->begin() >= region.end()) {
+ // No overlap; add before {it}.
+ make_writable(it, region);
+ return;
+ }
+ if (it->end() <= region.begin()) continue; // Continue after {it}.
+ base::AddressRegion overlap = it->GetOverlap(region);
+ DCHECK(!overlap.is_empty());
+ if (overlap.begin() == region.begin()) {
+ if (overlap.end() == region.end()) return; // Fully contained already.
+ // Remove overlap (which is already writable) and continue.
+ region = {overlap.end(), region.end() - overlap.end()};
+ continue;
+ }
+ if (overlap.end() == region.end()) {
+ // Remove overlap (which is already writable), then make the remaining
+ // region writable.
+ region = {region.begin(), overlap.begin() - region.begin()};
+ make_writable(it, region);
+ return;
+ }
+ // Split {region}, make the split writable, and continue with the rest.
+ base::AddressRegion split = {region.begin(),
+ overlap.begin() - region.begin()};
+ make_writable(it, split);
+ region = {overlap.end(), region.end() - overlap.end()};
+ }
+}
+
// static
constexpr base::AddressRegion WasmCodeAllocator::kUnrestrictedRegion;
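// Simplified sketch of the bookkeeping behind InsertIntoWritableRegions: a
// std::map from region start to region end stands in for V8's ordered set of
// AddressRegions. Insert [begin, end) and merge it with any overlapping or
// adjacent entries so the container stays sorted, disjoint and maximally
// merged (the real code additionally flips page permissions to RWX for the
// newly writable sub-ranges).
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <map>

void InsertRegion(std::map<uintptr_t, uintptr_t>* regions, uintptr_t begin,
                  uintptr_t end) {
  // Start from the first existing region that could touch [begin, end).
  auto it = regions->lower_bound(begin);
  if (it != regions->begin() && std::prev(it)->second >= begin) --it;
  // Swallow every region that overlaps or is directly adjacent.
  while (it != regions->end() && it->first <= end) {
    begin = std::min(begin, it->first);
    end = std::max(end, it->second);
    it = regions->erase(it);
  }
  (*regions)[begin] = end;
}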
@@ -1054,13 +1179,13 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
ExecutionTier tier, ForDebugging for_debugging) {
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_table_ref;
+ CodeSpaceWriteScope code_space_write_scope(this);
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
jump_table_ref =
FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
}
- CodeSpaceWriteScope code_space_write_scope(this);
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
source_position_table, kind, tier, for_debugging,
@@ -1336,11 +1461,11 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
allocation_mutex_.AssertHeld();
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
+ CodeSpaceWriteScope code_space_write_scope(this);
base::Vector<uint8_t> code_space =
code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
- CodeSpaceWriteScope code_space_write_scope(this);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
new WasmCode{this, // native_module
@@ -1388,6 +1513,11 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
DCHECK_NOT_NULL(code_space_data.jump_table);
DCHECK_NOT_NULL(code_space_data.far_jump_table);
+ code_allocator_.MakeWritable(
+ AddressRegionOf(code_space_data.jump_table->instructions()));
+ code_allocator_.MakeWritable(
+ AddressRegionOf(code_space_data.far_jump_table->instructions()));
+
DCHECK_LT(slot_index, module_->num_declared_functions);
Address jump_table_slot =
code_space_data.jump_table->instruction_start() +
@@ -1416,6 +1546,7 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
DCHECK_GE(region.size(),
2 * OverheadPerCodeSpace(module()->num_declared_functions));
+ CodeSpaceWriteScope code_space_write_scope(this);
#if defined(V8_OS_WIN64)
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space.
@@ -1434,7 +1565,6 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
#endif // V8_OS_WIN64
WasmCodeRefScope code_ref_scope;
- CodeSpaceWriteScope code_space_write_scope(this);
WasmCode* jump_table = nullptr;
WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
@@ -1970,14 +2100,6 @@ size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
void WasmCodeManager::SetThreadWritable(bool writable) {
DCHECK(HasMemoryProtectionKeySupport());
- static thread_local int writable_nesting_level = 0;
- if (writable) {
- if (++writable_nesting_level > 1) return;
- } else {
- DCHECK_GT(writable_nesting_level, 0);
- if (--writable_nesting_level > 0) return;
- }
- writable = writable_nesting_level > 0;
MemoryProtectionKeyPermission permissions =
writable ? kNoRestrictions : kDisableWrite;
@@ -2048,8 +2170,8 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
isolate->async_counters(), &ret);
// The constructor initialized the shared_ptr.
DCHECK_NOT_NULL(ret);
- TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
- size);
+ TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
+ start, size);
base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
@@ -2108,6 +2230,7 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
}
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_tables;
+ CodeSpaceWriteScope code_space_write_scope(this);
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_space = code_allocator_.AllocateForCode(this, total_code_space);
@@ -2125,10 +2248,6 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
generated_code.reserve(results.size());
// Now copy the generated code into the code space and relocate it.
- // Get writable permission already here (and not inside the loop in
- // {AddCodeWithCodeSpace}), to avoid lock contention on the
- // {allocator_mutex_} if we try to switch for each code individually.
- CodeSpaceWriteScope code_space_write_scope(this);
for (auto& result : results) {
DCHECK_EQ(result.code_desc.buffer, result.instr_buffer->start());
size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
@@ -2231,10 +2350,6 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
base::RecursiveMutexGuard guard(&allocation_mutex_);
- // Get writable permission already here (and not inside the loop in
- // {WasmCodeAllocator::FreeCode}), to avoid switching for each {code}
- // individually.
- CodeSpaceWriteScope code_space_write_scope(this);
// Free the code space.
code_allocator_.FreeCode(codes);
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 3d35478cfb..2baf46e888 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -508,10 +508,19 @@ class WasmCodeAllocator {
base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
base::AddressRegion);
- // Sets permissions of all owned code space to read-write or read-only (if
- // {writable} is false). Returns true on success.
+ // Increases or decreases the {writers_count_} field. While there is at least
+ // one writer, it is allowed to call {MakeWritable} to make regions writable.
+ // When the last writer is removed, all code is switched back to
+ // write-protected.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling one of these
+ // methods. The methods should only be called via {CodeSpaceWriteScope}.
+ V8_EXPORT_PRIVATE void AddWriter();
+ V8_EXPORT_PRIVATE void RemoveWriter();
+
+  // Make a code region writable. Only allowed if there is at least one writer
+ // (see above).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- V8_EXPORT_PRIVATE bool SetWritable(bool writable);
+ void MakeWritable(base::AddressRegion);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
@@ -527,6 +536,9 @@ class WasmCodeAllocator {
static constexpr base::AddressRegion kUnrestrictedRegion{
kNullAddress, std::numeric_limits<size_t>::max()};
+ void InsertIntoWritableRegions(base::AddressRegion region,
+ bool switch_to_writable);
+
//////////////////////////////////////////////////////////////////////////////
// These fields are protected by the mutex in {NativeModule}.
@@ -540,11 +552,18 @@ class WasmCodeAllocator {
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
+ // The following two fields are only used if {protect_code_memory_} is true.
int writers_count_{0};
+ std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>
+ writable_memory_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
+ // {protect_code_memory_} is true if traditional memory permission switching
+ // is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is
+ // being used, or protection is completely disabled.
+ const bool protect_code_memory_;
std::atomic<size_t> committed_code_space_{0};
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
@@ -657,9 +676,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
- bool SetWritable(bool writable) {
+ void AddWriter() {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ code_allocator_.AddWriter();
+ }
+
+ void RemoveWriter() {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ code_allocator_.RemoveWriter();
+ }
+
+ void MakeWritable(base::AddressRegion region) {
base::RecursiveMutexGuard guard{&allocation_mutex_};
- return code_allocator_.SetWritable(writable);
+ code_allocator_.MakeWritable(region);
}
// For cctests, where we build both WasmModule and the runtime objects
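
The hunks above replace the old boolean `SetWritable(bool)` with a writer-count protocol: `AddWriter`/`RemoveWriter` bracket a writing phase, and `MakeWritable` unprotects individual regions while at least one writer is active. Below is a minimal, self-contained sketch of that protocol; the stub types and the bookkeeping-only bodies are assumptions for illustration, not V8's actual implementation (which also flips page permissions and asserts the locking discipline described in the comments).

```cpp
#include <cstdint>
#include <set>

// Hypothetical stand-ins; the real types are V8's NativeModule and
// base::AddressRegion.
struct AddressRegion {
  uintptr_t begin = 0;
  size_t size = 0;
  bool operator<(const AddressRegion& other) const {
    return begin < other.begin;
  }
};

class NativeModuleStub {
 public:
  void AddWriter() { ++writers_count_; }
  void RemoveWriter() {
    // Last writer: everything that was made writable goes back to
    // read-only/executable.
    if (--writers_count_ == 0) writable_memory_.clear();
  }
  void MakeWritable(AddressRegion region) {
    // Only legal while writers_count_ > 0; the real code asserts this.
    writable_memory_.insert(region);
  }

 private:
  int writers_count_ = 0;
  std::set<AddressRegion> writable_memory_;
};

// RAII scope mirroring the intended usage: the constructor adds a writer and
// the destructor removes it, so nested scopes are cheap and the switch back
// to write-protected memory happens exactly once.
class CodeSpaceWriteScopeSketch {
 public:
  explicit CodeSpaceWriteScopeSketch(NativeModuleStub* native_module)
      : native_module_(native_module) {
    native_module_->AddWriter();
  }
  ~CodeSpaceWriteScopeSketch() { native_module_->RemoveWriter(); }

 private:
  NativeModuleStub* native_module_;
};
```
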
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index f960e7c201..726ceaa018 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -50,6 +50,9 @@ enum ValueTypeCode : uint8_t {
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
constexpr uint8_t kWasmStructTypeCode = 0x5f;
constexpr uint8_t kWasmArrayTypeCode = 0x5e;
+constexpr uint8_t kWasmFunctionExtendingTypeCode = 0x5d;
+constexpr uint8_t kWasmStructExtendingTypeCode = 0x5c;
+constexpr uint8_t kWasmArrayExtendingTypeCode = 0x5b;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
@@ -57,7 +60,7 @@ enum ImportExportKindCode : uint8_t {
kExternalTable = 1,
kExternalMemory = 2,
kExternalGlobal = 3,
- kExternalException = 4
+ kExternalTag = 4
};
enum LimitsFlags : uint8_t {
@@ -91,7 +94,7 @@ enum SectionCode : int8_t {
kCodeSectionCode = 10, // Function code
kDataSectionCode = 11, // Data segments
kDataCountSectionCode = 12, // Number of data segments
- kExceptionSectionCode = 13, // Exception section
+ kTagSectionCode = 13, // Tag section
// The following sections are custom sections, and are identified using a
// string rather than an integer. Their enumeration values are not guaranteed
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 45fa789364..65f05ad507 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -280,12 +280,13 @@ class DebugInfoImpl {
// Debug side tables for stepping are generated lazily.
bool generate_debug_sidetable = for_debugging == kWithBreakpoints;
- Counters* counters = nullptr;
- WasmFeatures unused_detected;
WasmCompilationResult result = ExecuteLiftoffCompilation(
- &env, body, func_index, for_debugging, counters, &unused_detected,
- offsets, generate_debug_sidetable ? &debug_sidetable : nullptr,
- dead_breakpoint);
+ &env, body, func_index, for_debugging,
+ LiftoffOptions{}
+ .set_breakpoints(offsets)
+ .set_dead_breakpoint(dead_breakpoint)
+ .set_debug_sidetable(generate_debug_sidetable ? &debug_sidetable
+ : nullptr));
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
if (!result.succeeded()) FATAL("Liftoff compilation failed");
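
The call site above now passes a `LiftoffOptions{}` object with chained setters instead of a long positional argument list. The following is a generic sketch of that chained-setter pattern; the field names and types are illustrative only, not V8's exact `LiftoffOptions` definition.

```cpp
#include <vector>

// Generic sketch of a chained-setter options object. Each setter returns
// *this so callers configure only the options they need and leave the rest
// at their defaults.
struct CompilationOptionsSketch {
  const std::vector<int>* breakpoints = nullptr;
  int dead_breakpoint = 0;
  void* debug_sidetable = nullptr;

  CompilationOptionsSketch& set_breakpoints(const std::vector<int>* b) {
    breakpoints = b;
    return *this;
  }
  CompilationOptionsSketch& set_dead_breakpoint(int offset) {
    dead_breakpoint = offset;
    return *this;
  }
  CompilationOptionsSketch& set_debug_sidetable(void* table) {
    debug_sidetable = table;
    return *this;
  }
};

// Usage mirrors the call site in the diff:
//   Compile(..., CompilationOptionsSketch{}
//                    .set_breakpoints(&offsets)
//                    .set_dead_breakpoint(dead_breakpoint));
```
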
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index a452e51855..b65db60154 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -20,6 +20,8 @@
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
@@ -183,6 +185,7 @@ GET_FIRST_ARGUMENT_AS(Module)
GET_FIRST_ARGUMENT_AS(Memory)
GET_FIRST_ARGUMENT_AS(Table)
GET_FIRST_ARGUMENT_AS(Global)
+GET_FIRST_ARGUMENT_AS(Tag)
#undef GET_FIRST_ARGUMENT_AS
@@ -1231,6 +1234,48 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
return true;
}
+namespace {
+
+bool ToI32(Local<v8::Value> value, Local<Context> context, int32_t* i32_value) {
+ if (!value->IsUndefined()) {
+ v8::Local<v8::Int32> int32_value;
+ if (!value->ToInt32(context).ToLocal(&int32_value)) return false;
+ if (!int32_value->Int32Value(context).To(i32_value)) return false;
+ }
+ return true;
+}
+
+bool ToI64(Local<v8::Value> value, Local<Context> context, int64_t* i64_value) {
+ if (!value->IsUndefined()) {
+ v8::Local<v8::BigInt> bigint_value;
+ if (!value->ToBigInt(context).ToLocal(&bigint_value)) return false;
+ *i64_value = bigint_value->Int64Value();
+ }
+ return true;
+}
+
+bool ToF32(Local<v8::Value> value, Local<Context> context, float* f32_value) {
+ if (!value->IsUndefined()) {
+ double f64_value = 0;
+ v8::Local<v8::Number> number_value;
+ if (!value->ToNumber(context).ToLocal(&number_value)) return false;
+ if (!number_value->NumberValue(context).To(&f64_value)) return false;
+ *f32_value = i::DoubleToFloat32(f64_value);
+ }
+ return true;
+}
+
+bool ToF64(Local<v8::Value> value, Local<Context> context, double* f64_value) {
+ if (!value->IsUndefined()) {
+ v8::Local<v8::Number> number_value;
+ if (!value->ToNumber(context).ToLocal(&number_value)) return false;
+ if (!number_value->NumberValue(context).To(f64_value)) return false;
+ }
+ return true;
+}
+
+} // namespace
+
// WebAssembly.Global
void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -1296,43 +1341,25 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
switch (type.kind()) {
case i::wasm::kI32: {
int32_t i32_value = 0;
- if (!value->IsUndefined()) {
- v8::Local<v8::Int32> int32_value;
- if (!value->ToInt32(context).ToLocal(&int32_value)) return;
- if (!int32_value->Int32Value(context).To(&i32_value)) return;
- }
+ if (!ToI32(value, context, &i32_value)) return;
global_obj->SetI32(i32_value);
break;
}
case i::wasm::kI64: {
int64_t i64_value = 0;
- if (!value->IsUndefined()) {
- v8::Local<v8::BigInt> bigint_value;
- if (!value->ToBigInt(context).ToLocal(&bigint_value)) return;
- i64_value = bigint_value->Int64Value();
- }
+ if (!ToI64(value, context, &i64_value)) return;
global_obj->SetI64(i64_value);
break;
}
case i::wasm::kF32: {
float f32_value = 0;
- if (!value->IsUndefined()) {
- double f64_value = 0;
- v8::Local<v8::Number> number_value;
- if (!value->ToNumber(context).ToLocal(&number_value)) return;
- if (!number_value->NumberValue(context).To(&f64_value)) return;
- f32_value = i::DoubleToFloat32(f64_value);
- }
+ if (!ToF32(value, context, &f32_value)) return;
global_obj->SetF32(f32_value);
break;
}
case i::wasm::kF64: {
double f64_value = 0;
- if (!value->IsUndefined()) {
- v8::Local<v8::Number> number_value;
- if (!value->ToNumber(context).ToLocal(&number_value)) return;
- if (!number_value->NumberValue(context).To(&f64_value)) return;
- }
+ if (!ToF64(value, context, &f64_value)) return;
global_obj->SetF64(f64_value);
break;
}
@@ -1408,19 +1435,19 @@ uint32_t GetIterableLength(i::Isolate* isolate, Local<Context> context,
} // namespace
-// WebAssembly.Exception
-void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
+// WebAssembly.Tag
+void WebAssemblyTag(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Tag()");
if (!args.IsConstructCall()) {
- thrower.TypeError("WebAssembly.Exception must be invoked with 'new'");
+ thrower.TypeError("WebAssembly.Tag must be invoked with 'new'");
return;
}
if (!args[0]->IsObject()) {
- thrower.TypeError("Argument 0 must be an exception type");
+ thrower.TypeError("Argument 0 must be a tag type");
return;
}
@@ -1435,7 +1462,7 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Value> parameters_value;
if (!parameters_maybe.ToLocal(&parameters_value) ||
!parameters_value->IsObject()) {
- thrower.TypeError("Argument 0 must be an exception type with 'parameters'");
+ thrower.TypeError("Argument 0 must be a tag type with 'parameters'");
return;
}
Local<Object> parameters = parameters_value.As<Object>();
@@ -1449,7 +1476,7 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- // Decode the exception type and construct a signature.
+ // Decode the tag type and construct a signature.
std::vector<i::wasm::ValueType> param_types(parameters_len,
i::wasm::kWasmVoid);
for (uint32_t i = 0; i < parameters_len; ++i) {
@@ -1466,9 +1493,132 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Set the tag index to 0. It is only used for debugging purposes, and has no
// meaningful value when declared outside of a wasm module.
auto tag = i::WasmExceptionTag::New(i_isolate, 0);
- i::Handle<i::Object> exception =
- i::WasmExceptionObject::New(i_isolate, &sig, tag);
- args.GetReturnValue().Set(Utils::ToLocal(exception));
+ i::Handle<i::JSObject> tag_object =
+ i::WasmTagObject::New(i_isolate, &sig, tag);
+ args.GetReturnValue().Set(Utils::ToLocal(tag_object));
+}
+
+namespace {
+
+uint32_t GetEncodedSize(i::Handle<i::WasmTagObject> tag_object) {
+ auto serialized_sig = tag_object->serialized_signature();
+ i::wasm::WasmTagSig sig{0, static_cast<size_t>(serialized_sig.length()),
+ reinterpret_cast<i::wasm::ValueType*>(
+ serialized_sig.GetDataStartAddress())};
+ i::wasm::WasmTag tag(&sig);
+ return i::WasmExceptionPackage::GetEncodedSize(&tag);
+}
+
+void EncodeExceptionValues(v8::Isolate* isolate,
+ i::PodArray<i::wasm::ValueType> signature,
+ const Local<Value>& arg,
+ ScheduledErrorThrower* thrower,
+ i::Handle<i::FixedArray> values_out) {
+ Local<Context> context = isolate->GetCurrentContext();
+ uint32_t index = 0;
+ if (!arg->IsObject()) {
+ thrower->TypeError("Exception values must be an iterable object");
+ return;
+ }
+ auto values = arg.As<Object>();
+ for (int i = 0; i < signature.length(); ++i) {
+ MaybeLocal<Value> maybe_value = values->Get(context, i);
+ Local<Value> value = maybe_value.ToLocalChecked();
+ i::wasm::ValueType type = signature.get(i);
+ switch (type.kind()) {
+ case i::wasm::kI32: {
+ int32_t i32 = 0;
+ if (!ToI32(value, context, &i32)) return;
+ i::EncodeI32ExceptionValue(values_out, &index, i32);
+ break;
+ }
+ case i::wasm::kI64: {
+ int64_t i64 = 0;
+ if (!ToI64(value, context, &i64)) return;
+ i::EncodeI64ExceptionValue(values_out, &index, i64);
+ break;
+ }
+ case i::wasm::kF32: {
+ float f32 = 0;
+ if (!ToF32(value, context, &f32)) return;
+ int32_t i32 = bit_cast<int32_t>(f32);
+ i::EncodeI32ExceptionValue(values_out, &index, i32);
+ break;
+ }
+ case i::wasm::kF64: {
+ double f64 = 0;
+ if (!ToF64(value, context, &f64)) return;
+ int64_t i64 = bit_cast<int64_t>(f64);
+ i::EncodeI64ExceptionValue(values_out, &index, i64);
+ break;
+ }
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
+ switch (type.heap_representation()) {
+ case i::wasm::HeapType::kExtern:
+ case i::wasm::HeapType::kFunc:
+ case i::wasm::HeapType::kAny:
+ case i::wasm::HeapType::kEq:
+ case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kData:
+ values_out->set(index++, *Utils::OpenHandle(*value));
+ break;
+ case internal::wasm::HeapType::kBottom:
+ UNREACHABLE();
+ default:
+ // TODO(7748): Add support for custom struct/array types.
+ UNIMPLEMENTED();
+ }
+ break;
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kVoid:
+ case i::wasm::kBottom:
+ case i::wasm::kS128:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+} // namespace
+
+void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Exception must be invoked with 'new'");
+ return;
+ }
+ if (!args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be a WebAssembly tag");
+ return;
+ }
+ i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
+ if (!i::HeapObject::cast(*arg0).IsWasmTagObject()) {
+ thrower.TypeError("Argument 0 must be a WebAssembly tag");
+ return;
+ }
+ auto tag_object = i::Handle<i::WasmTagObject>::cast(arg0);
+ auto tag = i::Handle<i::WasmExceptionTag>(
+ i::WasmExceptionTag::cast(tag_object->tag()), i_isolate);
+ uint32_t size = GetEncodedSize(tag_object);
+ i::Handle<i::WasmExceptionPackage> runtime_exception =
+ i::WasmExceptionPackage::New(i_isolate, tag, size);
+ // The constructor above should guarantee that the cast below succeeds.
+ auto values = i::Handle<i::FixedArray>::cast(
+ i::WasmExceptionPackage::GetExceptionValues(i_isolate,
+ runtime_exception));
+ auto signature = tag_object->serialized_signature();
+ EncodeExceptionValues(isolate, signature, args[1], &thrower, values);
+ if (thrower.error()) return;
+ args.GetReturnValue().Set(
+ Utils::ToLocal(i::Handle<i::Object>::cast(runtime_exception)));
}
// WebAssembly.Function
@@ -1621,6 +1771,8 @@ constexpr const char* kName_WasmGlobalObject = "WebAssembly.Global";
constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
+constexpr const char* kName_WasmTagObject = "WebAssembly.Tag";
+constexpr const char* kName_WasmExceptionPackage = "WebAssembly.Exception";
#define EXTRACT_THIS(var, WasmType) \
i::Handle<i::WasmType> var; \
@@ -1852,6 +2004,196 @@ void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(type));
}
+// WebAssembly.Tag.type() -> FunctionType
+void WebAssemblyTagType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Tag.type()");
+
+ EXTRACT_THIS(tag, WasmTagObject);
+ if (thrower.error()) return;
+
+ int n = tag->serialized_signature().length();
+ std::vector<i::wasm::ValueType> data(n);
+ if (n > 0) {
+ tag->serialized_signature().copy_out(0, data.data(), n);
+ }
+ const i::wasm::FunctionSig sig{0, data.size(), data.data()};
+ constexpr bool kForException = true;
+ auto type = i::wasm::GetTypeForFunction(i_isolate, &sig, kForException);
+ args.GetReturnValue().Set(Utils::ToLocal(type));
+}
+
+void WebAssemblyExceptionGetArg(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception.getArg()");
+
+ EXTRACT_THIS(exception, WasmExceptionPackage);
+ if (thrower.error()) return;
+
+ i::MaybeHandle<i::WasmTagObject> maybe_tag =
+ GetFirstArgumentAsTag(args, &thrower);
+ if (thrower.error()) return;
+ auto tag = maybe_tag.ToHandleChecked();
+ Local<Context> context = isolate->GetCurrentContext();
+ uint32_t index;
+ if (!EnforceUint32("Index", args[1], context, &thrower, &index)) {
+ return;
+ }
+ auto maybe_values =
+ i::WasmExceptionPackage::GetExceptionValues(i_isolate, exception);
+
+ auto this_tag =
+ i::WasmExceptionPackage::GetExceptionTag(i_isolate, exception);
+ if (this_tag->IsUndefined()) {
+ thrower.TypeError("Expected a WebAssembly.Exception object");
+ return;
+ }
+ DCHECK(this_tag->IsWasmExceptionTag());
+ if (tag->tag() != *this_tag) {
+ thrower.TypeError("First argument does not match the exception tag");
+ return;
+ }
+
+ DCHECK(!maybe_values->IsUndefined());
+ auto values = i::Handle<i::FixedArray>::cast(maybe_values);
+ auto signature = tag->serialized_signature();
+ if (index >= static_cast<uint32_t>(signature.length())) {
+ thrower.RangeError("Index out of range");
+ return;
+ }
+ // First, find the index in the values array.
+ uint32_t decode_index = 0;
+ // Since the bounds check above passed, the cast to int is safe.
+ for (int i = 0; i < static_cast<int>(index); ++i) {
+ switch (signature.get(i).kind()) {
+ case i::wasm::kI32:
+ case i::wasm::kF32:
+ decode_index += 2;
+ break;
+ case i::wasm::kI64:
+ case i::wasm::kF64:
+ decode_index += 4;
+ break;
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
+ switch (signature.get(i).heap_representation()) {
+ case i::wasm::HeapType::kExtern:
+ case i::wasm::HeapType::kFunc:
+ case i::wasm::HeapType::kAny:
+ case i::wasm::HeapType::kEq:
+ case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kData:
+ decode_index++;
+ break;
+ case i::wasm::HeapType::kBottom:
+ UNREACHABLE();
+ default:
+ // TODO(7748): Add support for custom struct/array types.
+ UNIMPLEMENTED();
+ }
+ break;
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kVoid:
+ case i::wasm::kBottom:
+ case i::wasm::kS128:
+ UNREACHABLE();
+ }
+ }
+ // Decode the value at {decode_index}.
+ Local<Value> result;
+ switch (signature.get(index).kind()) {
+ case i::wasm::kI32: {
+ uint32_t u32_bits = 0;
+ i::DecodeI32ExceptionValue(values, &decode_index, &u32_bits);
+ int32_t i32 = static_cast<int32_t>(u32_bits);
+ result = v8::Integer::New(isolate, i32);
+ break;
+ }
+ case i::wasm::kI64: {
+ uint64_t u64_bits = 0;
+ i::DecodeI64ExceptionValue(values, &decode_index, &u64_bits);
+ int64_t i64 = static_cast<int64_t>(u64_bits);
+ result = v8::BigInt::New(isolate, i64);
+ break;
+ }
+ case i::wasm::kF32: {
+ uint32_t f32_bits = 0;
+ DecodeI32ExceptionValue(values, &decode_index, &f32_bits);
+ float f32 = bit_cast<float>(f32_bits);
+ result = v8::Number::New(isolate, f32);
+ break;
+ }
+ case i::wasm::kF64: {
+ uint64_t f64_bits = 0;
+ DecodeI64ExceptionValue(values, &decode_index, &f64_bits);
+ double f64 = bit_cast<double>(f64_bits);
+ result = v8::Number::New(isolate, f64);
+ break;
+ }
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
+ switch (signature.get(index).heap_representation()) {
+ case i::wasm::HeapType::kExtern:
+ case i::wasm::HeapType::kFunc:
+ case i::wasm::HeapType::kAny:
+ case i::wasm::HeapType::kEq:
+ case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kData: {
+ auto obj = values->get(decode_index);
+ result = Utils::ToLocal(i::Handle<i::Object>(obj, i_isolate));
+ break;
+ }
+ case i::wasm::HeapType::kBottom:
+ UNREACHABLE();
+ default:
+ // TODO(7748): Add support for custom struct/array types.
+ UNIMPLEMENTED();
+ }
+ break;
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kVoid:
+ case i::wasm::kBottom:
+ case i::wasm::kS128:
+ UNREACHABLE();
+ }
+ args.GetReturnValue().Set(result);
+}
+
+void WebAssemblyExceptionIs(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception.is()");
+
+ EXTRACT_THIS(exception, WasmExceptionPackage);
+ if (thrower.error()) return;
+
+ auto tag = i::WasmExceptionPackage::GetExceptionTag(i_isolate, exception);
+ if (tag->IsUndefined()) {
+ thrower.TypeError("Expected a WebAssembly.Exception object");
+ return;
+ }
+ DCHECK(tag->IsWasmExceptionTag());
+
+ auto maybe_tag = GetFirstArgumentAsTag(args, &thrower);
+ if (thrower.error()) {
+ return;
+ }
+ auto tag_arg = maybe_tag.ToHandleChecked();
+ args.GetReturnValue().Set(tag_arg->tag() == *tag);
+}
+
void WebAssemblyGlobalGetValueCommon(
const v8::FunctionCallbackInfo<v8::Value>& args, const char* name) {
v8::Isolate* isolate = args.GetIsolate();
@@ -2287,15 +2629,40 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Exception
if (enabled_features.has_eh()) {
+ Handle<JSFunction> tag_constructor =
+ InstallConstructorFunc(isolate, webassembly, "Tag", WebAssemblyTag);
+ context->set_wasm_tag_constructor(*tag_constructor);
+
+ SetDummyInstanceTemplate(isolate, tag_constructor);
+ JSFunction::EnsureHasInitialMap(tag_constructor);
+ Handle<JSObject> tag_proto(
+ JSObject::cast(tag_constructor->instance_prototype()), isolate);
+ JSObject::AddProperty(isolate, tag_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Tag"), ro_attributes);
+ if (enabled_features.has_type_reflection()) {
+ InstallFunc(isolate, tag_proto, "type", WebAssemblyTagType, 0);
+ }
+ Handle<Map> tag_map = isolate->factory()->NewMap(
+ i::WASM_TAG_OBJECT_TYPE, WasmTagObject::kHeaderSize);
+ JSFunction::SetInitialMap(isolate, tag_constructor, tag_map, tag_proto);
+
+ // Set up runtime exception constructor.
Handle<JSFunction> exception_constructor = InstallConstructorFunc(
isolate, webassembly, "Exception", WebAssemblyException);
- context->set_wasm_exception_constructor(*exception_constructor);
SetDummyInstanceTemplate(isolate, exception_constructor);
- JSFunction::EnsureHasInitialMap(exception_constructor);
+ Handle<Map> exception_map(isolate->native_context()
+ ->wasm_exception_error_function()
+ .initial_map(),
+ isolate);
Handle<JSObject> exception_proto(
- JSObject::cast(exception_constructor->instance_prototype()), isolate);
- Handle<Map> exception_map = isolate->factory()->NewMap(
- i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
+ JSObject::cast(isolate->native_context()
+ ->wasm_exception_error_function()
+ .instance_prototype()),
+ isolate);
+ InstallFunc(isolate, exception_proto, "getArg", WebAssemblyExceptionGetArg,
+ 2);
+ InstallFunc(isolate, exception_proto, "is", WebAssemblyExceptionIs, 1);
+ context->set_wasm_exception_constructor(*exception_constructor);
JSFunction::SetInitialMap(isolate, exception_constructor, exception_map,
exception_proto);
}
@@ -2349,7 +2716,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<Context> context) {
// Exception handling may have been enabled by an origin trial. If so, make
- // sure that the {WebAssembly.Exception} constructor is set up.
+ // sure that the {WebAssembly.Tag} constructor is set up.
auto enabled_features = i::wasm::WasmFeatures::FromContext(isolate, context);
if (enabled_features.has_eh()) {
Handle<JSGlobalObject> global = handle(context->global_object(), isolate);
@@ -2358,45 +2725,47 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<Object> webassembly_obj;
if (!maybe_webassembly.ToHandle(&webassembly_obj)) {
      // There is no {WebAssembly} object. We just return without adding the
- // {Exception} constructor.
+ // {Tag} constructor.
return;
}
if (!webassembly_obj->IsJSObject()) {
- // The {WebAssembly} object is invalid. As we cannot add the {Exception}
+ // The {WebAssembly} object is invalid. As we cannot add the {Tag}
// constructor, we just return.
return;
}
Handle<JSObject> webassembly = Handle<JSObject>::cast(webassembly_obj);
// Setup Exception
- Handle<String> exception_name = v8_str(isolate, "Exception");
- if (JSObject::HasOwnProperty(webassembly, exception_name).FromMaybe(true)) {
+ Handle<String> tag_name = v8_str(isolate, "Tag");
+ if (JSObject::HasOwnProperty(webassembly, tag_name).FromMaybe(true)) {
      // The {Tag} constructor already exists, there is nothing more to
// do.
return;
}
bool has_prototype = true;
- Handle<JSFunction> exception_constructor =
- CreateFunc(isolate, exception_name, WebAssemblyException, has_prototype,
+ Handle<JSFunction> tag_constructor =
+ CreateFunc(isolate, tag_name, WebAssemblyTag, has_prototype,
SideEffectType::kHasNoSideEffect);
- exception_constructor->shared().set_length(1);
- auto result = Object::SetProperty(
- isolate, webassembly, exception_name, exception_constructor,
- StoreOrigin::kNamed, Just(ShouldThrow::kDontThrow));
+ tag_constructor->shared().set_length(1);
+ auto result =
+ Object::SetProperty(isolate, webassembly, tag_name, tag_constructor,
+ StoreOrigin::kNamed, Just(ShouldThrow::kDontThrow));
if (result.is_null()) {
- // Setting the {Exception} constructor failed. We just bail out.
+ // Setting the {Tag} constructor failed. We just bail out.
return;
}
// Install the constructor on the context.
- context->set_wasm_exception_constructor(*exception_constructor);
- SetDummyInstanceTemplate(isolate, exception_constructor);
- JSFunction::EnsureHasInitialMap(exception_constructor);
- Handle<JSObject> exception_proto(
- JSObject::cast(exception_constructor->instance_prototype()), isolate);
- Handle<Map> exception_map = isolate->factory()->NewMap(
- i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
- JSFunction::SetInitialMap(isolate, exception_constructor, exception_map,
- exception_proto);
+ context->set_wasm_tag_constructor(*tag_constructor);
+ SetDummyInstanceTemplate(isolate, tag_constructor);
+ JSFunction::EnsureHasInitialMap(tag_constructor);
+ Handle<JSObject> tag_proto(
+ JSObject::cast(tag_constructor->instance_prototype()), isolate);
+ if (enabled_features.has_type_reflection()) {
+ InstallFunc(isolate, tag_proto, "type", WebAssemblyTagType, 0);
+ }
+ Handle<Map> tag_map = isolate->factory()->NewMap(
+ i::WASM_TAG_OBJECT_TYPE, WasmTagObject::kHeaderSize);
+ JSFunction::SetInitialMap(isolate, tag_constructor, tag_map, tag_proto);
}
}
#undef ASSIGN
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 9e2ddc7fcc..b7806af797 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -31,7 +31,7 @@ constexpr size_t kV8MaxWasmFunctions = 1000000;
constexpr size_t kV8MaxWasmImports = 100000;
constexpr size_t kV8MaxWasmExports = 100000;
constexpr size_t kV8MaxWasmGlobals = 1000000;
-constexpr size_t kV8MaxWasmExceptions = 1000000;
+constexpr size_t kV8MaxWasmTags = 1000000;
constexpr size_t kV8MaxWasmExceptionTypes = 1000000;
constexpr size_t kV8MaxWasmDataSegments = 100000;
// This indicates the maximum memory size our implementation supports.
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index e5209cdcde..2bf20ea3ec 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -687,7 +687,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// Emit event section.
if (exceptions_.size() > 0) {
- size_t start = EmitSection(kExceptionSectionCode, buffer);
+ size_t start = EmitSection(kTagSectionCode, buffer);
buffer->write_size(exceptions_.size());
for (int type : exceptions_) {
buffer->write_u32v(kExceptionAttribute);
@@ -730,7 +730,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// yet, so there is no index offset to add.
buffer->write_size(ex.index);
break;
- case kExternalException:
+ case kExternalTag:
UNREACHABLE();
}
}
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index d36db5f009..db2091cdba 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -292,9 +292,22 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
DCHECK(types_[index].kind == Type::kFunctionSig);
return types_[index].sig;
}
+ bool IsStructType(uint32_t index) {
+ return types_[index].kind == Type::kStructType;
+ }
+ StructType* GetStructType(uint32_t index) {
+ return types_[index].struct_type;
+ }
+
+ bool IsArrayType(uint32_t index) {
+ return types_[index].kind == Type::kArrayType;
+ }
+ ArrayType* GetArrayType(uint32_t index) { return types_[index].array_type; }
int NumExceptions() { return static_cast<int>(exceptions_.size()); }
+ int NumTypes() { return static_cast<int>(types_.size()); }
+
FunctionSig* GetExceptionType(int index) {
return types_[exceptions_[index]].sig;
}
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 65c78e0b95..97a31487ea 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -51,13 +51,23 @@ int MaxNumExportWrappers(const WasmModule* module) {
return static_cast<int>(module->signature_map.size()) * 2;
}
-// static
+int GetExportWrapperIndexInternal(const WasmModule* module,
+ int canonical_sig_index, bool is_import) {
+ if (is_import) canonical_sig_index += module->signature_map.size();
+ return canonical_sig_index;
+}
+
int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
bool is_import) {
- int result = module->signature_map.Find(*sig);
- CHECK_GE(result, 0);
- result += is_import ? module->signature_map.size() : 0;
- return result;
+ int canonical_sig_index = module->signature_map.Find(*sig);
+ CHECK_GE(canonical_sig_index, 0);
+ return GetExportWrapperIndexInternal(module, canonical_sig_index, is_import);
+}
+
+int GetExportWrapperIndex(const WasmModule* module, uint32_t sig_index,
+ bool is_import) {
+ uint32_t canonical_sig_index = module->canonicalized_type_ids[sig_index];
+ return GetExportWrapperIndexInternal(module, canonical_sig_index, is_import);
}
// static
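
The two `GetExportWrapperIndex` overloads above map a canonical signature index into the export-wrapper table, with import wrappers stored in a second block after the export wrappers. A small stand-alone illustration of that layout (the function name here is hypothetical):

```cpp
#include <cassert>
#include <cstddef>

// Sketch of the wrapper-index layout implied by the diff: with N canonical
// signatures, export wrappers occupy indices [0, N) and import wrappers
// occupy [N, 2N), which is why MaxNumExportWrappers returns 2 * N.
int ExportWrapperIndexSketch(size_t num_canonical_sigs, int canonical_sig_index,
                             bool is_import) {
  assert(canonical_sig_index >= 0);
  assert(static_cast<size_t>(canonical_sig_index) < num_canonical_sigs);
  return canonical_sig_index +
         (is_import ? static_cast<int>(num_canonical_sigs) : 0);
}

// Example: with 3 canonical signatures, signature #1 maps to wrapper 1 when
// exported and wrapper 4 (= 1 + 3) when imported.
```
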
@@ -227,7 +237,8 @@ Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
}
} // namespace
-Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig) {
+Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig,
+ bool for_exception) {
Factory* factory = isolate->factory();
// Extract values for the {ValueType[]} arrays.
@@ -238,23 +249,29 @@ Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig) {
Handle<String> type_value = ToValueTypeString(isolate, type);
param_values->set(param_index++, *type_value);
}
- int result_index = 0;
- int result_count = static_cast<int>(sig->return_count());
- Handle<FixedArray> result_values = factory->NewFixedArray(result_count);
- for (ValueType type : sig->returns()) {
- Handle<String> type_value = ToValueTypeString(isolate, type);
- result_values->set(result_index++, *type_value);
- }
// Create the resulting {FunctionType} object.
Handle<JSFunction> object_function = isolate->object_function();
Handle<JSObject> object = factory->NewJSObject(object_function);
Handle<JSArray> params = factory->NewJSArrayWithElements(param_values);
- Handle<JSArray> results = factory->NewJSArrayWithElements(result_values);
Handle<String> params_string = factory->InternalizeUtf8String("parameters");
Handle<String> results_string = factory->InternalizeUtf8String("results");
JSObject::AddProperty(isolate, object, params_string, params, NONE);
- JSObject::AddProperty(isolate, object, results_string, results, NONE);
+
+ // Now add the result types if needed.
+ if (for_exception) {
+ DCHECK_EQ(sig->returns().size(), 0);
+ } else {
+ int result_index = 0;
+ int result_count = static_cast<int>(sig->return_count());
+ Handle<FixedArray> result_values = factory->NewFixedArray(result_count);
+ for (ValueType type : sig->returns()) {
+ Handle<String> type_value = ToValueTypeString(isolate, type);
+ result_values->set(result_index++, *type_value);
+ }
+ Handle<JSArray> results = factory->NewJSArrayWithElements(result_values);
+ JSObject::AddProperty(isolate, object, results_string, results, NONE);
+ }
return object;
}
@@ -337,7 +354,7 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<String> table_string = factory->InternalizeUtf8String("table");
Handle<String> memory_string = factory->InternalizeUtf8String("memory");
Handle<String> global_string = factory->InternalizeUtf8String("global");
- Handle<String> exception_string = factory->InternalizeUtf8String("exception");
+ Handle<String> tag_string = factory->InternalizeUtf8String("tag");
// Create the result array.
const WasmModule* module = module_object->module();
@@ -396,8 +413,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
}
import_kind = global_string;
break;
- case kExternalException:
- import_kind = exception_string;
+ case kExternalTag:
+ import_kind = tag_string;
break;
}
DCHECK(!import_kind->is_null());
@@ -436,7 +453,7 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<String> table_string = factory->InternalizeUtf8String("table");
Handle<String> memory_string = factory->InternalizeUtf8String("memory");
Handle<String> global_string = factory->InternalizeUtf8String("global");
- Handle<String> exception_string = factory->InternalizeUtf8String("exception");
+ Handle<String> tag_string = factory->InternalizeUtf8String("tag");
// Create the result array.
const WasmModule* module = module_object->module();
@@ -493,8 +510,8 @@ Handle<JSArray> GetExports(Isolate* isolate,
}
export_kind = global_string;
break;
- case kExternalException:
- export_kind = exception_string;
+ case kExternalTag:
+ export_kind = tag_string;
break;
default:
UNREACHABLE();
@@ -601,7 +618,7 @@ size_t EstimateStoredSize(const WasmModule* module) {
VectorSize(module->canonicalized_type_ids) +
VectorSize(module->functions) + VectorSize(module->data_segments) +
VectorSize(module->tables) + VectorSize(module->import_table) +
- VectorSize(module->export_table) + VectorSize(module->exceptions) +
+ VectorSize(module->export_table) + VectorSize(module->tags) +
VectorSize(module->elem_segments);
}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index ed48532fca..d1f874a908 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -80,16 +80,16 @@ struct WasmGlobal {
bool exported; // true if exported.
};
-// Note: An exception signature only uses the params portion of a
-// function signature.
-using WasmExceptionSig = FunctionSig;
+// Note: An exception tag signature only uses the params portion of a function
+// signature.
+using WasmTagSig = FunctionSig;
-// Static representation of a wasm exception type.
-struct WasmException {
- explicit WasmException(const WasmExceptionSig* sig) : sig(sig) {}
+// Static representation of a wasm tag type.
+struct WasmTag {
+ explicit WasmTag(const WasmTagSig* sig) : sig(sig) {}
const FunctionSig* ToFunctionSig() const { return sig; }
- const WasmExceptionSig* sig; // type signature of the exception.
+ const WasmTagSig* sig; // type signature of the tag.
};
// Static representation of a wasm data segment.
@@ -342,7 +342,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmTable> tables;
std::vector<WasmImport> import_table;
std::vector<WasmExport> export_table;
- std::vector<WasmException> exceptions;
+ std::vector<WasmTag> tags;
std::vector<WasmElemSegment> elem_segments;
std::vector<WasmCompilationHint> compilation_hints;
BranchHintInfo branch_hints;
@@ -396,9 +396,12 @@ size_t EstimateStoredSize(const WasmModule* module);
V8_EXPORT_PRIVATE int MaxNumExportWrappers(const WasmModule* module);
// Returns the wrapper index for a function in {module} with signature {sig}
-// and origin defined by {is_import}.
+// or {sig_index} and origin defined by {is_import}.
+// Prefer to use the {sig_index} consuming version, as it is much faster.
int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
bool is_import);
+int GetExportWrapperIndex(const WasmModule* module, uint32_t sig_index,
+ bool is_import);
// Return the byte offset of the function identified by the given index.
// The offset will be relative to the start of the module bytes.
@@ -469,7 +472,8 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
Handle<Context> context);
-Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig);
+Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig,
+ bool for_exception = false);
Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
ValueType type);
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 95303ed253..a75d83df02 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -35,7 +35,7 @@ namespace internal {
#include "torque-generated/src/wasm/wasm-objects-tq-inl.inc"
-TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTagObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData)
@@ -193,6 +193,14 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
kStackLimitAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, real_stack_limit_address, Address,
kRealStackLimitAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, new_allocation_limit_address, Address*,
+ kNewAllocationLimitAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, new_allocation_top_address, Address*,
+ kNewAllocationTopAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, old_allocation_limit_address, Address*,
+ kOldAllocationLimitAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, old_allocation_top_address, Address*,
+ kOldAllocationTopAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, imported_function_targets, Address*,
kImportedFunctionTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, globals_start, byte*,
@@ -241,8 +249,7 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
kIndirectFunctionTableRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
- kExceptionsTableOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, tags_table, FixedArray, kTagsTableOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_external_functions, FixedArray,
kWasmExternalFunctionsOffset)
ACCESSORS(WasmInstanceObject, managed_object_maps, FixedArray,
@@ -290,7 +297,7 @@ ImportedFunctionEntry::ImportedFunctionEntry(
}
// WasmExceptionPackage
-OBJECT_CONSTRUCTORS_IMPL(WasmExceptionPackage, JSReceiver)
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionPackage, JSObject)
CAST_ACCESSOR(WasmExceptionPackage)
// WasmExportedFunction
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 7b94b60561..a6ff80f624 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -850,20 +850,22 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
}
#ifdef V8_TARGET_ARCH_32_BIT
- if (shared == SharedFlag::kNotShared) {
- // On 32-bit platforms we need a heuristic here to balance overall memory
- // and address space consumption. If a maximum memory size is defined, then
- // we reserve that maximum size up to 1GB. If no maximum memory size is
- // defined, we just allocate the initial size and grow with a realloc.
- constexpr int kGBPages = 1024 * 1024 * 1024 / wasm::kWasmPageSize;
- if (initial > kGBPages || !has_maximum) {
- // We allocate at least the initial size. If no maximum is specified we
- // also start with the initial size.
- heuristic_maximum = initial;
- } else {
- // We reserve the maximum size, but at most 1GB.
- heuristic_maximum = std::min(maximum, kGBPages);
- }
+  // On 32-bit platforms we need a heuristic here to balance overall memory
+ // and address space consumption.
+ constexpr int kGBPages = 1024 * 1024 * 1024 / wasm::kWasmPageSize;
+ if (initial > kGBPages) {
+ // We always allocate at least the initial size.
+ heuristic_maximum = initial;
+ } else if (has_maximum) {
+ // We try to reserve the maximum, but at most 1GB to avoid OOMs.
+ heuristic_maximum = std::min(maximum, kGBPages);
+ } else if (shared == SharedFlag::kShared) {
+ // If shared memory has no maximum, we use an implicit maximum of 1GB.
+ heuristic_maximum = kGBPages;
+ } else {
+ // If non-shared memory has no maximum, we only allocate the initial size
+ // and then grow with realloc.
+ heuristic_maximum = initial;
}
#endif
@@ -1295,6 +1297,14 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->stack_guard()->address_of_jslimit());
instance->set_real_stack_limit_address(
isolate->stack_guard()->address_of_real_jslimit());
+ instance->set_new_allocation_limit_address(
+ isolate->heap()->NewSpaceAllocationLimitAddress());
+ instance->set_new_allocation_top_address(
+ isolate->heap()->NewSpaceAllocationTopAddress());
+ instance->set_old_allocation_limit_address(
+ isolate->heap()->OldSpaceAllocationLimitAddress());
+ instance->set_old_allocation_top_address(
+ isolate->heap()->OldSpaceAllocationTopAddress());
instance->set_globals_start(nullptr);
instance->set_indirect_function_table_size(0);
instance->set_indirect_function_table_refs(
@@ -1469,7 +1479,9 @@ WasmInstanceObject::GetOrCreateWasmExternalFunction(
const WasmModule* module = module_object->module();
const WasmFunction& function = module->functions[function_index];
int wrapper_index =
- GetExportWrapperIndex(module, function.sig, function.imported);
+ GetExportWrapperIndex(module, function.sig_index, function.imported);
+ DCHECK_EQ(wrapper_index,
+ GetExportWrapperIndex(module, function.sig, function.imported));
Handle<Object> entry =
FixedArray::get(module_object->export_wrappers(), wrapper_index, isolate);
@@ -1694,11 +1706,11 @@ Address WasmArray::ElementAddress(uint32_t index) {
}
// static
-Handle<WasmExceptionObject> WasmExceptionObject::New(
- Isolate* isolate, const wasm::FunctionSig* sig,
- Handle<HeapObject> exception_tag) {
- Handle<JSFunction> exception_cons(
- isolate->native_context()->wasm_exception_constructor(), isolate);
+Handle<WasmTagObject> WasmTagObject::New(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ Handle<HeapObject> tag) {
+ Handle<JSFunction> tag_cons(isolate->native_context()->wasm_tag_constructor(),
+ isolate);
// Serialize the signature.
DCHECK_EQ(0, sig->return_count());
@@ -1711,18 +1723,17 @@ Handle<WasmExceptionObject> WasmExceptionObject::New(
serialized_sig->set(index++, param);
}
- Handle<JSObject> exception_object =
- isolate->factory()->NewJSObject(exception_cons, AllocationType::kOld);
- Handle<WasmExceptionObject> exception =
- Handle<WasmExceptionObject>::cast(exception_object);
- exception->set_serialized_signature(*serialized_sig);
- exception->set_exception_tag(*exception_tag);
+ Handle<JSObject> tag_object =
+ isolate->factory()->NewJSObject(tag_cons, AllocationType::kOld);
+ Handle<WasmTagObject> tag_wrapper = Handle<WasmTagObject>::cast(tag_object);
+ tag_wrapper->set_serialized_signature(*serialized_sig);
+ tag_wrapper->set_tag(*tag);
- return exception;
+ return tag_wrapper;
}
// TODO(9495): Update this if function type variance is introduced.
-bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
+bool WasmTagObject::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
int sig_size = static_cast<int>(sig->parameter_count());
@@ -1760,14 +1771,20 @@ bool WasmCapiFunction::MatchesSignature(const wasm::FunctionSig* sig) const {
// static
Handle<WasmExceptionPackage> WasmExceptionPackage::New(
Isolate* isolate, Handle<WasmExceptionTag> exception_tag, int size) {
- Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
+ Handle<FixedArray> values = isolate->factory()->NewFixedArray(size);
+ return New(isolate, exception_tag, values);
+}
+
+Handle<WasmExceptionPackage> WasmExceptionPackage::New(
+ Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
+ Handle<FixedArray> values) {
+ Handle<JSObject> exception = isolate->factory()->NewWasmExceptionError(
MessageTemplate::kWasmExceptionError);
CHECK(!Object::SetProperty(isolate, exception,
isolate->factory()->wasm_exception_tag_symbol(),
exception_tag, StoreOrigin::kMaybeKeyed,
Just(ShouldThrow::kThrowOnError))
.is_null());
- Handle<FixedArray> values = isolate->factory()->NewFixedArray(size);
CHECK(!Object::SetProperty(isolate, exception,
isolate->factory()->wasm_exception_values_symbol(),
values, StoreOrigin::kMaybeKeyed,
@@ -1802,6 +1819,35 @@ Handle<Object> WasmExceptionPackage::GetExceptionValues(
return ReadOnlyRoots(isolate).undefined_value_handle();
}
+void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint32_t value) {
+ encoded_values->set((*encoded_index)++, Smi::FromInt(value >> 16));
+ encoded_values->set((*encoded_index)++, Smi::FromInt(value & 0xffff));
+}
+
+void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint64_t value) {
+ EncodeI32ExceptionValue(encoded_values, encoded_index,
+ static_cast<uint32_t>(value >> 32));
+ EncodeI32ExceptionValue(encoded_values, encoded_index,
+ static_cast<uint32_t>(value));
+}
+
+void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint32_t* value) {
+ uint32_t msb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
+ uint32_t lsb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
+ *value = (msb << 16) | (lsb & 0xffff);
+}
+
+void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint64_t* value) {
+ uint32_t lsb = 0, msb = 0;
+ DecodeI32ExceptionValue(encoded_values, encoded_index, &msb);
+ DecodeI32ExceptionValue(encoded_values, encoded_index, &lsb);
+ *value = (static_cast<uint64_t>(msb) << 32) | static_cast<uint64_t>(lsb);
+}
+
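
The new encode/decode helpers above store each exception value as Smi-tagged 16-bit halves: an i32 (or the bit pattern of an f32) occupies two FixedArray slots, an i64/f64 occupies four, and a reference occupies a single tagged slot. That is exactly the per-kind stride used when skipping values in `WebAssembly.Exception.getArg`. Below is a worked round-trip sketch using plain integers in place of Smis and a `std::vector` in place of the FixedArray; it is illustrative only.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the 16-bit-halves encoding, with a plain vector standing in for
// the Smi-filled FixedArray used by the real helpers.
void EncodeI32(std::vector<uint32_t>* out, uint32_t value) {
  out->push_back(value >> 16);     // most significant 16 bits
  out->push_back(value & 0xffff);  // least significant 16 bits
}

void EncodeI64(std::vector<uint32_t>* out, uint64_t value) {
  EncodeI32(out, static_cast<uint32_t>(value >> 32));
  EncodeI32(out, static_cast<uint32_t>(value));
}

uint32_t DecodeI32(const std::vector<uint32_t>& in, size_t* index) {
  uint32_t msb = in[(*index)++];
  uint32_t lsb = in[(*index)++];
  return (msb << 16) | (lsb & 0xffff);
}

int main() {
  std::vector<uint32_t> encoded;
  EncodeI32(&encoded, 0x12345678u);            // 2 slots: 0x1234, 0x5678
  EncodeI64(&encoded, 0x1122334455667788ull);  // 4 slots
  size_t index = 0;
  assert(DecodeI32(encoded, &index) == 0x12345678u);
  assert(index == 2);  // matches getArg's stride for i32/f32 values
  return 0;
}
```
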
#ifdef DEBUG
namespace {
@@ -1820,9 +1866,8 @@ size_t ComputeEncodedElementSize(wasm::ValueType type) {
#endif // DEBUG
// static
-uint32_t WasmExceptionPackage::GetEncodedSize(
- const wasm::WasmException* exception) {
- const wasm::WasmExceptionSig* sig = exception->sig;
+uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTag* tag) {
+ const wasm::WasmTagSig* sig = tag->sig;
uint32_t encoded_size = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
switch (sig->GetParam(i).kind()) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 1969a3a478..11d5c265ed 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -31,9 +31,9 @@ namespace wasm {
class InterpretedFrame;
class NativeModule;
class WasmCode;
-struct WasmException;
struct WasmGlobal;
struct WasmModule;
+struct WasmTag;
class WasmValue;
class WireBytesRef;
} // namespace wasm
@@ -351,7 +351,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
- DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(tags_table, FixedArray)
DECL_OPTIONAL_ACCESSORS(wasm_external_functions, FixedArray)
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
@@ -360,6 +360,10 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
+ DECL_PRIMITIVE_ACCESSORS(new_allocation_limit_address, Address*)
+ DECL_PRIMITIVE_ACCESSORS(new_allocation_top_address, Address*)
+ DECL_PRIMITIVE_ACCESSORS(old_allocation_limit_address, Address*)
+ DECL_PRIMITIVE_ACCESSORS(old_allocation_top_address, Address*)
DECL_PRIMITIVE_ACCESSORS(imported_function_targets, Address*)
DECL_PRIMITIVE_ACCESSORS(globals_start, byte*)
DECL_PRIMITIVE_ACCESSORS(imported_mutable_globals, Address*)
@@ -385,23 +389,36 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
// Layout description.
#define WASM_INSTANCE_OBJECT_FIELDS(V) \
/* Often-accessed fields go first to minimize generated code size. */ \
+ /* Less than system pointer sized fields come first. */ \
+ V(kImportedFunctionRefsOffset, kTaggedSize) \
+ V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
+ V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
+ /* Optional padding to align system pointer size fields */ \
+ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kMemoryStartOffset, kSystemPointerSize) \
V(kMemorySizeOffset, kSizetSize) \
V(kMemoryMaskOffset, kSizetSize) \
V(kStackLimitAddressOffset, kSystemPointerSize) \
- V(kImportedFunctionRefsOffset, kTaggedSize) \
V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
- V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
- V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
- /* Optional padding to align system pointer size fields */ \
- V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kGlobalsStartOffset, kSystemPointerSize) \
V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
V(kIsolateRootOffset, kSystemPointerSize) \
V(kJumpTableStartOffset, kSystemPointerSize) \
/* End of often-accessed fields. */ \
+ /* Continue with system pointer size fields to maintain alignment. */ \
+ V(kNewAllocationLimitAddressOffset, kSystemPointerSize) \
+ V(kNewAllocationTopAddressOffset, kSystemPointerSize) \
+ V(kOldAllocationLimitAddressOffset, kSystemPointerSize) \
+ V(kOldAllocationTopAddressOffset, kSystemPointerSize) \
+ V(kRealStackLimitAddressOffset, kSystemPointerSize) \
+ V(kDataSegmentStartsOffset, kSystemPointerSize) \
+ V(kDataSegmentSizesOffset, kSystemPointerSize) \
+ V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
+ V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \
+ V(kNumLiftoffFunctionCallsArrayOffset, kSystemPointerSize) \
+ /* Less than system pointer size aligned fields are below. */ \
V(kModuleObjectOffset, kTaggedSize) \
V(kExportsObjectOffset, kTaggedSize) \
V(kNativeContextOffset, kTaggedSize) \
@@ -412,15 +429,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kTablesOffset, kTaggedSize) \
V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
- V(kExceptionsTableOffset, kTaggedSize) \
+ V(kTagsTableOffset, kTaggedSize) \
V(kWasmExternalFunctionsOffset, kTaggedSize) \
V(kManagedObjectMapsOffset, kTaggedSize) \
- V(kRealStackLimitAddressOffset, kSystemPointerSize) \
- V(kDataSegmentStartsOffset, kSystemPointerSize) \
- V(kDataSegmentSizesOffset, kSystemPointerSize) \
- V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
- V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \
- V(kNumLiftoffFunctionCallsArrayOffset, kSystemPointerSize) \
V(kBreakOnEntryOffset, kUInt8Size) \
/* More padding to make the header pointer-size aligned */ \
V(kHeaderPaddingOffset, POINTER_SIZE_PADDING(kHeaderPaddingOffset)) \
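
The reordered field list above groups fields smaller than the system pointer size at the front and inserts the `kOptionalPaddingOffset` entry before the run of `kSystemPointerSize` fields, so that every pointer-size field stays naturally aligned. A tiny stand-alone illustration of the underlying alignment concern (the struct here is purely an example):

```cpp
#include <cstddef>
#include <cstdint>

// A 4-byte field followed by a pointer-size field forces padding before the
// pointer on 64-bit targets; the explicit padding entry in the field list
// above makes that padding visible in the layout description.
struct Example {
  uint32_t small_field;  // 4 bytes
  void* pointer_field;   // compiler inserts padding before this on LP64
};

static_assert(offsetof(Example, pointer_field) % alignof(void*) == 0,
              "pointer-size fields are kept naturally aligned");
```
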
@@ -454,7 +465,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
kTablesOffset,
kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
- kExceptionsTableOffset,
+ kTagsTableOffset,
kWasmExternalFunctionsOffset,
kManagedObjectMapsOffset};
@@ -541,30 +552,34 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
};
 // Representation of WebAssembly.Tag JavaScript-level object.
-class WasmExceptionObject
- : public TorqueGeneratedWasmExceptionObject<WasmExceptionObject, JSObject> {
+class WasmTagObject
+ : public TorqueGeneratedWasmTagObject<WasmTagObject, JSObject> {
public:
// Dispatched behavior.
- DECL_PRINTER(WasmExceptionObject)
+ DECL_PRINTER(WasmTagObject)
// Checks whether the given {sig} has the same parameter types as the
- // serialized signature stored within this exception object.
+ // serialized signature stored within this tag object.
bool MatchesSignature(const wasm::FunctionSig* sig);
- static Handle<WasmExceptionObject> New(Isolate* isolate,
- const wasm::FunctionSig* sig,
- Handle<HeapObject> exception_tag);
+ static Handle<WasmTagObject> New(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ Handle<HeapObject> tag);
- TQ_OBJECT_CONSTRUCTORS(WasmExceptionObject)
+ TQ_OBJECT_CONSTRUCTORS(WasmTagObject)
};
// A Wasm exception that has been thrown out of Wasm code.
-class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSReceiver {
+class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSObject {
public:
static Handle<WasmExceptionPackage> New(
Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
int encoded_size);
+ static Handle<WasmExceptionPackage> New(
+ Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
+ Handle<FixedArray> values);
+
// The below getters return {undefined} in case the given exception package
// does not carry the requested values (i.e. is of a different type).
static Handle<Object> GetExceptionTag(
@@ -573,12 +588,26 @@ class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSReceiver {
Isolate* isolate, Handle<WasmExceptionPackage> exception_package);
// Determines the size of the array holding all encoded exception values.
- static uint32_t GetEncodedSize(const wasm::WasmException* exception);
+ static uint32_t GetEncodedSize(const wasm::WasmTag* tag);
DECL_CAST(WasmExceptionPackage)
- OBJECT_CONSTRUCTORS(WasmExceptionPackage, JSReceiver);
+ OBJECT_CONSTRUCTORS(WasmExceptionPackage, JSObject);
};
+void V8_EXPORT_PRIVATE EncodeI32ExceptionValue(
+ Handle<FixedArray> encoded_values, uint32_t* encoded_index, uint32_t value);
+
+void V8_EXPORT_PRIVATE EncodeI64ExceptionValue(
+ Handle<FixedArray> encoded_values, uint32_t* encoded_index, uint64_t value);
+
+void V8_EXPORT_PRIVATE
+DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint32_t* value);
+
+void V8_EXPORT_PRIVATE
+DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint64_t* value);
+
// A Wasm function that is wrapped and exported to JavaScript.
// Representation of WebAssembly.Function JavaScript-level object.
class WasmExportedFunction : public JSFunction {
@@ -805,8 +834,8 @@ class WasmScript : public AllStatic {
// Tags provide an object identity for each exception defined in a wasm module
// header. They are referenced by the following fields:
-// - {WasmExceptionObject::exception_tag} : The tag of the exception object.
-// - {WasmInstanceObject::exceptions_table}: List of tags used by an instance.
+// - {WasmTagObject::tag}: The tag of the {Tag} object.
+// - {WasmInstanceObject::tags_table}: List of tags used by an instance.
class WasmExceptionTag
: public TorqueGeneratedWasmExceptionTag<WasmExceptionTag, Struct> {
public:
@@ -932,9 +961,11 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
namespace wasm {
Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
- int struct_index, MaybeHandle<Map> rtt_parent);
+ int struct_index, MaybeHandle<Map> rtt_parent,
+ Handle<WasmInstanceObject> instance);
Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
- int array_index, MaybeHandle<Map> rtt_parent);
+ int array_index, MaybeHandle<Map> rtt_parent,
+ Handle<WasmInstanceObject> instance);
Handle<Map> AllocateSubRtt(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t type,
Handle<Map> parent, WasmRttSubMode mode);
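The EncodeI32/I64ExceptionValue and DecodeI32/I64ExceptionValue helpers exported above write and read exception payload values through a running index into a FixedArray of Smis. A minimal standalone sketch of that calling pattern, with std::vector<uint16_t> standing in for the FixedArray and assuming the usual 16-bits-per-slot packing (so each slot fits in a Smi); the EncodeI32/DecodeI32 names here are illustrative, not V8's:

#include <cassert>
#include <cstdint>
#include <vector>

// Standalone sketch only: a vector of 16-bit slots stands in for the
// FixedArray of Smis, and the 16-bit packing is an assumption about the
// helpers declared above.
void EncodeI32(std::vector<uint16_t>* slots, uint32_t* index, uint32_t value) {
  slots->at((*index)++) = static_cast<uint16_t>(value >> 16);
  slots->at((*index)++) = static_cast<uint16_t>(value & 0xFFFF);
}

void DecodeI32(const std::vector<uint16_t>& slots, uint32_t* index,
               uint32_t* value) {
  *value = static_cast<uint32_t>(slots[(*index)++]) << 16;
  *value |= slots[(*index)++];
}

int main() {
  std::vector<uint16_t> slots(2);
  uint32_t write_index = 0, read_index = 0, out = 0;
  EncodeI32(&slots, &write_index, 0xDEADBEEF);  // two slots consumed
  DecodeI32(slots, &read_index, &out);          // round-trips the value
  assert(out == 0xDEADBEEF);
}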
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index cc66d1ebc0..dfdee08eed 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -11,7 +11,6 @@ type ManagedWasmNativeModule extends Foreign
extern class WasmInstanceObject extends JSObject;
-@generateCppClass
extern class WasmFunctionData extends Foreign {
// This is the "reference" value that must be passed along in the "instance"
// register when calling the given function. It is either the target instance,
@@ -26,7 +25,6 @@ extern class WasmFunctionData extends Foreign {
@ifnot(V8_EXTERNAL_CODE_SPACE) wrapper_code: Code;
}
-@generateCppClass
extern class WasmExportedFunctionData extends WasmFunctionData {
// This is the instance that exported the function (which in case of
// imported and re-exported functions is different from the instance
@@ -42,7 +40,6 @@ extern class WasmExportedFunctionData extends WasmFunctionData {
packed_args_size: Smi;
}
-@generateCppClass
extern class WasmJSFunctionData extends WasmFunctionData {
@if(V8_EXTERNAL_CODE_SPACE) wasm_to_js_wrapper_code: CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) wasm_to_js_wrapper_code: Code;
@@ -51,13 +48,11 @@ extern class WasmJSFunctionData extends WasmFunctionData {
serialized_signature: PodArrayOfWasmValueType;
}
-@generateCppClass
extern class WasmCapiFunctionData extends WasmFunctionData {
embedder_data: Foreign; // Managed<wasm::FuncData>
serialized_signature: PodArrayOfWasmValueType;
}
-@generateCppClass
extern class WasmIndirectFunctionTable extends Struct {
size: uint32;
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
@@ -68,7 +63,6 @@ extern class WasmIndirectFunctionTable extends Struct {
refs: FixedArray;
}
-@generateCppClass
extern class WasmExceptionTag extends Struct {
// Note that this index is only useful for debugging purposes and it is not
// unique across modules. The GC however does not allow objects without at
@@ -76,14 +70,12 @@ extern class WasmExceptionTag extends Struct {
index: Smi;
}
-@generateCppClass
extern class WasmModuleObject extends JSObject {
managed_native_module: ManagedWasmNativeModule;
export_wrappers: FixedArray;
script: Script;
}
-@generateCppClass
extern class WasmTableObject extends JSObject {
// The instance in which this WasmTableObject is defined.
// This field is undefined if the global is defined outside any Wasm module,
@@ -99,14 +91,12 @@ extern class WasmTableObject extends JSObject {
raw_type: Smi;
}
-@generateCppClass
extern class WasmMemoryObject extends JSObject {
array_buffer: JSArrayBuffer;
maximum_pages: Smi;
instances: WeakArrayList|Undefined;
}
-@generateCppClass
extern class WasmGlobalObject extends JSObject {
// The instance in which this WasmGlobalObject is defined.
// This field is undefined if the global is defined outside any Wasm module,
@@ -122,41 +112,44 @@ extern class WasmGlobalObject extends JSObject {
is_mutable: Smi;
}
-@generateCppClass
-extern class WasmExceptionObject extends JSObject {
+extern class WasmTagObject extends JSObject {
serialized_signature: PodArrayOfWasmValueType;
- exception_tag: HeapObject;
+ tag: HeapObject;
}
type WasmExportedFunction extends JSFunction;
-@generateCppClass
extern class AsmWasmData extends Struct {
managed_native_module: ManagedWasmNativeModule;
export_wrappers: FixedArray;
uses_bitset: HeapNumber;
}
-@generateCppClass
extern class WasmTypeInfo extends Foreign {
supertypes: FixedArray;
subtypes: ArrayList;
// In bytes, used for struct allocation.
instance_size: Smi;
+ // We must make sure that the StructType/ArrayType, which is allocated in
+ // the WasmModule's "signature_zone", stays around as long as there are
+ // HeapObjects referring to it. Short term, we simply keep a reference to
+ // the instance, which in turn keeps the entire WasmModule alive.
+ // TODO(jkummerow): Possible optimization: manage the "signature_zone"'s
+ // lifetime separately by having WasmModule refer to it via std::shared_ptr,
+ // and introduce a new link from here to just that zone using a Managed<...>.
+ // Details: https://bit.ly/2UxD4hW
+ instance: WasmInstanceObject;
}
// WasmObject corresponds to data ref types which are WasmStruct and WasmArray.
@abstract
-@generateCppClass
extern class WasmObject extends JSReceiver {
}
-@generateCppClass
@highestInstanceTypeWithinParentClassRange
extern class WasmStruct extends WasmObject {
}
-@generateCppClass
@lowestInstanceTypeWithinParentClassRange
extern class WasmArray extends WasmObject {
length: uint32;
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 4c60d82c1b..d3165582c8 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -304,7 +304,7 @@ NativeModuleSerializer::NativeModuleSerializer(
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(bool);
DCHECK_EQ(WasmCode::kFunction, code->kind());
- if (FLAG_wasm_lazy_compilation && code->tier() != ExecutionTier::kTurbofan) {
+ if (code->tier() != ExecutionTier::kTurbofan) {
return sizeof(bool);
}
return kCodeHeaderSize + code->instructions().size() +
@@ -338,11 +338,8 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
// Only serialize TurboFan code, as Liftoff code can contain breakpoints or
// non-relocatable constants.
if (code->tier() != ExecutionTier::kTurbofan) {
- if (FLAG_wasm_lazy_compilation) {
- writer->Write(false);
- return true;
- }
- return false;
+ writer->Write(false);
+ return true;
}
writer->Write(true);
// Write the size of the entire code section, followed by the code header.
@@ -536,6 +533,10 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
bool Read(Reader* reader);
+ base::Vector<const int> missing_functions() {
+ return base::VectorOf(missing_functions_);
+ }
+
private:
friend class CopyAndRelocTask;
friend class PublishTask;
@@ -554,6 +555,7 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
size_t remaining_code_size_ = 0;
base::Vector<byte> current_code_space_;
NativeModule::JumpTablesRef current_jump_tables_;
+ std::vector<int> missing_functions_;
};
class CopyAndRelocTask : public JobTask {
@@ -648,6 +650,7 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
std::vector<DeserializationUnit> batch;
const byte* batch_start = reader->current_location();
+ CodeSpaceWriteScope code_space_write_scope(native_module_);
for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
DeserializationUnit unit = ReadCode(i, reader);
if (!unit.code) continue;
@@ -687,9 +690,7 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
Reader* reader) {
bool has_code = reader->Read<bool>();
if (!has_code) {
- DCHECK(FLAG_wasm_lazy_compilation ||
- native_module_->enabled_features().has_compilation_hints());
- native_module_->UseLazyStub(fn_index);
+ missing_functions_.push_back(fn_index);
return {};
}
int constant_pool_offset = reader->Read<int>();
@@ -862,9 +863,14 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
NativeModuleDeserializer deserializer(shared_native_module.get());
Reader reader(data + WasmSerializer::kHeaderSize);
bool error = !deserializer.Read(&reader);
- shared_native_module->compilation_state()->InitializeAfterDeserialization();
+ if (error) {
+ wasm_engine->UpdateNativeModuleCache(error, &shared_native_module,
+ isolate);
+ return {};
+ }
+ shared_native_module->compilation_state()->InitializeAfterDeserialization(
+ deserializer.missing_functions());
wasm_engine->UpdateNativeModuleCache(error, &shared_native_module, isolate);
- if (error) return {};
}
Handle<FixedArray> export_wrappers;
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 39c65ab9dd..5e8ae15c0b 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -51,39 +51,139 @@ uint32_t WebSnapshotSerializerDeserializer::FunctionKindToFunctionFlags(
case FunctionKind::kAsyncFunction:
case FunctionKind::kAsyncArrowFunction:
case FunctionKind::kAsyncGeneratorFunction:
+ case FunctionKind::kBaseConstructor:
+ case FunctionKind::kDefaultBaseConstructor:
+ case FunctionKind::kConciseMethod:
+ case FunctionKind::kAsyncConciseMethod:
break;
default:
Throw("Web Snapshot: Unsupported function kind");
}
- auto thing = ArrowFunctionBitField::encode(IsArrowFunction(kind)) +
- AsyncFunctionBitField::encode(IsAsyncFunction(kind)) +
- GeneratorFunctionBitField::encode(IsGeneratorFunction(kind));
- return thing;
+ auto flags = AsyncFunctionBitField::encode(IsAsyncFunction(kind)) |
+ GeneratorFunctionBitField::encode(IsGeneratorFunction(kind)) |
+ ArrowFunctionBitField::encode(IsArrowFunction(kind)) |
+ MethodBitField::encode(IsConciseMethod(kind)) |
+ StaticBitField::encode(IsStatic(kind)) |
+ ClassConstructorBitField::encode(IsClassConstructor(kind)) |
+ DefaultConstructorBitField::encode(IsDefaultConstructor(kind)) |
+ DerivedConstructorBitField::encode(IsDerivedConstructor(kind));
+ return flags;
}
+// TODO(v8:11525): Optionally, use an enum instead.
FunctionKind WebSnapshotSerializerDeserializer::FunctionFlagsToFunctionKind(
uint32_t flags) {
- static const FunctionKind kFunctionKinds[] = {
- // is_generator = false, is_async = false
- FunctionKind::kNormalFunction, // is_arrow = false
- FunctionKind::kArrowFunction, // is_arrow = true
- // is_generator = false, is_async = true
- FunctionKind::kAsyncFunction, // is_arrow = false
- FunctionKind::kAsyncArrowFunction, // is_arrow = true
- // is_generator = true, is_async = false
- FunctionKind::kGeneratorFunction, // is_arrow = false
- FunctionKind::kInvalid, // is_arrow = true
- // is_generator = true, is_async = true
- FunctionKind::kAsyncGeneratorFunction, // is_arrow = false
- FunctionKind::kInvalid}; // is_arrow = true
-
- FunctionKind kind = kFunctionKinds[flags];
+ FunctionKind kind;
+ if (IsFunctionOrMethod(flags)) {
+ if (ArrowFunctionBitField::decode(flags) && MethodBitField::decode(flags)) {
+ kind = FunctionKind::kInvalid;
+ } else {
+ uint32_t index = AsyncFunctionBitField::decode(flags) << 0 |
+ GeneratorFunctionBitField::decode(flags) << 1 |
+ (ArrowFunctionBitField::decode(flags) ||
+ StaticBitField::decode(flags))
+ << 2 |
+ MethodBitField::decode(flags) << 3;
+ static const FunctionKind kFunctionKinds[] = {
+ // kNormalFunction
+ // is_generator = false
+ FunctionKind::kNormalFunction, // is_async = false
+ FunctionKind::kAsyncFunction, // is_async = true
+ // is_generator = true
+ FunctionKind::kGeneratorFunction, // is_async = false
+ FunctionKind::kAsyncGeneratorFunction, // is_async = true
+
+ // kArrowFunction
+ // is_generator = false
+ FunctionKind::kArrowFunction, // is_async = false
+ FunctionKind::kAsyncArrowFunction, // is_async = true
+ // is_generator = true
+ FunctionKind::kInvalid, // is_async = false
+ FunctionKind::kInvalid, // is_async = true
+
+ // kNonStaticMethod
+ // is_generator = false
+ FunctionKind::kConciseMethod, // is_async = false
+ FunctionKind::kAsyncConciseMethod, // is_async = true
+ // is_generator = true
+ // TODO(v8:11525) Support FunctionKind::kConciseGeneratorMethod.
+ FunctionKind::kInvalid, // is_async = false
+ // TODO(v8:11525) Support FunctionKind::kAsyncConciseGeneratorMethod.
+ FunctionKind::kInvalid, // is_async = true
+
+ // kStaticMethod
+ // is_generator = false
+ // TODO(v8:11525) Support FunctionKind::kStaticConciseMethod.
+ FunctionKind::kInvalid, // is_async = false
+ // TODO(v8:11525) Support FunctionKind::kStaticAsyncConciseMethod.
+ FunctionKind::kInvalid, // is_async = true
+ // is_generator = true
+ // TODO(v8:11525) Support
+ // FunctionKind::kStaticConciseGeneratorMethod.
+ FunctionKind::kInvalid, // is_async = false
+ // TODO(v8:11525) Support
+ // FunctionKind::kStaticAsyncConciseGeneratorMethod.
+ FunctionKind::kInvalid // is_async = true
+ };
+ kind = kFunctionKinds[index];
+ }
+ } else if (IsConstructor(flags)) {
+ static const FunctionKind kFunctionKinds[] = {
+ // is_derived = false
+ FunctionKind::kBaseConstructor, // is_default = false
+ FunctionKind::kDefaultBaseConstructor, // is_default = true
+ // is_derived = true
+ FunctionKind::kDerivedConstructor, // is_default = false
+ FunctionKind::kDefaultDerivedConstructor // is_default = true
+ };
+ kind = kFunctionKinds[flags >> DefaultConstructorBitField::kShift];
+ } else {
+ kind = FunctionKind::kInvalid;
+ }
if (kind == FunctionKind::kInvalid) {
Throw("Web Snapshots: Invalid function flags\n");
}
return kind;
}
+bool WebSnapshotSerializerDeserializer::IsFunctionOrMethod(uint32_t flags) {
+ uint32_t mask = AsyncFunctionBitField::kMask |
+ GeneratorFunctionBitField::kMask |
+ ArrowFunctionBitField::kMask | MethodBitField::kMask |
+ StaticBitField::kMask;
+ return (flags & mask) == flags;
+}
+
+bool WebSnapshotSerializerDeserializer::IsConstructor(uint32_t flags) {
+ uint32_t mask = ClassConstructorBitField::kMask |
+ DefaultConstructorBitField::kMask |
+ DerivedConstructorBitField::kMask;
+ return ClassConstructorBitField::decode(flags) && (flags & mask) == flags;
+}
+
+uint32_t WebSnapshotSerializerDeserializer::GetDefaultAttributeFlags() {
+ auto flags = ReadOnlyBitField::encode(false) |
+ ConfigurableBitField::encode(true) |
+ EnumerableBitField::encode(true);
+ return flags;
+}
+
+uint32_t WebSnapshotSerializerDeserializer::AttributesToFlags(
+ PropertyDetails details) {
+ auto flags = ReadOnlyBitField::encode(details.IsReadOnly()) |
+ ConfigurableBitField::encode(details.IsConfigurable()) |
+ EnumerableBitField::encode(details.IsEnumerable());
+ return flags;
+}
+
+PropertyAttributes WebSnapshotSerializerDeserializer::FlagsToAttributes(
+ uint32_t flags) {
+ uint32_t attributes = ReadOnlyBitField::decode(flags) * READ_ONLY +
+ !ConfigurableBitField::decode(flags) * DONT_DELETE +
+ !EnumerableBitField::decode(flags) * DONT_ENUM;
+ return static_cast<PropertyAttributes>(attributes);
+}
+
WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
: WebSnapshotSerializerDeserializer(
reinterpret_cast<v8::internal::Isolate*>(isolate)),
@@ -91,6 +191,7 @@ WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
map_serializer_(isolate_, nullptr),
context_serializer_(isolate_, nullptr),
function_serializer_(isolate_, nullptr),
+ class_serializer_(isolate_, nullptr),
array_serializer_(isolate_, nullptr),
object_serializer_(isolate_, nullptr),
export_serializer_(isolate_, nullptr),
@@ -98,6 +199,7 @@ WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
map_ids_(isolate_->heap()),
context_ids_(isolate_->heap()),
function_ids_(isolate_->heap()),
+ class_ids_(isolate_->heap()),
array_ids_(isolate_->heap()),
object_ids_(isolate_->heap()) {}
@@ -178,8 +280,9 @@ void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
size_t needed_size =
string_serializer_.buffer_size_ + map_serializer_.buffer_size_ +
context_serializer_.buffer_size_ + function_serializer_.buffer_size_ +
- array_serializer_.buffer_size_ + object_serializer_.buffer_size_ +
- export_serializer_.buffer_size_ + 6 * sizeof(uint32_t);
+ class_serializer_.buffer_size_ + array_serializer_.buffer_size_ +
+ object_serializer_.buffer_size_ + export_serializer_.buffer_size_ +
+ 8 * sizeof(uint32_t);
if (total_serializer.ExpandBuffer(needed_size).IsNothing()) {
Throw("Web snapshot: Out of memory");
return;
@@ -203,6 +306,9 @@ void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
total_serializer.WriteUint32(static_cast<uint32_t>(object_count()));
total_serializer.WriteRawBytes(object_serializer_.buffer_,
object_serializer_.buffer_size_);
+ total_serializer.WriteUint32(static_cast<uint32_t>(class_count()));
+ total_serializer.WriteRawBytes(class_serializer_.buffer_,
+ class_serializer_.buffer_size_);
total_serializer.WriteUint32(export_count_);
total_serializer.WriteRawBytes(export_serializer_.buffer_,
export_serializer_.buffer_size_);
@@ -266,7 +372,11 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
return;
}
+ int first_custom_index = -1;
std::vector<uint32_t> string_ids;
+ std::vector<uint32_t> attributes;
+ string_ids.reserve(map->NumberOfOwnDescriptors());
+ attributes.reserve(map->NumberOfOwnDescriptors());
for (InternalIndex i : map->IterateOwnDescriptors()) {
Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
isolate_);
@@ -277,41 +387,59 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
PropertyDetails details =
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- if (details.IsDontEnum()) {
- Throw("Web snapshot: Non-enumerable properties not supported");
- return;
- }
if (details.location() != kField) {
Throw("Web snapshot: Properties which are not fields not supported");
return;
}
+ if (first_custom_index >= 0 || details.IsReadOnly() ||
+ !details.IsConfigurable() || details.IsDontEnum()) {
+ if (first_custom_index == -1) first_custom_index = i.as_int();
+ attributes.push_back(AttributesToFlags(details));
+ }
uint32_t string_id = 0;
SerializeString(Handle<String>::cast(key), string_id);
string_ids.push_back(string_id);
-
- // TODO(v8:11525): Support property attributes.
}
+
+ map_serializer_.WriteUint32(first_custom_index == -1
+ ? PropertyAttributesType::DEFAULT
+ : PropertyAttributesType::CUSTOM);
map_serializer_.WriteUint32(static_cast<uint32_t>(string_ids.size()));
- for (auto i : string_ids) {
- map_serializer_.WriteUint32(i);
+
+ uint32_t default_flags = GetDefaultAttributeFlags();
+ for (size_t i = 0; i < string_ids.size(); ++i) {
+ if (first_custom_index >= 0) {
+ if (static_cast<int>(i) < first_custom_index) {
+ map_serializer_.WriteUint32(default_flags);
+ } else {
+ map_serializer_.WriteUint32(attributes[i - first_custom_index]);
+ }
+ }
+ map_serializer_.WriteUint32(string_ids[i]);
}
}
-// Format (serialized function):
-// - 0 if there's no context, 1 + context id otherwise
-// - String id (source snippet)
-// - Start position in the source snippet
-// - Length in the source snippet
-// - Flags (see FunctionFlags)
-// TODO(v8:11525): Investigate whether the length is really needed.
-void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
- uint32_t& id) {
- if (InsertIntoIndexMap(function_ids_, function, id)) {
- return;
- }
+void WebSnapshotSerializer::SerializeSource(ValueSerializer* serializer,
+ Handle<JSFunction> function) {
+ // TODO(v8:11525): Don't write the full source but instead, a set of minimal
+ // snippets which cover the serialized functions.
+ Handle<String> full_source(
+ String::cast(Script::cast(function->shared().script()).source()),
+ isolate_);
+ uint32_t source_id = 0;
+ SerializeString(full_source, source_id);
+ serializer->WriteUint32(source_id);
+
+ int start = function->shared().StartPosition();
+ serializer->WriteUint32(start);
+ int end = function->shared().EndPosition();
+ serializer->WriteUint32(end - start);
+}
+void WebSnapshotSerializer::SerializeFunctionInfo(ValueSerializer* serializer,
+ Handle<JSFunction> function) {
if (!function->shared().HasSourceCode()) {
Throw("Web snapshot: Function without source code");
return;
@@ -319,36 +447,69 @@ void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
Handle<Context> context(function->context(), isolate_);
if (context->IsNativeContext() || context->IsScriptContext()) {
- function_serializer_.WriteUint32(0);
+ serializer->WriteUint32(0);
} else {
- DCHECK(context->IsFunctionContext());
+ DCHECK(context->IsFunctionContext() || context->IsBlockContext());
uint32_t context_id = 0;
SerializeContext(context, context_id);
- function_serializer_.WriteUint32(context_id + 1);
+ serializer->WriteUint32(context_id + 1);
}
- // TODO(v8:11525): Don't write the full source but instead, a set of minimal
- // snippets which cover the serialized functions.
- Handle<String> full_source(
- String::cast(Script::cast(function->shared().script()).source()),
- isolate_);
- uint32_t source_id = 0;
- SerializeString(full_source, source_id);
- function_serializer_.WriteUint32(source_id);
+ SerializeSource(serializer, function);
- int start = function->shared().StartPosition();
- function_serializer_.WriteUint32(start);
- int end = function->shared().EndPosition();
- function_serializer_.WriteUint32(end - start);
-
- function_serializer_.WriteUint32(
+ serializer->WriteUint32(
FunctionKindToFunctionFlags(function->shared().kind()));
+}
+
+// Format (serialized function):
+// - 0 if there's no context, 1 + context id otherwise
+// - String id (source snippet)
+// - Start position in the source snippet
+// - Length in the source snippet
+// - Flags (see FunctionFlags)
+// TODO(v8:11525): Investigate whether the length is really needed.
+// TODO(v8:11525): Serialize the formal parameter count.
+void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
+ uint32_t& id) {
+ if (InsertIntoIndexMap(function_ids_, function, id)) {
+ return;
+ }
+
+ SerializeFunctionInfo(&function_serializer_, function);
+
// TODO(v8:11525): Serialize .prototype.
// TODO(v8:11525): Support properties in functions.
// TODO(v8:11525): Support function referencing a function indirectly (e.g.,
// function -> context -> array -> function).
}
+// Format (serialized class):
+// - 1 + context id
+// - String id (source snippet)
+// - Start position in the source snippet
+// - Length in the source snippet
+// - Flags (see FunctionFlags)
+// - Object id (function prototype)
+void WebSnapshotSerializer::SerializeClass(Handle<JSFunction> function,
+ uint32_t& id) {
+ if (InsertIntoIndexMap(class_ids_, function, id)) {
+ return;
+ }
+
+ SerializeFunctionInfo(&class_serializer_, function);
+
+ Handle<JSObject> prototype =
+ Handle<JSObject>::cast(handle(function->prototype(), isolate_));
+ uint32_t prototype_id;
+ SerializeObject(prototype, prototype_id);
+ class_serializer_.WriteUint32(prototype_id);
+
+ // TODO(v8:11525): Support properties in classes.
+ // TODO(v8:11525): Support class referencing a class indirectly (e.g.,
+ // class -> context -> array -> class).
+ // TODO(v8:11525): Support class members.
+}
+
// Format (serialized context):
// - 0 if there's no parent context, 1 + parent context id otherwise
// - Variable count
@@ -376,6 +537,16 @@ void WebSnapshotSerializer::SerializeContext(Handle<Context> context,
InsertIntoIndexMap(context_ids_, context, id);
+ // TODO(v8:11525): Use less space for encoding the context type.
+ if (context->IsFunctionContext()) {
+ context_serializer_.WriteUint32(ContextType::FUNCTION);
+ } else if (context->IsBlockContext()) {
+ context_serializer_.WriteUint32(ContextType::BLOCK);
+ } else {
+ Throw("Web snapshot: Unsupported context type");
+ return;
+ }
+
context_serializer_.WriteUint32(parent_context_id);
Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
@@ -519,11 +690,19 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
serializer.WriteUint32(ValueType::DOUBLE);
serializer.WriteDouble(HeapNumber::cast(*object).value());
break;
- case JS_FUNCTION_TYPE:
- SerializeFunction(Handle<JSFunction>::cast(object), id);
- serializer.WriteUint32(ValueType::FUNCTION_ID);
+ case JS_FUNCTION_TYPE: {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+ FunctionKind kind = function->shared().kind();
+ if (IsClassConstructor(kind)) {
+ SerializeClass(function, id);
+ serializer.WriteUint32(ValueType::CLASS_ID);
+ } else {
+ SerializeFunction(function, id);
+ serializer.WriteUint32(ValueType::FUNCTION_ID);
+ }
serializer.WriteUint32(id);
break;
+ }
case JS_OBJECT_TYPE:
SerializeObject(Handle<JSObject>::cast(object), id);
serializer.WriteUint32(ValueType::OBJECT_ID);
@@ -573,6 +752,7 @@ void WebSnapshotDeserializer::Throw(const char* message) {
string_count_ = 0;
map_count_ = 0;
context_count_ = 0;
+ class_count_ = 0;
function_count_ = 0;
object_count_ = 0;
// Make sure we don't read any more data
@@ -603,7 +783,13 @@ bool WebSnapshotDeserializer::UseWebSnapshot(const uint8_t* data,
DeserializeFunctions();
DeserializeArrays();
DeserializeObjects();
+ // It comes in handy to deserialize objects before classes. This
+ // way, we already have the function prototype for a class deserialized when
+ // processing the class and it's easier to adjust it as needed.
+ DeserializeClasses();
+ ProcessDeferredReferences();
DeserializeExports();
+ DCHECK_EQ(deferred_references_->Length(), 0);
if (deserializer_->position_ != deserializer_->end_) {
Throw("Web snapshot: Snapshot length mismatch");
return false;
@@ -665,6 +851,24 @@ void WebSnapshotDeserializer::DeserializeMaps() {
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
maps_ = isolate_->factory()->NewFixedArray(map_count_);
for (uint32_t i = 0; i < map_count_; ++i) {
+ uint32_t map_type;
+ if (!deserializer_->ReadUint32(&map_type)) {
+ Throw("Web snapshot: Malformed shape");
+ return;
+ }
+ bool has_custom_property_attributes;
+ switch (map_type) {
+ case PropertyAttributesType::DEFAULT:
+ has_custom_property_attributes = false;
+ break;
+ case PropertyAttributesType::CUSTOM:
+ has_custom_property_attributes = true;
+ break;
+ default:
+ Throw("Web snapshot: Unsupported map type");
+ return;
+ }
+
uint32_t property_count;
if (!deserializer_->ReadUint32(&property_count)) {
Throw("Web snapshot: Malformed shape");
@@ -688,13 +892,23 @@ void WebSnapshotDeserializer::DeserializeMaps() {
Handle<DescriptorArray> descriptors =
isolate_->factory()->NewDescriptorArray(0, property_count);
for (uint32_t p = 0; p < property_count; ++p) {
+ PropertyAttributes attributes = PropertyAttributes::NONE;
+ if (has_custom_property_attributes) {
+ uint32_t flags;
+ if (!deserializer_->ReadUint32(&flags)) {
+ Throw("Web snapshot: Malformed shape");
+ return;
+ }
+ attributes = FlagsToAttributes(flags);
+ }
+
Handle<String> key = ReadString(true);
// Use the "none" representation until we see the first object having this
// map. At that point, modify the representation.
- Descriptor desc = Descriptor::DataField(
- isolate_, key, static_cast<int>(p), PropertyAttributes::NONE,
- Representation::None());
+ Descriptor desc =
+ Descriptor::DataField(isolate_, key, static_cast<int>(p), attributes,
+ Representation::None());
descriptors->Append(&desc);
}
@@ -702,6 +916,7 @@ void WebSnapshotDeserializer::DeserializeMaps() {
JS_OBJECT_TYPE, JSObject::kHeaderSize * kTaggedSize, HOLEY_ELEMENTS, 0);
map->InitializeDescriptors(isolate_, *descriptors);
// TODO(v8:11525): Set 'constructor'.
+ // TODO(v8:11525): Set the correct prototype.
maps_->set(i, *map);
}
@@ -717,6 +932,12 @@ void WebSnapshotDeserializer::DeserializeContexts() {
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
contexts_ = isolate_->factory()->NewFixedArray(context_count_);
for (uint32_t i = 0; i < context_count_; ++i) {
+ uint32_t context_type;
+ if (!deserializer_->ReadUint32(&context_type)) {
+ Throw("Web snapshot: Malformed context type");
+ return;
+ }
+
uint32_t parent_context_id;
// Parent context is serialized before child context. Note: not >= on
// purpose, we're going to subtract 1 later.
@@ -733,7 +954,8 @@ void WebSnapshotDeserializer::DeserializeContexts() {
}
// TODO(v8:11525): Enforce upper limit for variable count.
Handle<ScopeInfo> scope_info =
- CreateScopeInfo(variable_count, parent_context_id > 0);
+ CreateScopeInfo(variable_count, parent_context_id > 0,
+ static_cast<ContextType>(context_type));
Handle<Context> parent_context;
if (parent_context_id > 0) {
@@ -767,8 +989,20 @@ void WebSnapshotDeserializer::DeserializeContexts() {
// Allocate the FunctionContext after setting up the ScopeInfo to avoid
// pointing to a ScopeInfo which is not set up yet.
- Handle<Context> context =
- isolate_->factory()->NewFunctionContext(parent_context, scope_info);
+ Handle<Context> context;
+ switch (context_type) {
+ case ContextType::FUNCTION:
+ context =
+ isolate_->factory()->NewFunctionContext(parent_context, scope_info);
+ break;
+ case ContextType::BLOCK:
+ context =
+ isolate_->factory()->NewBlockContext(parent_context, scope_info);
+ break;
+ default:
+ Throw("Web snapshot: Unsupported context type");
+ return;
+ }
for (int variable_index = 0;
variable_index < static_cast<int>(variable_count); ++variable_index) {
Handle<Object> value;
@@ -782,22 +1016,18 @@ void WebSnapshotDeserializer::DeserializeContexts() {
}
Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
- uint32_t variable_count, bool has_parent) {
+ uint32_t variable_count, bool has_parent, ContextType context_type) {
// TODO(v8:11525): Decide how to handle language modes. (The code below sets
// the language mode as strict.)
// TODO(v8:11525): Support (context-allocating) receiver.
// TODO(v8:11525): Support function variable & function name.
// TODO(v8:11525): Support classes.
- const int length = ScopeInfo::kVariablePartIndex +
- ScopeInfo::kPositionInfoEntries + (has_parent ? 1 : 0) +
- 2 * variable_count;
- Handle<ScopeInfo> scope_info = isolate_->factory()->NewScopeInfo(length);
+ ScopeType scope_type;
int flags =
- ScopeInfo::ScopeTypeBits::encode(ScopeType::FUNCTION_SCOPE) |
ScopeInfo::SloppyEvalCanExtendVarsBit::encode(false) |
ScopeInfo::LanguageModeBit::encode(LanguageMode::kStrict) |
- ScopeInfo::DeclarationScopeBit::encode(true) |
+ ScopeInfo::DeclarationScopeBit::encode(false) |
ScopeInfo::ReceiverVariableBits::encode(VariableAllocationInfo::NONE) |
ScopeInfo::HasClassBrandBit::encode(false) |
ScopeInfo::HasSavedClassVariableIndexBit::encode(false) |
@@ -805,7 +1035,7 @@ Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
ScopeInfo::FunctionVariableBits::encode(VariableAllocationInfo::NONE) |
ScopeInfo::HasInferredFunctionNameBit::encode(false) |
ScopeInfo::IsAsmModuleBit::encode(false) |
- ScopeInfo::HasSimpleParametersBit::encode(true) |
+ ScopeInfo::HasSimpleParametersBit::encode(false) |
ScopeInfo::FunctionKindBits::encode(FunctionKind::kNormalFunction) |
ScopeInfo::HasOuterScopeInfoBit::encode(has_parent) |
ScopeInfo::IsDebugEvaluateScopeBit::encode(false) |
@@ -814,16 +1044,84 @@ Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
ScopeInfo::HasContextExtensionSlotBit::encode(false) |
ScopeInfo::IsReplModeScopeBit::encode(false) |
ScopeInfo::HasLocalsBlockListBit::encode(false);
+ switch (context_type) {
+ case ContextType::FUNCTION:
+ scope_type = ScopeType::FUNCTION_SCOPE;
+ flags |= ScopeInfo::DeclarationScopeBit::encode(true) |
+ ScopeInfo::HasSimpleParametersBit::encode(true);
+ break;
+ case ContextType::BLOCK:
+ scope_type = ScopeType::CLASS_SCOPE;
+ flags |= ScopeInfo::ForceContextAllocationBit::encode(true);
+ break;
+ default:
+ // Default to a CLASS_SCOPE, so that the rest of the code can be executed
+ // without failures.
+ scope_type = ScopeType::CLASS_SCOPE;
+ Throw("Web snapshot: Unsupported context type");
+ }
+ flags |= ScopeInfo::ScopeTypeBits::encode(scope_type);
+ const int length = ScopeInfo::kVariablePartIndex +
+ (ScopeInfo::NeedsPositionInfo(scope_type)
+ ? ScopeInfo::kPositionInfoEntries
+ : 0) +
+ (has_parent ? 1 : 0) + 2 * variable_count;
+ Handle<ScopeInfo> scope_info = isolate_->factory()->NewScopeInfo(length);
+
scope_info->set_flags(flags);
DCHECK(!scope_info->IsEmpty());
scope_info->set_context_local_count(variable_count);
// TODO(v8:11525): Support parameters.
scope_info->set_parameter_count(0);
- scope_info->SetPositionInfo(0, 0);
+ if (scope_info->HasPositionInfo()) {
+ scope_info->SetPositionInfo(0, 0);
+ }
return scope_info;
}
+Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
+ int shared_function_info_index, uint32_t start_position, uint32_t length,
+ uint32_t flags, uint32_t context_id) {
+ // TODO(v8:11525): Deduplicate the SFIs for class methods.
+ FunctionKind kind = FunctionFlagsToFunctionKind(flags);
+ Handle<SharedFunctionInfo> shared =
+ isolate_->factory()->NewSharedFunctionInfo(
+ isolate_->factory()->empty_string(), MaybeHandle<Code>(),
+ Builtin::kCompileLazy, kind);
+ if (IsConciseMethod(kind)) {
+ shared->set_syntax_kind(FunctionSyntaxKind::kAccessorOrMethod);
+ }
+ shared->set_script(*script_);
+ shared->set_function_literal_id(shared_function_info_index);
+ // TODO(v8:11525): Decide how to handle language modes.
+ shared->set_language_mode(LanguageMode::kStrict);
+ shared->set_uncompiled_data(
+ *isolate_->factory()->NewUncompiledDataWithoutPreparseData(
+ ReadOnlyRoots(isolate_).empty_string_handle(), start_position,
+ start_position + length));
+ shared->set_allows_lazy_compilation(true);
+ shared_function_infos_->Set(shared_function_info_index,
+ HeapObjectReference::Weak(*shared));
+ shared_function_info_table_ = ObjectHashTable::Put(
+ shared_function_info_table_,
+ handle(Smi::FromInt(start_position), isolate_),
+ handle(Smi::FromInt(shared_function_info_index), isolate_));
+
+ Handle<JSFunction> function =
+ Factory::JSFunctionBuilder(isolate_, shared, isolate_->native_context())
+ .Build();
+ if (context_id > 0) {
+ DCHECK_LT(context_id - 1, context_count_);
+ // Guards raw pointer "context" below.
+ DisallowHeapAllocation no_heap_access;
+ Context context = Context::cast(contexts_->get(context_id - 1));
+ function->set_context(context);
+ shared->set_outer_scope_info(context.scope_info());
+ }
+ return function;
+}
+
void WebSnapshotDeserializer::DeserializeFunctions() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Functions);
if (!deserializer_->ReadUint32(&function_count_) ||
@@ -836,16 +1134,14 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
// Overallocate the array for SharedFunctionInfos; functions which we
// deserialize soon will create more SharedFunctionInfos when called.
- Handle<WeakFixedArray> infos(isolate_->factory()->NewWeakFixedArray(
+ shared_function_infos_ = isolate_->factory()->NewWeakFixedArray(
WeakArrayList::CapacityForLength(function_count_ + 1),
- AllocationType::kOld));
- Handle<ObjectHashTable> shared_function_info_table =
- ObjectHashTable::New(isolate_, function_count_);
- Handle<Script> script =
- isolate_->factory()->NewScript(isolate_->factory()->empty_string());
- script->set_type(Script::TYPE_WEB_SNAPSHOT);
- script->set_shared_function_infos(*infos);
- script->set_shared_function_info_table(*shared_function_info_table);
+ AllocationType::kOld);
+ shared_function_info_table_ = ObjectHashTable::New(isolate_, function_count_);
+ script_ = isolate_->factory()->NewScript(isolate_->factory()->empty_string());
+ script_->set_type(Script::TYPE_WEB_SNAPSHOT);
+ script_->set_shared_function_infos(*shared_function_infos_);
+ script_->set_shared_function_info_table(*shared_function_info_table_);
for (; current_function_count_ < function_count_; ++current_function_count_) {
uint32_t context_id;
@@ -858,10 +1154,10 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
Handle<String> source = ReadString(false);
if (current_function_count_ == 0) {
- script->set_source(*source);
+ script_->set_source(*source);
} else {
// TODO(v8:11525): Support multiple source snippets.
- DCHECK_EQ(script->source(), *source);
+ DCHECK_EQ(script_->source(), *source);
}
uint32_t start_position;
@@ -874,46 +1170,86 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
return;
}
- // TODO(v8:11525): Support (exported) top level functions.
-
- // TODO(v8:11525): Deduplicate the SFIs for inner functions the user creates
- // post-deserialization (by calling the outer function, if it's also in the
- // snapshot) against the ones we create here.
- Handle<SharedFunctionInfo> shared =
- isolate_->factory()->NewSharedFunctionInfo(
- isolate_->factory()->empty_string(), MaybeHandle<Code>(),
- Builtin::kCompileLazy, FunctionFlagsToFunctionKind(flags));
- shared->set_script(*script);
// Index 0 is reserved for top-level shared function info (which web
// snapshot scripts don't have).
- const int shared_function_info_index = current_function_count_ + 1;
- shared->set_function_literal_id(shared_function_info_index);
- // TODO(v8:11525): Decide how to handle language modes.
- shared->set_language_mode(LanguageMode::kStrict);
- shared->set_uncompiled_data(
- *isolate_->factory()->NewUncompiledDataWithoutPreparseData(
- ReadOnlyRoots(isolate_).empty_string_handle(), start_position,
- start_position + length));
- shared->set_allows_lazy_compilation(true);
- infos->Set(shared_function_info_index, HeapObjectReference::Weak(*shared));
-
- shared_function_info_table = ObjectHashTable::Put(
- shared_function_info_table,
- handle(Smi::FromInt(start_position), isolate_),
- handle(Smi::FromInt(shared_function_info_index), isolate_));
+ Handle<JSFunction> function = CreateJSFunction(
+ current_function_count_ + 1, start_position, length, flags, context_id);
+ functions_->set(current_function_count_, *function);
+ }
+}
+
+void WebSnapshotDeserializer::DeserializeClasses() {
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Classes);
+ if (!deserializer_->ReadUint32(&class_count_) ||
+ class_count_ > kMaxItemCount) {
+ Throw("Web snapshot: Malformed class table");
+ return;
+ }
+ STATIC_ASSERT(kMaxItemCount + 1 <= FixedArray::kMaxLength);
+ classes_ = isolate_->factory()->NewFixedArray(class_count_);
+
+ // Grow the array for SharedFunctionInfos.
+ shared_function_infos_ = WeakFixedArray::EnsureSpace(
+ isolate_, shared_function_infos_,
+ WeakArrayList::CapacityForLength(function_count_ + 1 + class_count_));
+ script_->set_shared_function_infos(*shared_function_infos_);
+ for (; current_class_count_ < class_count_; ++current_class_count_) {
+ uint32_t context_id;
+ // Note: > (not >= on purpose, we will subtract 1).
+ if (!deserializer_->ReadUint32(&context_id) ||
+ context_id > context_count_) {
+ Throw("Web snapshot: Malformed class");
+ return;
+ }
+
+ Handle<String> source = ReadString(false);
+ if (current_function_count_ + current_class_count_ == 0) {
+ script_->set_source(*source);
+ } else {
+ // TODO(v8:11525): Support multiple source snippets.
+ DCHECK_EQ(script_->source(), *source);
+ }
+
+ uint32_t start_position;
+ uint32_t length;
+ uint32_t flags;
+ if (!deserializer_->ReadUint32(&start_position) ||
+ !deserializer_->ReadUint32(&length) ||
+ !deserializer_->ReadUint32(&flags)) {
+ Throw("Web snapshot: Malformed class");
+ return;
+ }
+
+ // Index 0 is reserved for top-level shared function info (which web
+ // snapshot scripts don't have).
Handle<JSFunction> function =
- Factory::JSFunctionBuilder(isolate_, shared, isolate_->native_context())
- .Build();
- if (context_id > 0) {
- DCHECK_LT(context_id - 1, context_count_);
- // Guards raw pointer "context" below.
- DisallowHeapAllocation no_heap_access;
- Context context = Context::cast(contexts_->get(context_id - 1));
- function->set_context(context);
- shared->set_outer_scope_info(context.scope_info());
+ CreateJSFunction(function_count_ + current_class_count_ + 1,
+ start_position, length, flags, context_id);
+ classes_->set(current_class_count_, *function);
+
+ uint32_t function_prototype;
+ if (!deserializer_->ReadUint32(&function_prototype) ||
+ function_prototype >= object_count_) {
+ Throw("Web snapshot: Malformed class");
+ return;
}
- functions_->set(current_function_count_, *function);
+
+ Handle<JSObject> prototype = Handle<JSObject>::cast(
+ handle(Object::cast(objects_->get(function_prototype)), isolate_));
+
+ // TODO(v8:11525): Enforce the invariant that no two prototypes share a map.
+ Map map = prototype->map();
+ map.set_is_prototype_map(true);
+ if (!map.constructor_or_back_pointer().IsNullOrUndefined()) {
+ Throw("Web snapshot: Map already has a constructor or back pointer set");
+ return;
+ }
+ map.set_constructor_or_back_pointer(*function);
+
+ function->set_prototype_or_initial_map(*prototype, kReleaseStore);
+
+ classes_->set(current_class_count_, *function);
}
}
@@ -962,7 +1298,6 @@ void WebSnapshotDeserializer::DeserializeObjects() {
object->set_raw_properties_or_hash(*property_array, kRelaxedStore);
objects_->set(static_cast<int>(current_object_count_), *object);
}
- ProcessDeferredReferences();
}
void WebSnapshotDeserializer::DeserializeArrays() {
@@ -1131,7 +1466,7 @@ void WebSnapshotDeserializer::ReadValue(
}
representation = Representation::Tagged();
break;
- case ValueType::FUNCTION_ID:
+ case ValueType::FUNCTION_ID: {
uint32_t function_id;
if (!deserializer_->ReadUint32(&function_id) ||
function_id >= function_count_) {
@@ -1153,6 +1488,28 @@ void WebSnapshotDeserializer::ReadValue(
}
representation = Representation::Tagged();
break;
+ }
+ case ValueType::CLASS_ID: {
+ uint32_t class_id;
+ if (!deserializer_->ReadUint32(&class_id) || class_id >= kMaxItemCount) {
+ Throw("Web snapshot: Malformed object property");
+ return;
+ }
+ if (class_id < current_class_count_) {
+ value = handle(classes_->get(class_id), isolate_);
+ } else {
+ // The class hasn't been deserialized yet.
+ value = isolate_->factory()->undefined_value();
+ if (object_for_deferred_reference.is_null()) {
+ Throw("Web snapshot: Invalid object reference");
+ return;
+ }
+ AddDeferredReference(object_for_deferred_reference,
+ index_for_deferred_reference, CLASS_ID, class_id);
+ }
+ representation = Representation::Tagged();
+ break;
+ }
case ValueType::REGEXP: {
Handle<String> pattern = ReadString(false);
Handle<String> flags_string = ReadString(false);
@@ -1194,6 +1551,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
DisallowGarbageCollection no_gc;
ArrayList raw_deferred_references = *deferred_references_;
FixedArray raw_functions = *functions_;
+ FixedArray raw_classes = *classes_;
FixedArray raw_arrays = *arrays_;
FixedArray raw_objects = *objects_;
@@ -1217,6 +1575,16 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
}
target = raw_functions.get(target_index);
break;
+ case CLASS_ID:
+ if (static_cast<uint32_t>(target_index) >= class_count_) {
+ // Throw can allocate, but it's ok, since we're not using the raw
+ // pointers after that.
+ AllowGarbageCollection allow_gc;
+ Throw("Web Snapshots: Invalid class reference");
+ return;
+ }
+ target = raw_classes.get(target_index);
+ break;
case ARRAY_ID:
if (static_cast<uint32_t>(target_index) >= array_count_) {
AllowGarbageCollection allow_gc;
@@ -1246,6 +1614,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
UNREACHABLE();
}
}
+ deferred_references_->SetLength(0);
}
} // namespace internal
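For reference, the flag word produced by FunctionKindToFunctionFlags above packs one boolean per bit, in the order declared in web-snapshot.h below (async = bit 0, generator = bit 1, arrow = bit 2, method = bit 3, static = bit 4, then the constructor bits), and FunctionFlagsToFunctionKind folds the first four of those into a 16-entry lookup table. A minimal standalone sketch of that table lookup for the non-static subset (the Kind enum and helper names here are illustrative, not V8's):

#include <cassert>
#include <cstdint>

// Standalone sketch only; bit positions mirror the BitField declarations in
// the web-snapshot.h hunk below (async = bit 0, generator = 1, arrow = 2,
// method = 3).
constexpr uint32_t kAsync = 1u << 0;
constexpr uint32_t kGenerator = 1u << 1;
constexpr uint32_t kArrow = 1u << 2;
constexpr uint32_t kMethod = 1u << 3;

enum class Kind { kNormal, kAsync, kGenerator, kAsyncGenerator,
                  kArrow, kAsyncArrow, kInvalid, kConciseMethod,
                  kAsyncConciseMethod };

// Mirrors the 4-bit table index the deserializer builds from the flags.
Kind FlagsToKind(uint32_t flags) {
  static const Kind kTable[16] = {
      // arrow = 0, method = 0
      Kind::kNormal, Kind::kAsync, Kind::kGenerator, Kind::kAsyncGenerator,
      // arrow = 1, method = 0
      Kind::kArrow, Kind::kAsyncArrow, Kind::kInvalid, Kind::kInvalid,
      // arrow = 0, method = 1
      Kind::kConciseMethod, Kind::kAsyncConciseMethod, Kind::kInvalid,
      Kind::kInvalid,
      // arrow = 1, method = 1 (rejected as invalid by the real code)
      Kind::kInvalid, Kind::kInvalid, Kind::kInvalid, Kind::kInvalid};
  uint32_t index = ((flags & kAsync) ? 1 : 0) | ((flags & kGenerator) ? 2 : 0) |
                   ((flags & kArrow) ? 4 : 0) | ((flags & kMethod) ? 8 : 0);
  return kTable[index];
}

int main() {
  // An async concise method sets bits 0 and 3 -> table index 9.
  assert(FlagsToKind(kAsync | kMethod) == Kind::kAsyncConciseMethod);
  assert(FlagsToKind(kGenerator) == Kind::kGenerator);
}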
diff --git a/deps/v8/src/web-snapshot/web-snapshot.h b/deps/v8/src/web-snapshot/web-snapshot.h
index bbb70c0f6e..bc922a75c8 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.h
+++ b/deps/v8/src/web-snapshot/web-snapshot.h
@@ -52,11 +52,22 @@ class WebSnapshotSerializerDeserializer {
ARRAY_ID,
OBJECT_ID,
FUNCTION_ID,
+ CLASS_ID,
REGEXP
};
+ enum ContextType : uint8_t { FUNCTION, BLOCK };
+
+ enum PropertyAttributesType : uint8_t { DEFAULT, CUSTOM };
+
uint32_t FunctionKindToFunctionFlags(FunctionKind kind);
FunctionKind FunctionFlagsToFunctionKind(uint32_t flags);
+ bool IsFunctionOrMethod(uint32_t flags);
+ bool IsConstructor(uint32_t flags);
+
+ uint32_t GetDefaultAttributeFlags();
+ uint32_t AttributesToFlags(PropertyDetails details);
+ PropertyAttributes FlagsToAttributes(uint32_t flags);
// The maximum count of items for each value type (strings, objects etc.)
static constexpr uint32_t kMaxItemCount =
@@ -81,9 +92,18 @@ class WebSnapshotSerializerDeserializer {
// Keep most common function kinds in the 7 least significant bits to make the
// flags fit in 1 byte.
- using ArrowFunctionBitField = base::BitField<bool, 0, 1>;
- using AsyncFunctionBitField = ArrowFunctionBitField::Next<bool, 1>;
+ using AsyncFunctionBitField = base::BitField<bool, 0, 1>;
using GeneratorFunctionBitField = AsyncFunctionBitField::Next<bool, 1>;
+ using ArrowFunctionBitField = GeneratorFunctionBitField::Next<bool, 1>;
+ using MethodBitField = ArrowFunctionBitField::Next<bool, 1>;
+ using StaticBitField = MethodBitField::Next<bool, 1>;
+ using ClassConstructorBitField = StaticBitField::Next<bool, 1>;
+ using DefaultConstructorBitField = ClassConstructorBitField::Next<bool, 1>;
+ using DerivedConstructorBitField = DefaultConstructorBitField::Next<bool, 1>;
+
+ using ReadOnlyBitField = base::BitField<bool, 0, 1>;
+ using ConfigurableBitField = ReadOnlyBitField::Next<bool, 1>;
+ using EnumerableBitField = ConfigurableBitField::Next<bool, 1>;
};
class V8_EXPORT WebSnapshotSerializer
@@ -111,6 +131,10 @@ class V8_EXPORT WebSnapshotSerializer
return static_cast<uint32_t>(function_ids_.size());
}
+ uint32_t class_count() const {
+ return static_cast<uint32_t>(class_ids_.size());
+ }
+
uint32_t array_count() const {
return static_cast<uint32_t>(array_ids_.size());
}
@@ -129,10 +153,15 @@ class V8_EXPORT WebSnapshotSerializer
// Returns true if the object was already in the map, false if it was added.
bool InsertIntoIndexMap(ObjectCacheIndexMap& map, Handle<HeapObject> object,
uint32_t& id);
+ void SerializeSource(ValueSerializer* serializer,
+ Handle<JSFunction> function);
+ void SerializeFunctionInfo(ValueSerializer* serializer,
+ Handle<JSFunction> function);
void SerializeString(Handle<String> string, uint32_t& id);
void SerializeMap(Handle<Map> map, uint32_t& id);
void SerializeFunction(Handle<JSFunction> function, uint32_t& id);
+ void SerializeClass(Handle<JSFunction> function, uint32_t& id);
void SerializeContext(Handle<Context> context, uint32_t& id);
void SerializeArray(Handle<JSArray> array, uint32_t& id);
void SerializePendingArray(Handle<JSArray> array);
@@ -145,6 +174,7 @@ class V8_EXPORT WebSnapshotSerializer
ValueSerializer map_serializer_;
ValueSerializer context_serializer_;
ValueSerializer function_serializer_;
+ ValueSerializer class_serializer_;
ValueSerializer array_serializer_;
ValueSerializer object_serializer_;
ValueSerializer export_serializer_;
@@ -153,6 +183,7 @@ class V8_EXPORT WebSnapshotSerializer
ObjectCacheIndexMap map_ids_;
ObjectCacheIndexMap context_ids_;
ObjectCacheIndexMap function_ids_;
+ ObjectCacheIndexMap class_ids_;
ObjectCacheIndexMap array_ids_;
ObjectCacheIndexMap object_ids_;
uint32_t export_count_ = 0;
@@ -173,6 +204,7 @@ class V8_EXPORT WebSnapshotDeserializer
uint32_t map_count() const { return map_count_; }
uint32_t context_count() const { return context_count_; }
uint32_t function_count() const { return function_count_; }
+ uint32_t class_count() const { return class_count_; }
uint32_t array_count() const { return array_count_; }
uint32_t object_count() const { return object_count_; }
@@ -184,8 +216,14 @@ class V8_EXPORT WebSnapshotDeserializer
Handle<String> ReadString(bool internalize = false);
void DeserializeMaps();
void DeserializeContexts();
- Handle<ScopeInfo> CreateScopeInfo(uint32_t variable_count, bool has_parent);
+ Handle<ScopeInfo> CreateScopeInfo(uint32_t variable_count, bool has_parent,
+ ContextType context_type);
+ Handle<JSFunction> CreateJSFunction(int index, uint32_t start,
+ uint32_t length, uint32_t flags,
+ uint32_t context_id);
+ void DeserializeFunctionData(uint32_t count, uint32_t current_count);
void DeserializeFunctions();
+ void DeserializeClasses();
void DeserializeArrays();
void DeserializeObjects();
void DeserializeExports();
@@ -205,15 +243,22 @@ class V8_EXPORT WebSnapshotDeserializer
Handle<FixedArray> maps_;
Handle<FixedArray> contexts_;
Handle<FixedArray> functions_;
+ Handle<FixedArray> classes_;
Handle<FixedArray> arrays_;
Handle<FixedArray> objects_;
Handle<ArrayList> deferred_references_;
+ Handle<WeakFixedArray> shared_function_infos_;
+ Handle<ObjectHashTable> shared_function_info_table_;
+ Handle<Script> script_;
+
uint32_t string_count_ = 0;
uint32_t map_count_ = 0;
uint32_t context_count_ = 0;
uint32_t function_count_ = 0;
uint32_t current_function_count_ = 0;
+ uint32_t class_count_ = 0;
+ uint32_t current_class_count_ = 0;
uint32_t array_count_ = 0;
uint32_t current_array_count_ = 0;
uint32_t object_count_ = 0;
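The property-attribute flags declared above use read-only = bit 0, configurable = bit 1 and enumerable = bit 2; FlagsToAttributes in the web-snapshot.cc hunk earlier turns a cleared configurable bit into DONT_DELETE and a cleared enumerable bit into DONT_ENUM. A minimal standalone sketch of that mapping (the constant names are illustrative; the enum values match V8's public PropertyAttribute numbering):

#include <cassert>
#include <cstdint>

// Standalone sketch only; READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4
// follow V8's public PropertyAttribute values, and the bit layout mirrors
// ReadOnlyBitField (bit 0), ConfigurableBitField (bit 1) and
// EnumerableBitField (bit 2) declared above.
enum Attr : uint32_t { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

constexpr uint32_t kReadOnlyBit = 1u << 0;
constexpr uint32_t kConfigurableBit = 1u << 1;
constexpr uint32_t kEnumerableBit = 1u << 2;

uint32_t FlagsToAttributes(uint32_t flags) {
  uint32_t attributes = NONE;
  if (flags & kReadOnlyBit) attributes |= READ_ONLY;
  if (!(flags & kConfigurableBit)) attributes |= DONT_DELETE;
  if (!(flags & kEnumerableBit)) attributes |= DONT_ENUM;
  return attributes;
}

int main() {
  // The default flags (writable, configurable, enumerable) map to NONE.
  assert(FlagsToAttributes(kConfigurableBit | kEnumerableBit) == NONE);
  // A read-only flag without the enumerable bit yields READ_ONLY | DONT_ENUM.
  assert(FlagsToAttributes(kReadOnlyBit | kConfigurableBit) ==
         (READ_ONLY | DONT_ENUM));
}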
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 292146195b..a603f00258 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -39,7 +39,7 @@
'octane/typescript': [PASS, SLOW],
# https://crbug.com/v8/11905
- 'kraken/stanford-crypto-pbkdf2': [PASS, SLOW, ['tsan', SKIP]],
+ 'kraken/stanford-crypto-pbkdf2': [PASS, SLOW],
# https://crbug.com/v8/11222
'octane/raytrace': [PASS, SLOW],
@@ -117,6 +117,7 @@
'kraken/audio-dft': [SLOW],
'kraken/audio-fft': [SLOW],
'kraken/audio-oscillator': [SLOW],
+ 'octane/pdfjs': [SLOW],
}], # 'tsan'
]
diff --git a/deps/v8/test/bigint/BUILD.gn b/deps/v8/test/bigint/BUILD.gn
index 37605124be..d3fbb76345 100644
--- a/deps/v8/test/bigint/BUILD.gn
+++ b/deps/v8/test/bigint/BUILD.gn
@@ -22,8 +22,4 @@ v8_executable("bigint_shell") {
configs = [ "../..:internal_config_base" ]
sources = [ "bigint-shell.cc" ]
-
- if (v8_advanced_bigint_algorithms) {
- defines = [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
- }
}
diff --git a/deps/v8/test/bigint/bigint-shell.cc b/deps/v8/test/bigint/bigint-shell.cc
index b524944625..43692d69c5 100644
--- a/deps/v8/test/bigint/bigint-shell.cc
+++ b/deps/v8/test/bigint/bigint-shell.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
#include <string>
#include "src/bigint/bigint-internal.h"
@@ -28,8 +29,12 @@ int PrintHelp(char** argv) {
}
#define TESTS(V) \
+ V(kBarrett, "barrett") \
+ V(kBurnikel, "burnikel") \
+ V(kFFT, "fft") \
V(kKaratsuba, "karatsuba") \
- V(kBurnikel, "burnikel") V(kToom, "toom") V(kFFT, "fft")
+ V(kToom, "toom") \
+ V(kToString, "tostring")
enum Operation { kNoOp, kList, kTest };
@@ -155,23 +160,44 @@ class Runner {
error_ = true;
}
+ void AssertEquals(Digits X, int radix, char* expected, int expected_length,
+ char* actual, int actual_length) {
+ if (expected_length == actual_length &&
+ std::memcmp(expected, actual, actual_length) == 0) {
+ return;
+ }
+ std::cerr << "Input: " << FormatHex(X) << "\n";
+ std::cerr << "Radix: " << radix << "\n";
+ std::cerr << "Expected: " << std::string(expected, expected_length) << "\n";
+ std::cerr << "Actual: " << std::string(actual, actual_length) << "\n";
+ error_ = true;
+ }
+
int RunTest() {
int count = 0;
- if (test_ == kKaratsuba) {
+ if (test_ == kBarrett) {
for (int i = 0; i < runs_; i++) {
- TestKaratsuba(&count);
+ TestBarrett(&count);
}
} else if (test_ == kBurnikel) {
for (int i = 0; i < runs_; i++) {
TestBurnikel(&count);
}
+ } else if (test_ == kFFT) {
+ for (int i = 0; i < runs_; i++) {
+ TestFFT(&count);
+ }
+ } else if (test_ == kKaratsuba) {
+ for (int i = 0; i < runs_; i++) {
+ TestKaratsuba(&count);
+ }
} else if (test_ == kToom) {
for (int i = 0; i < runs_; i++) {
TestToom(&count);
}
- } else if (test_ == kFFT) {
+ } else if (test_ == kToString) {
for (int i = 0; i < runs_; i++) {
- TestFFT(&count);
+ TestToString(&count);
}
} else {
DCHECK(false); // Unreachable.
@@ -291,6 +317,80 @@ class Runner {
}
}
+#if V8_ADVANCED_BIGINT_ALGORITHMS
+ void TestBarrett_Internal(int left_size, int right_size) {
+ ScratchDigits A(left_size);
+ ScratchDigits B(right_size);
+ GenerateRandom(A);
+ GenerateRandom(B);
+ int quotient_len = DivideResultLength(A, B);
+ // {DivideResultLength} doesn't expect to be called for sizes below
+ // {kBarrettThreshold} (which we do here to save time), so we have to
+ // manually adjust the allocated result length.
+ if (B.len() < kBarrettThreshold) quotient_len++;
+ int remainder_len = right_size;
+ ScratchDigits quotient(quotient_len);
+ ScratchDigits quotient_burnikel(quotient_len);
+ ScratchDigits remainder(remainder_len);
+ ScratchDigits remainder_burnikel(remainder_len);
+ processor()->DivideBarrett(quotient, remainder, A, B);
+ processor()->DivideBurnikelZiegler(quotient_burnikel, remainder_burnikel, A,
+ B);
+ AssertEquals(A, B, quotient_burnikel, quotient);
+ AssertEquals(A, B, remainder_burnikel, remainder);
+ }
+
+ void TestBarrett(int* count) {
+ // We pick a range around kBurnikelThreshold (instead of kBarrettThreshold)
+ // to save test execution time.
+ constexpr int kMin = kBurnikelThreshold / 2;
+ constexpr int kMax = 2 * kBurnikelThreshold;
+ // {DivideBarrett(A, B)} requires that A.len > B.len!
+ for (int right_size = kMin; right_size <= kMax; right_size++) {
+ for (int left_size = right_size + 1; left_size <= kMax; left_size++) {
+ TestBarrett_Internal(left_size, right_size);
+ if (error_) return;
+ (*count)++;
+ }
+ }
+ // We also test one random large case.
+ uint64_t random_bits = rng_.NextUint64();
+ int right_size = kBarrettThreshold + static_cast<int>(random_bits & 0x3FF);
+ random_bits >>= 10;
+ int left_size = right_size + 1 + static_cast<int>(random_bits & 0x3FFF);
+ random_bits >>= 14;
+ TestBarrett_Internal(left_size, right_size);
+ if (error_) return;
+ (*count)++;
+ }
+#else
+ void TestBarrett(int* count) {}
+#endif // V8_ADVANCED_BIGINT_ALGORITHMS
+
+ void TestToString(int* count) {
+ constexpr int kMin = kToStringFastThreshold / 2;
+ constexpr int kMax = kToStringFastThreshold * 2;
+ for (int size = kMin; size < kMax; size++) {
+ ScratchDigits X(size);
+ GenerateRandom(X);
+ for (int radix = 2; radix <= 36; radix++) {
+ int chars_required = ToStringResultLength(X, radix, false);
+ int result_len = chars_required;
+ int reference_len = chars_required;
+ std::unique_ptr<char[]> result(new char[result_len]);
+ std::unique_ptr<char[]> reference(new char[reference_len]);
+ processor()->ToStringImpl(result.get(), &result_len, X, radix, false,
+ true);
+ processor()->ToStringImpl(reference.get(), &reference_len, X, radix,
+ false, false);
+ AssertEquals(X, radix, reference.get(), reference_len, result.get(),
+ result_len);
+ if (error_) return;
+ (*count)++;
+ }
+ }
+ }
+
int ParseOptions(int argc, char** argv) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "--list") == 0) {
@@ -325,6 +425,9 @@ class Runner {
}
private:
+ // TODO(jkummerow): Also generate "non-random-looking" inputs, i.e. long
+ // strings of zeros and ones in the binary representation, such as
+ // ((1 << random) ± 1).
void GenerateRandom(RWDigits Z) {
if (Z.len() == 0) return;
if (sizeof(digit_t) == 8) {
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 89a9e4bf0d..e7f011df74 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -89,8 +89,6 @@ v8_source_set("cctest_sources") {
"compiler/function-tester.cc",
"compiler/function-tester.h",
"compiler/node-observer-tester.h",
- "compiler/serializer-tester.cc",
- "compiler/serializer-tester.h",
"compiler/test-basic-block-profiler.cc",
"compiler/test-branch-combine.cc",
"compiler/test-calls-with-arraylike-or-spread.cc",
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 2478fa7871..fa42921b77 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -414,7 +414,9 @@ int RegisterThreadedTest::count_ = 0;
bool IsValidUnwrapObject(v8::Object* object) {
i::Address addr = *reinterpret_cast<i::Address*>(object);
auto instance_type = i::Internals::GetInstanceType(addr);
- return (instance_type == i::Internals::kJSObjectType ||
- instance_type == i::Internals::kJSApiObjectType ||
+ return (v8::base::IsInRange(instance_type,
+ i::Internals::kFirstJSApiObjectType,
+ i::Internals::kLastJSApiObjectType) ||
+ instance_type == i::Internals::kJSObjectType ||
instance_type == i::Internals::kJSSpecialApiObjectType);
}
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index f7a7726b30..9b36904475 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -117,9 +117,6 @@
'test-serialize/CustomSnapshotDataBlobImmortalImmovableRoots': [PASS, ['mode == debug', SKIP]],
'test-parsing/ObjectRestNegativeTestSlow': [PASS, ['mode == debug', SKIP]],
- # pthread_rwlock_t combined with signals is broken on Mac (https://crbug.com/v8/11399).
- 'signals-and-mutexes/SignalsPlusSharedMutexes': [PASS, ['system == macos', SKIP]],
-
# Tests that need to run sequentially (e.g. due to memory consumption).
'test-accessors/HandleScopePop': [PASS, HEAVY],
'test-api/FastApiCalls': [PASS, HEAVY],
@@ -614,7 +611,6 @@
# Tests that generate code at runtime.
'codegen-tester/*': [SKIP],
- 'serializer-tester/*': [SKIP],
'test-accessor-assembler/*': [SKIP],
'test-assembler-*': [SKIP],
'test-basic-block-profiler/*': [SKIP],
@@ -689,17 +685,6 @@
##############################################################################
['variant == jitless', {
# https://crbug.com/v8/7777
- 'serializer-tester/SerializeCallAnyReceiver': [SKIP],
- 'serializer-tester/SerializeCallArguments': [SKIP],
- 'serializer-tester/SerializeCallProperty': [SKIP],
- 'serializer-tester/SerializeCallProperty2': [SKIP],
- 'serializer-tester/SerializeCallUndefinedReceiver': [SKIP],
- 'serializer-tester/SerializeCallUndefinedReceiver2': [SKIP],
- 'serializer-tester/SerializeCallWithSpread': [SKIP],
- 'serializer-tester/SerializeConstruct': [SKIP],
- 'serializer-tester/SerializeConstructWithSpread': [SKIP],
- 'serializer-tester/SerializeInlinedClosure': [SKIP],
- 'serializer-tester/SerializeInlinedFunction': [SKIP],
'test-cpu-profiler/TickLinesOptimized': [SKIP],
'test-heap/TestOptimizeAfterBytecodeFlushingCandidate': [SKIP],
'test-js-to-wasm/*': [SKIP],
@@ -745,8 +730,6 @@
# Turboprop doesn't use call feedback and hence doesn't inline even if
# the inlining flag is explicitly set.
'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
- 'serializer-tester/BoundFunctionArguments': [SKIP],
- 'serializer-tester/BoundFunctionTarget': [SKIP],
'test-calls-with-arraylike-or-spread/*': [SKIP],
'test-js-to-wasm/*': [SKIP],
}], # variant == turboprop or variant == turboprop_as_toptier
@@ -822,28 +805,7 @@
'test-invalidated-slots/InvalidatedSlotsNoInvalidatedRanges': [SKIP],
'test-invalidated-slots/InvalidatedSlotsResetObjectRegression': [SKIP],
'test-invalidated-slots/InvalidatedSlotsSomeInvalidatedRanges': [SKIP],
- # Requires --concurrent_inlining / --finalize_streaming_on_background:
- 'serializer-tester/ArrowFunctionInlined': [SKIP],
- 'serializer-tester/BoundFunctionArguments': [SKIP],
- 'serializer-tester/BoundFunctionResult': [SKIP],
- 'serializer-tester/BoundFunctionTarget': [SKIP],
- 'serializer-tester/MergeJumpTargetEnvironment': [SKIP],
- 'serializer-tester/MultipleFunctionCalls': [SKIP],
- 'serializer-tester/SerializeCallAnyReceiver': [SKIP],
- 'serializer-tester/SerializeCallArguments': [SKIP],
- 'serializer-tester/SerializeCallProperty': [SKIP],
- 'serializer-tester/SerializeCallProperty2': [SKIP],
- 'serializer-tester/SerializeCallUndefinedReceiver': [SKIP],
- 'serializer-tester/SerializeCallUndefinedReceiver2': [SKIP],
- 'serializer-tester/SerializeCallWithSpread': [SKIP],
- 'serializer-tester/SerializeConditionalJump': [SKIP],
- 'serializer-tester/SerializeConstruct': [SKIP],
- 'serializer-tester/SerializeConstructSuper': [SKIP],
- 'serializer-tester/SerializeConstructWithSpread': [SKIP],
- 'serializer-tester/SerializeEmptyFunction': [SKIP],
- 'serializer-tester/SerializeInlinedClosure': [SKIP],
- 'serializer-tester/SerializeInlinedFunction': [SKIP],
- 'serializer-tester/SerializeUnconditionalJump': [SKIP],
+ # Requires --finalize_streaming_on_background:
'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadIsParked': [SKIP],
'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadParksAndUnparks': [SKIP],
'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadRunsWithSafepoints': [SKIP],
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
deleted file mode 100644
index 9829f798a0..0000000000
--- a/deps/v8/test/cctest/compiler/serializer-tester.cc
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Serializer tests don't make sense in lite mode, as it doesn't gather
-// IC feedback.
-#ifndef V8_LITE_MODE
-
-#include "test/cctest/compiler/serializer-tester.h"
-
-#include "src/api/api-inl.h"
-#include "src/codegen/optimized-compilation-info.h"
-#include "src/compiler/serializer-for-background-compilation.h"
-#include "src/compiler/zone-stats.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-SerializerTester::SerializerTester(const char* global_source,
- const char* local_source)
- : canonical_(main_isolate()) {
- // The tests only make sense in the context of concurrent compilation.
- FLAG_concurrent_inlining = true;
- // The tests don't make sense when optimizations are turned off.
- FLAG_opt = true;
- // We need the IC to feed it to the serializer.
- FLAG_use_ic = true;
- // We need manual control over when a given function is optimized.
- FLAG_always_opt = false;
- // We need allocation of executable memory for the compilation.
- FLAG_jitless = false;
- FLAG_allow_natives_syntax = true;
- FlagList::EnforceFlagImplications();
-
- CompileRun(global_source);
-
- std::string function_string = "(function() { ";
- function_string += local_source;
- function_string += " })();";
- Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(function_string.c_str()))));
- uint32_t flags = i::OptimizedCompilationInfo::kInlining;
- Optimize(function, main_zone(), main_isolate(), flags, &broker_);
- // Update handle to the corresponding serialized Handle in the broker.
- function =
- broker_->FindCanonicalPersistentHandleForTesting<JSFunction>(*function);
- function_ = MakeRef(broker(), function);
-}
-
-TEST(SerializeEmptyFunction) {
- SerializerTester tester(
- "", "function f() {}; %EnsureFeedbackVectorForFunction(f); return f;");
- JSFunctionRef function = tester.function();
- CHECK(tester.broker()->IsSerializedForCompilation(
- function.shared(), function.feedback_vector()));
-}
-
-// This helper function allows for testing whether an inlinee candidate
-// was properly serialized. It expects that the top-level function (that is
-// run through the SerializerTester) will return its inlinee candidate.
-void CheckForSerializedInlinee(const char* global_source,
- const char* local_source, int argc = 0,
- Handle<Object> argv[] = {}) {
- SerializerTester tester(global_source, local_source);
- JSFunctionRef f = tester.function();
- CHECK(tester.broker()->IsSerializedForCompilation(f.shared(),
- f.feedback_vector()));
-
- MaybeHandle<Object> g_obj = Execution::Call(
- tester.isolate(), tester.function().object(),
- tester.isolate()->factory()->undefined_value(), argc, argv);
- Handle<Object> g;
- CHECK(g_obj.ToHandle(&g));
- CHECK_WITH_MSG(
- g->IsJSFunction(),
- "The return value of the outer function must be a function too");
- Handle<JSFunction> g_func = Handle<JSFunction>::cast(g);
-
- // Look up corresponding serialized Handles in the broker.
- Handle<SharedFunctionInfo> sfi(
- tester.broker()
- ->FindCanonicalPersistentHandleForTesting<SharedFunctionInfo>(
- g_func->shared()));
- SharedFunctionInfoRef g_sfi = MakeRef(tester.broker(), sfi);
- Handle<FeedbackVector> fv(
- tester.broker()->FindCanonicalPersistentHandleForTesting<FeedbackVector>(
- g_func->feedback_vector()));
- FeedbackVectorRef g_fv = MakeRef(tester.broker(), fv);
- CHECK(tester.broker()->IsSerializedForCompilation(g_sfi, g_fv));
-}
-
-TEST(SerializeInlinedClosure) {
- CheckForSerializedInlinee("",
- "function f() {"
- " function g(){ return g; }"
- " %EnsureFeedbackVectorForFunction(g);"
- " return g();"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeInlinedFunction) {
- CheckForSerializedInlinee("",
- "function g() {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "function f() {"
- " g(); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeCallUndefinedReceiver) {
- CheckForSerializedInlinee("",
- "function g(a,b,c) {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "function f() {"
- " g(1,2,3); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeCallUndefinedReceiver2) {
- CheckForSerializedInlinee("",
- "function g(a,b) {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "function f() {"
- " g(1,2); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeCallProperty) {
- CheckForSerializedInlinee("",
- "let obj = {"
- " g: function g(a,b,c) {}"
- "};"
- "%EnsureFeedbackVectorForFunction(obj.g);"
- "function f() {"
- " obj.g(1,2,3); return obj.g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeCallProperty2) {
- CheckForSerializedInlinee("",
- "let obj = {"
- " g: function g(a,b) {}"
- "};"
- "%EnsureFeedbackVectorForFunction(obj.g);"
- "function f() {"
- " obj.g(1,2); return obj.g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeCallAnyReceiver) {
- CheckForSerializedInlinee("",
- "let obj = {"
- " g: function g() {}"
- "};"
- "%EnsureFeedbackVectorForFunction(obj.g);"
- "function f() {"
- " with(obj) {"
- " g(); return g;"
- " };"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeCallWithSpread) {
- CheckForSerializedInlinee("",
- "function g(args) {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "const arr = [1,2,3];"
- "function f() {"
- " g(...arr); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-// The following test causes the CallIC of `g` to turn megamorphic,
-// thus allowing us to test if we forward arguments hints (`callee` in this
-// example) and correctly serialize the inlining candidate `j`.
-TEST(SerializeCallArguments) {
- CheckForSerializedInlinee("",
- "function g(callee) { callee(); };"
- "function h() {};"
- "function i() {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "g(h); g(i);"
- "function f() {"
- " function j() {};"
- " g(j);"
- " return j;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "var j = f();"
- "%EnsureFeedbackVectorForFunction(j);"
- "f(); return f;");
-}
-
-TEST(SerializeConstruct) {
- CheckForSerializedInlinee("",
- "function g() {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "function f() {"
- " new g(); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeConstructWithSpread) {
- CheckForSerializedInlinee("",
- "function g(a, b, c) {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "const arr = [1, 2];"
- "function f() {"
- " new g(0, ...arr); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeConstructSuper) {
- CheckForSerializedInlinee("",
- "class A {};"
- "class B extends A { constructor() { super(); } };"
- "%EnsureFeedbackVectorForFunction(A);"
- "%EnsureFeedbackVectorForFunction(B);"
- "function f() {"
- " new B(); return A;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "f(); return f;");
-}
-
-TEST(SerializeConditionalJump) {
- CheckForSerializedInlinee("",
- "function g(callee) { callee(); };"
- "function h() {};"
- "function i() {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "let a = true;"
- "g(h); g(i);"
- "function f() {"
- " function q() {};"
- " if (a) g(q);"
- " return q;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "var q = f();"
- "%EnsureFeedbackVectorForFunction(q);"
- "f(); return f;");
-}
-
-TEST(SerializeUnconditionalJump) {
- CheckForSerializedInlinee("",
- "function g(callee) { callee(); };"
- "function h() {};"
- "function i() {};"
- "%EnsureFeedbackVectorForFunction(g);"
- "%EnsureFeedbackVectorForFunction(h);"
- "%EnsureFeedbackVectorForFunction(i);"
- "let a = false;"
- "g(h); g(i);"
- "function f() {"
- " function p() {};"
- " function q() {};"
- " if (a) q();"
- " else g(p);"
- " return p;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "var p = f();"
- "%EnsureFeedbackVectorForFunction(p);"
- "f(); return f;");
-}
-
-TEST(MergeJumpTargetEnvironment) {
- CheckForSerializedInlinee(
- "",
- "function f() {"
- " let g;"
- " while (true) {"
- " if (g === undefined) {g = ()=>1; break;} else {g = ()=>2; break};"
- " };"
- " g(); return g;"
- "};"
- "%EnsureFeedbackVectorForFunction(f);"
- "%EnsureFeedbackVectorForFunction(f());"
- "f(); return f;"); // Two calls to f to make g() megamorhpic.
-}
-
-TEST(BoundFunctionTarget) {
- const char* global = "function apply1(foo, arg) { return foo(arg); };";
- CheckForSerializedInlinee(
- global,
- "%EnsureFeedbackVectorForFunction(apply1);"
- "function test() {"
- " const lambda = (a) => a;"
- " %EnsureFeedbackVectorForFunction(lambda);"
- " let bound = apply1.bind(null, lambda).bind(null, 42);"
- " %TurbofanStaticAssert(bound() == 42); return apply1;"
- "};"
- "%EnsureFeedbackVectorForFunction(test);"
- "test(); return test;");
-}
-
-TEST(BoundFunctionArguments) {
- const char* global = "function apply2(foo, arg) { return foo(arg); };";
- CheckForSerializedInlinee(
- global,
- "%EnsureFeedbackVectorForFunction(apply2);"
- "function test() {"
- " const lambda = (a) => a;"
- " %EnsureFeedbackVectorForFunction(lambda);"
- " let bound = apply2.bind(null, lambda).bind(null, 42);"
- " %TurbofanStaticAssert(bound() == 42); return lambda;"
- "};"
- "%EnsureFeedbackVectorForFunction(test);"
- "test(); return test;");
-}
-
-TEST(ArrowFunctionInlined) {
- // The loop is to ensure there is a feedback vector for the arrow function
- // {b}.
- CheckForSerializedInlinee("",
- "function foo() {"
- " let b = x => x * x;"
- " let a = [1, 2, 3].map(b);"
- " return b;"
- "}"
- "%EnsureFeedbackVectorForFunction(foo);"
- "for (let i = 0; i < 100; ++i) foo();"
- "return foo;");
-}
-
-TEST(BoundFunctionResult) {
- CheckForSerializedInlinee(
- "",
- "function id(x) { return x }"
- "function foo() { id.bind(undefined, 42)(); return id; }"
- "%PrepareFunctionForOptimization(foo);"
- "%PrepareFunctionForOptimization(id);"
- "foo();"
- "foo();"
- "%OptimizeFunctionOnNextCall(foo);"
- "foo(); return foo;");
-}
-
-TEST(MultipleFunctionCalls) {
- CheckForSerializedInlinee(
- "",
- "function inc(x) { return ++x; }"
- "function dec(x) { return --x; }"
- "function apply(f, x) { return f(x); }"
- "function foo() { apply(inc, 42); apply(dec, 42); return dec; }"
- "%PrepareFunctionForOptimization(inc);"
- "%PrepareFunctionForOptimization(dec);"
- "%PrepareFunctionForOptimization(apply);"
- "%PrepareFunctionForOptimization(foo);"
- "foo();"
- "foo();"
- "%OptimizeFunctionOnNextCall(foo);"
- "foo(); return foo;");
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_LITE_MODE
diff --git a/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc b/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
index f47e03cf64..27c6465bca 100644
--- a/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
+++ b/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
@@ -126,6 +126,57 @@ TEST(ReduceJSCreateBoundFunction) {
IrOpcode::kPhi);
}
+static void SumF(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ ApiTestFuzzer::Fuzz();
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ int this_x = args.This()
+ ->Get(context, v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust();
+ args.GetReturnValue().Set(v8_num(
+ args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust() +
+ args[1]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust() +
+ this_x));
+}
+
+TEST(ReduceCAPICallWithArrayLike) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ FLAG_allow_natives_syntax = true;
+ FLAG_turbo_optimize_apply = true;
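+  // With --turbo_optimize_apply, the optimized code for bar() below is
+  // expected to reduce sum.apply(p, [a, b]) to a direct call of the C++
+  // callback SumF.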
+
+ Local<v8::FunctionTemplate> sum = v8::FunctionTemplate::New(isolate, SumF);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("sum"),
+ sum->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
+ Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::String> class_name = v8_str("the_class_name");
+ fun->SetClassName(class_name);
+ Local<ObjectTemplate> templ1 = ObjectTemplate::New(isolate, fun);
+ templ1->Set(isolate, "x", v8_num(42));
+ templ1->Set(isolate, "foo", sum);
+ Local<v8::Object> instance1 =
+ templ1->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("p"), instance1).FromJust());
+
+ std::string js_code =
+ "function bar(a, b) { return sum.apply(p, [a, b]); }"
+ "%PrepareFunctionForOptimization(bar);"
+ "bar(20, 22);"
+ "%OptimizeFunctionOnNextCall(bar);"
+ "bar(20, 22);";
+ v8::Local<v8::Value> result_value = CompileRun(js_code.c_str());
+ CHECK(result_value->IsNumber());
+ int32_t result =
+ ConvertJSValue<int32_t>::Get(result_value, env.local()).ToChecked();
+ CHECK_EQ(result, 84);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index c286c1705c..81c5e69e4a 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -187,9 +187,15 @@ TEST(TestConcurrentSharedFunctionInfo) {
// Finalize job.
{
+    // Cannot assert successful completion here, since concurrent modifications
+    // may have invalidated compilation dependencies (e.g. the serialized
+    // JSFunctionRef may no longer match the actual JSFunction state).
const CompilationJob::Status status = job->FinalizeJob(isolate);
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
- CHECK(job->compilation_info()->has_bytecode_array());
+ if (status == CompilationJob::SUCCEEDED) {
+ CHECK(job->compilation_info()->has_bytecode_array());
+ } else {
+ CHECK_EQ(status, CompilationJob::FAILED);
+ }
}
}
diff --git a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
index b4bc78a72f..2fab39506d 100644
--- a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
@@ -77,8 +77,8 @@ TEST(DeoptInMiddleOfBasicBlock) {
Node* node = Node::New(zone, 0, nullptr, 0, nullptr, false);
FeedbackSource feedback;
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, DeoptimizeKind::kEager, DeoptimizeReason::kUnknown, feedback,
- node);
+ kEqual, DeoptimizeKind::kEager, DeoptimizeReason::kUnknown, node->id(),
+ feedback, node);
jmp_opcode = cont.Encode(jmp_opcode);
Instruction* jmp_inst = Instruction::New(zone, jmp_opcode);
tester.CheckIsDeopt(jmp_inst);
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index aaec5e992c..d12c039f11 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -201,7 +201,7 @@ TEST(ReduceJSLoadContext1) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- Handle<ScopeInfo> empty(ScopeInfo::Empty(t.main_isolate()), t.main_isolate());
+ ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -271,7 +271,7 @@ TEST(ReduceJSLoadContext2) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- Handle<ScopeInfo> empty(ScopeInfo::Empty(t.main_isolate()), t.main_isolate());
+ ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -365,8 +365,7 @@ TEST(ReduceJSLoadContext3) {
Node* start = t.graph()->NewNode(t.common()->Start(2));
t.graph()->SetStart(start);
- Handle<ScopeInfo> empty(ScopeInfo::Empty(t.main_isolate()),
- handle_zone_scope.main_isolate());
+ ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -496,7 +495,7 @@ TEST(ReduceJSStoreContext1) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- Handle<ScopeInfo> empty(ScopeInfo::Empty(t.main_isolate()), t.main_isolate());
+ ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -540,7 +539,7 @@ TEST(ReduceJSStoreContext2) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- Handle<ScopeInfo> empty(ScopeInfo::Empty(t.main_isolate()), t.main_isolate());
+ ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -606,8 +605,7 @@ TEST(ReduceJSStoreContext3) {
Node* start = t.graph()->NewNode(t.common()->Start(2));
t.graph()->SetStart(start);
- Handle<ScopeInfo> empty(ScopeInfo::Empty(t.main_isolate()),
- handle_zone_scope.main_isolate());
+ ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index a2db7d62bd..b911164f3f 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/codegen/tick-counter.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
@@ -37,7 +38,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
common(main_zone()),
graph(main_zone()),
typer(&js_heap_broker, Typer::kNoFlags, &graph, &tick_counter),
- context_node(nullptr) {
+ context_node(nullptr),
+ deps(&js_heap_broker, main_zone()) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
graph.SetEnd(graph.NewNode(common.End(1), graph.start()));
typer.Run();
@@ -56,6 +58,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Graph graph;
Typer typer;
Node* context_node;
+ CompilationDependencies deps;
Node* Parameter(Type t, int32_t index = 0) {
Node* n = graph.NewNode(common.Parameter(index), graph.start());
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index a66bfb207f..64c8db7d0b 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -736,7 +736,7 @@ TEST(Rewire1_deferred) {
code.Defer();
int j3 = code.Jump(3);
// B3
- code.End();
+ code.Return(0);
static int forward[] = {3, 3, 3, 3};
ApplyForwarding(&code, kBlockCount, forward);
@@ -774,6 +774,29 @@ TEST(Rewire2_deferred) {
CheckAssemblyOrder(&code, kBlockCount, assembly);
}
+TEST(Rewire_deferred_diamond) {
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
+
+ // B0
+ int b1 = code.Branch(1, 2);
+ // B1
+ code.Fallthru(); // To B3
+ // B2
+ code.Defer();
+ int j1 = code.Jump(3);
+ // B3
+ code.Return(0);
+
+ static int forward[] = {0, 3, 3, 3};
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
+ CheckBranch(&code, b1, 3, 3);
+ CheckNop(&code, j1);
+
+ static int assembly[] = {0, 1, 2, 1};
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
+}
TEST(Rewire_diamond) {
constexpr size_t kBlockCount = 5;
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 7157dfdf39..ec801a8af4 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -6,6 +6,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/script-details.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
@@ -34,8 +35,7 @@ static Handle<JSFunction> Compile(const char* source) {
.ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source_code, Compiler::ScriptDetails(),
- v8::ScriptOriginOptions(), nullptr, nullptr,
+ isolate, source_code, ScriptDetails(), nullptr, nullptr,
v8::ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 930084f61d..2974b82cb0 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -34,6 +34,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/script-details.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -1489,10 +1490,10 @@ TEST(CompilationCacheCachingBehavior) {
// The script should be in the cache now.
{
v8::HandleScope scope(CcTest::isolate());
+ ScriptDetails script_details(Handle<Object>(),
+ v8::ScriptOriginOptions(true, false));
MaybeHandle<SharedFunctionInfo> cached_script =
- compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(true, false),
- language_mode);
+ compilation_cache->LookupScript(source, script_details, language_mode);
CHECK(!cached_script.is_null());
}
@@ -1500,10 +1501,10 @@ TEST(CompilationCacheCachingBehavior) {
{
CcTest::CollectAllGarbage();
v8::HandleScope scope(CcTest::isolate());
+ ScriptDetails script_details(Handle<Object>(),
+ v8::ScriptOriginOptions(true, false));
MaybeHandle<SharedFunctionInfo> cached_script =
- compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(true, false),
- language_mode);
+ compilation_cache->LookupScript(source, script_details, language_mode);
CHECK(!cached_script.is_null());
// Progress code age until it's old and ready for GC.
@@ -1520,10 +1521,10 @@ TEST(CompilationCacheCachingBehavior) {
{
v8::HandleScope scope(CcTest::isolate());
// Ensure code aging cleared the entry from the cache.
+ ScriptDetails script_details(Handle<Object>(),
+ v8::ScriptOriginOptions(true, false));
MaybeHandle<SharedFunctionInfo> cached_script =
- compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(true, false),
- language_mode);
+ compilation_cache->LookupScript(source, script_details, language_mode);
CHECK(cached_script.is_null());
}
}
@@ -5317,7 +5318,7 @@ TEST(OldSpaceAllocationCounter) {
static void CheckLeak(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = CcTest::i_isolate();
Object message(
- *reinterpret_cast<Address*>(isolate->pending_message_obj_address()));
+ *reinterpret_cast<Address*>(isolate->pending_message_address()));
CHECK(message.IsTheHole(isolate));
}
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index 6f39298a33..f277c6d846 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -95,11 +95,8 @@ TEST(SimpleAllocate) {
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
- intptr_t sum = 0;
for (auto size : sizes) {
- if (AllocateFromLab(heap, &lab, size)) {
- sum += size;
- }
+ AllocateFromLab(heap, &lab, size);
}
}
VerifyIterable(base, limit, expected_sizes);
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 0e5a857b92..17b0909059 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -19,6 +19,8 @@
#ifdef V8_OS_POSIX
#include <dirent.h>
+#elif V8_OS_WIN
+#include <windows.h>
#endif
using v8::internal::interpreter::BytecodeExpectationsPrinter;
diff --git a/deps/v8/test/cctest/libsampler/signals-and-mutexes.cc b/deps/v8/test/cctest/libsampler/signals-and-mutexes.cc
index d642501be1..b52365bc1f 100644
--- a/deps/v8/test/cctest/libsampler/signals-and-mutexes.cc
+++ b/deps/v8/test/cctest/libsampler/signals-and-mutexes.cc
@@ -19,6 +19,8 @@ namespace sampler {
// https://stackoverflow.com/questions/22643374/deadlock-with-pthread-rwlock-t-and-signals
// This test reproduces it, and can be used to test if this problem is fixed in
// future Mac releases.
+// Note: For now, we fall back to using pthread_mutex_t to implement SharedMutex
+// on Mac, so this test succeeds.
#ifdef USE_SIGNALS
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index cb32668ac5..25fba193bb 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -22622,25 +22622,86 @@ TEST(ChainSignatureCheck) {
static const char* last_event_message;
-static int last_event_status;
+// See v8::LogEventStatus
+static v8::LogEventStatus last_event_status;
+static int event_count = 0;
void StoringEventLoggerCallback(const char* message, int status) {
last_event_message = message;
- last_event_status = status;
+ last_event_status = static_cast<v8::LogEventStatus>(status);
+ event_count++;
}
TEST(EventLogging) {
v8::Isolate* isolate = CcTest::isolate();
isolate->SetEventLogger(StoringEventLoggerCallback);
- v8::internal::HistogramTimer histogramTimer(
- "V8.Test", 0, 10000, v8::internal::HistogramTimerResolution::MILLISECOND,
+ v8::internal::NestedTimedHistogram histogram(
+ "V8.Test", 0, 10000, v8::internal::TimedHistogramResolution::MILLISECOND,
50, reinterpret_cast<v8::internal::Isolate*>(isolate)->counters());
- histogramTimer.Start();
- CHECK_EQ(0, strcmp("V8.Test", last_event_message));
- CHECK_EQ(0, last_event_status);
- histogramTimer.Stop();
+ event_count = 0;
+ int count = 0;
+ {
+ CHECK_EQ(0, event_count);
+ {
+ CHECK_EQ(0, event_count);
+ v8::internal::NestedTimedHistogramScope scope0(&histogram);
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kStart, last_event_status);
+ CHECK_EQ(++count, event_count);
+ }
+ CHECK_EQ(v8::LogEventStatus::kEnd, last_event_status);
+ CHECK_EQ(++count, event_count);
+
+ v8::internal::NestedTimedHistogramScope scope1(&histogram);
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kStart, last_event_status);
+ CHECK_EQ(++count, event_count);
+ {
+ CHECK_EQ(count, event_count);
+ v8::internal::NestedTimedHistogramScope scope2(&histogram);
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kStart, last_event_status);
+ CHECK_EQ(++count, event_count);
+ {
+ CHECK_EQ(count, event_count);
+ v8::internal::NestedTimedHistogramScope scope3(&histogram);
+ CHECK_EQ(++count, event_count);
+ v8::internal::PauseNestedTimedHistogramScope scope4(&histogram);
+        // The outer timer scope is just paused; no event is emitted yet.
+ CHECK_EQ(count, event_count);
+ {
+ CHECK_EQ(count, event_count);
+ v8::internal::NestedTimedHistogramScope scope5(&histogram);
+ v8::internal::NestedTimedHistogramScope scope5_1(&histogram);
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kStart, last_event_status);
+ count++;
+ CHECK_EQ(++count, event_count);
+ }
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kEnd, last_event_status);
+ count++;
+ CHECK_EQ(++count, event_count);
+ }
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kEnd, last_event_status);
+ CHECK_EQ(++count, event_count);
+ v8::internal::PauseNestedTimedHistogramScope scope6(&histogram);
+      // The outer timer scope is just paused; no event is emitted yet.
+ CHECK_EQ(count, event_count);
+ {
+ v8::internal::PauseNestedTimedHistogramScope scope7(&histogram);
+ CHECK_EQ(count, event_count);
+ }
+ CHECK_EQ(count, event_count);
+ }
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
+ CHECK_EQ(v8::LogEventStatus::kEnd, last_event_status);
+ CHECK_EQ(++count, event_count);
+ }
CHECK_EQ(0, strcmp("V8.Test", last_event_message));
- CHECK_EQ(1, last_event_status);
+ CHECK_EQ(v8::LogEventStatus::kEnd, last_event_status);
+ CHECK_EQ(++count, event_count);
}
TEST(PropertyDescriptor) {
@@ -24378,7 +24439,7 @@ TEST(CreateSyntheticModule) {
.IsUndefined());
CHECK_EQ(i_module->export_names().length(), 1);
CHECK(i::String::cast(i_module->export_names().get(0)).Equals(*default_name));
- CHECK_EQ(i_module->status(), i::Module::kInstantiated);
+ CHECK_EQ(i_module->status(), i::Module::kLinked);
CHECK(module->IsSyntheticModule());
CHECK(!module->IsSourceTextModule());
CHECK_EQ(module->GetModuleRequests()->Length(), 0);
@@ -28737,7 +28798,7 @@ TEST(FastApiCalls) {
#ifndef V8_LITE_MODE
namespace {
void FastCallback1TypedArray(v8::Local<v8::Object> receiver, int arg0,
- v8::FastApiTypedArray<double> arg1) {
+ const v8::FastApiTypedArray<double>& arg1) {
// TODO(mslekova): Use the TypedArray parameter
}
@@ -29251,3 +29312,57 @@ TEST(TestSetSabConstructorEnabledCallback) {
sab_constructor_enabled_value = true;
CHECK(i_isolate->IsSharedArrayBufferConstructorEnabled(i_context));
}
+
+namespace {
+void NodeTypeCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ args.GetReturnValue().Set(v8::Number::New(isolate, 1));
+}
+} // namespace
+
+TEST(EmbedderInstanceTypes) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ i::FLAG_embedder_instance_types = true;
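+  // The trailing integer arguments to FunctionTemplate::New below presumably
+  // assign embedder instance types (element = 1, html_element = 2,
+  // div_element = 3) and restrict the nodeType accessor's receiver to the
+  // range [1, 3].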
+ Local<FunctionTemplate> node = FunctionTemplate::New(isolate);
+ Local<ObjectTemplate> proto_template = node->PrototypeTemplate();
+ Local<FunctionTemplate> nodeType = v8::FunctionTemplate::New(
+ isolate, NodeTypeCallback, Local<Value>(),
+ v8::Signature::New(isolate, node), 0, v8::ConstructorBehavior::kThrow,
+ v8::SideEffectType::kHasSideEffect, nullptr, 0, 1, 3);
+ proto_template->SetAccessorProperty(
+ String::NewFromUtf8Literal(isolate, "nodeType"), nodeType);
+
+ Local<FunctionTemplate> element = FunctionTemplate::New(
+ isolate, nullptr, Local<Value>(), Local<v8::Signature>(), 0,
+ v8::ConstructorBehavior::kAllow, v8::SideEffectType::kHasSideEffect,
+ nullptr, 1);
+ element->Inherit(node);
+
+ Local<FunctionTemplate> html_element = FunctionTemplate::New(
+ isolate, nullptr, Local<Value>(), Local<v8::Signature>(), 0,
+ v8::ConstructorBehavior::kAllow, v8::SideEffectType::kHasSideEffect,
+ nullptr, 2);
+ html_element->Inherit(element);
+
+ Local<FunctionTemplate> div_element = FunctionTemplate::New(
+ isolate, nullptr, Local<Value>(), Local<v8::Signature>(), 0,
+ v8::ConstructorBehavior::kAllow, v8::SideEffectType::kHasSideEffect,
+ nullptr, 3);
+ div_element->Inherit(html_element);
+
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("div"),
+ div_element->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust());
+
+ CompileRun("var x = div.nodeType;");
+
+ Local<Value> res =
+ env->Global()->Get(env.local(), v8_str("x")).ToLocalChecked();
+ CHECK_EQ(1, res->ToInt32(env.local()).ToLocalChecked()->Value());
+}
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
index 3ab3ac7c37..9d28cd484e 100644
--- a/deps/v8/test/cctest/test-atomicops.cc
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -286,5 +286,22 @@ TEST(Load) {
TestLoad<AtomicWord>();
}
+TEST(Relaxed_Memmove) {
+ constexpr size_t kLen = 6;
+ Atomic8 arr[kLen];
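+  // Exercise overlapping moves in both directions: destination below the
+  // source and destination above the source.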
+ {
+ for (size_t i = 0; i < kLen; ++i) arr[i] = i;
+ Relaxed_Memmove(arr + 2, arr + 3, 2);
+ uint8_t expected[]{0, 1, 3, 4, 4, 5};
+ for (size_t i = 0; i < kLen; ++i) CHECK_EQ(arr[i], expected[i]);
+ }
+ {
+ for (size_t i = 0; i < kLen; ++i) arr[i] = i;
+ Relaxed_Memmove(arr + 3, arr + 2, 2);
+ uint8_t expected[]{0, 1, 2, 2, 3, 5};
+ for (size_t i = 0; i < kLen; ++i) CHECK_EQ(arr[i], expected[i]);
+ }
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 702046ade6..ee255d7b1f 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -27,18 +27,19 @@
#include <stdlib.h>
#include <wchar.h>
-#include <memory>
-#include "src/init/v8.h"
+#include <memory>
#include "include/v8-profiler.h"
#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/codegen/script-details.h"
#include "src/diagnostics/disasm.h"
#include "src/heap/factory.h"
#include "src/heap/spaces.h"
+#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects-inl.h"
@@ -72,8 +73,7 @@ static Handle<JSFunction> Compile(const char* source) {
.ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source_code, Compiler::ScriptDetails(),
- v8::ScriptOriginOptions(), nullptr, nullptr,
+ isolate, source_code, ScriptDetails(), nullptr, nullptr,
v8::ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index b822b02828..342dd46d53 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -70,7 +70,9 @@ static void CheckMigrationTarget(Isolate* isolate, Map old_map, Map new_map) {
.GetMigrationTarget();
if (target.is_null()) return;
CHECK_EQ(new_map, target);
- CHECK_EQ(Map::TryUpdateSlow(isolate, old_map), target);
+ CHECK_EQ(MapUpdater::TryUpdateNoLock(isolate, old_map,
+ ConcurrencyMode::kNotConcurrent),
+ target);
}
class Expectations {
@@ -1831,8 +1833,8 @@ static void TestReconfigureElementsKind_GeneralizeFieldInPlace(
{
MapHandles map_list;
map_list.push_back(updated_map);
- Map transitioned_map =
- map2->FindElementsKindTransitionedMap(isolate, map_list);
+ Map transitioned_map = map2->FindElementsKindTransitionedMap(
+ isolate, map_list, ConcurrencyMode::kNotConcurrent);
CHECK_EQ(*updated_map, transitioned_map);
}
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 743cb6e8f3..dc23a8f601 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -2358,7 +2358,7 @@ TEST(HiddenPropertiesFastCase) {
GetProperty(isolate, global, v8::HeapGraphEdge::kProperty, "c");
CHECK(c);
const v8::HeapGraphNode* hidden_props =
- GetProperty(isolate, global, v8::HeapGraphEdge::kProperty, "<symbol>");
+ GetProperty(isolate, c, v8::HeapGraphEdge::kProperty, "<symbol key>");
CHECK(!hidden_props);
v8::Local<v8::Value> cHandle =
@@ -2377,10 +2377,32 @@ TEST(HiddenPropertiesFastCase) {
c = GetProperty(isolate, global, v8::HeapGraphEdge::kProperty, "c");
CHECK(c);
hidden_props =
- GetProperty(isolate, c, v8::HeapGraphEdge::kProperty, "<symbol>");
+ GetProperty(isolate, c, v8::HeapGraphEdge::kProperty, "<symbol key>");
CHECK(hidden_props);
}
+TEST(SymbolsAndPrivateClassFields) {
+ v8::Isolate* isolate = CcTest::isolate();
+ LocalContext env;
+ v8::HandleScope scope(isolate);
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+
+ CompileRun(
+ "class C { #private = this; [Symbol('MySymbol')] = this; };\n"
+ "c = new C;\n");
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* c =
+ GetProperty(isolate, global, v8::HeapGraphEdge::kProperty, "c");
+ CHECK(c);
+ const v8::HeapGraphNode* prop;
+ prop = GetProperty(isolate, c, v8::HeapGraphEdge::kProperty, "#private");
+ CHECK(prop);
+ prop = GetProperty(isolate, c, v8::HeapGraphEdge::kProperty,
+ "<symbol MySymbol>");
+ CHECK(prop);
+}
TEST(AccessorInfo) {
LocalContext env;
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index 529701c227..f68789df2c 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -189,15 +189,23 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWriteExecute));
- SwitchMemoryPermissionsToWritable();
- FloodWithInc(isolate, buffer.get());
- FlushInstructionCache(buffer->start(), buffer->size());
- SwitchMemoryPermissionsToExecutable();
+ {
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+ // Make sure to switch memory to writable on M1 hardware.
+ wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
+#endif
+ FloodWithInc(isolate, buffer.get());
+ FlushInstructionCache(buffer->start(), buffer->size());
+ }
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
- SwitchMemoryPermissionsToWritable();
- FloodWithNop(isolate, buffer.get());
- FlushInstructionCache(buffer->start(), buffer->size());
- SwitchMemoryPermissionsToExecutable();
+ {
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+ // Make sure to switch memory to writable on M1 hardware.
+ wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
+#endif
+ FloodWithNop(isolate, buffer.get());
+ FlushInstructionCache(buffer->start(), buffer->size());
+ }
CHECK_EQ(23, f.Call(23)); // Call into generated code.
}
}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 1abdfa10e9..b78052c9cd 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -33,6 +33,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/script-details.h"
#include "src/common/assert-scope.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
@@ -1570,30 +1571,30 @@ int CountBuiltins() {
}
static Handle<SharedFunctionInfo> CompileScript(
- Isolate* isolate, Handle<String> source, Handle<String> name,
- ScriptData* cached_data, v8::ScriptCompiler::CompileOptions options) {
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ v8::ScriptCompiler::CompileOptions options) {
return Compiler::GetSharedFunctionInfoForScript(
- isolate, source, Compiler::ScriptDetails(name),
- v8::ScriptOriginOptions(), nullptr, cached_data, options,
+ isolate, source, script_details, nullptr, cached_data, options,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
}
static Handle<SharedFunctionInfo> CompileScriptAndProduceCache(
- Isolate* isolate, Handle<String> source, Handle<String> name,
- ScriptData** script_data, v8::ScriptCompiler::CompileOptions options) {
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData** out_cached_data,
+ v8::ScriptCompiler::CompileOptions options) {
Handle<SharedFunctionInfo> sfi =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source, Compiler::ScriptDetails(name),
- v8::ScriptOriginOptions(), nullptr, nullptr, options,
+ isolate, source, script_details, nullptr, nullptr, options,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
std::unique_ptr<ScriptCompiler::CachedData> cached_data(
ScriptCompiler::CreateCodeCache(ToApiHandle<UnboundScript>(sfi)));
uint8_t* buffer = NewArray<uint8_t>(cached_data->length);
MemCopy(buffer, cached_data->data, cached_data->length);
- *script_data = new i::ScriptData(buffer, cached_data->length);
- (*script_data)->AcquireDataOwnership();
+ *out_cached_data = new i::AlignedCachedData(buffer, cached_data->length);
+ (*out_cached_data)->AcquireDataOwnership();
return sfi;
}
@@ -1619,20 +1620,21 @@ TEST(CodeSerializerWithProfiler) {
CHECK(!orig_source.is_identical_to(copy_source));
CHECK(orig_source->Equals(*copy_source));
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
+ ScriptDetails default_script_details;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, orig_source, Handle<String>(), &cache,
+ isolate, orig_source, default_script_details, &cache,
v8::ScriptCompiler::kNoCompileOptions);
CHECK(!orig->GetBytecodeArray(isolate).HasSourcePositionTable());
- isolate->set_is_profiling(true);
+ isolate->SetIsProfiling(true);
// This does not assert that no compilation can happen as source position
// collection could trigger it.
Handle<SharedFunctionInfo> copy =
- CompileScript(isolate, copy_source, Handle<String>(), cache,
+ CompileScript(isolate, copy_source, default_script_details, cache,
v8::ScriptCompiler::kConsumeCodeCache);
// Since the profiler is now enabled, source positions should be collected
@@ -1661,10 +1663,11 @@ void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
CHECK(!orig_source.is_identical_to(copy_source));
CHECK(orig_source->Equals(*copy_source));
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
+ ScriptDetails default_script_details;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, orig_source, Handle<String>(), &cache,
+ isolate, orig_source, default_script_details, &cache,
v8::ScriptCompiler::kNoCompileOptions);
int builtins_count = CountBuiltins();
@@ -1672,7 +1675,7 @@ void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, copy_source, Handle<String>(), cache,
+ copy = CompileScript(isolate, copy_source, default_script_details, cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
@@ -1719,23 +1722,126 @@ TEST(CodeSerializerPromotedToCompilationCache) {
const char* source = "1 + 1";
- Handle<String> src = isolate->factory()
- ->NewStringFromUtf8(base::CStrVector(source))
- .ToHandleChecked();
- ScriptData* cache = nullptr;
-
- CompileScriptAndProduceCache(isolate, src, src, &cache,
+ Handle<String> src = isolate->factory()->NewStringFromAsciiChecked(source);
+ AlignedCachedData* cache = nullptr;
+
+ Handle<FixedArray> default_host_defined_options =
+ isolate->factory()->NewFixedArray(2);
+ default_host_defined_options->set(0, Smi::FromInt(0));
+ const char* default_host_defined_option_1_string = "custom string";
+ Handle<String> default_host_defined_option_1 =
+ isolate->factory()->NewStringFromAsciiChecked(
+ default_host_defined_option_1_string);
+ default_host_defined_options->set(1, *default_host_defined_option_1);
+
+ ScriptDetails default_script_details(src);
+ default_script_details.host_defined_options = default_host_defined_options;
+ CompileScriptAndProduceCache(isolate, src, default_script_details, &cache,
v8::ScriptCompiler::kNoCompileOptions);
DisallowCompilation no_compile_expected(isolate);
- Handle<SharedFunctionInfo> copy = CompileScript(
- isolate, src, src, cache, v8::ScriptCompiler::kConsumeCodeCache);
+ Handle<SharedFunctionInfo> copy =
+ CompileScript(isolate, src, default_script_details, cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
- MaybeHandle<SharedFunctionInfo> shared =
- isolate->compilation_cache()->LookupScript(
- src, src, 0, 0, v8::ScriptOriginOptions(), LanguageMode::kSloppy);
+ {
+ ScriptDetails script_details(src);
+ script_details.host_defined_options =
+ default_script_details.host_defined_options;
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK_EQ(*shared.ToHandleChecked(), *copy);
+ }
- CHECK(*shared.ToHandleChecked() == *copy);
+ {
+ // Lookup with strictly equal host_defined_options should succeed:
+ ScriptDetails script_details(src);
+ Handle<FixedArray> host_defined_options =
+ isolate->factory()->NewFixedArray(2);
+ host_defined_options->set(0, default_host_defined_options->get(0));
+ Handle<String> host_defined_option_1 =
+ isolate->factory()->NewStringFromAsciiChecked(
+ default_host_defined_option_1_string);
+ host_defined_options->set(1, *host_defined_option_1);
+ script_details.host_defined_options = host_defined_options;
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK_EQ(*shared.ToHandleChecked(), *copy);
+ }
+
+ {
+ // Lookup with different string with same contents should succeed:
+ ScriptDetails script_details(
+ isolate->factory()->NewStringFromAsciiChecked(source));
+ script_details.host_defined_options =
+ default_script_details.host_defined_options;
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK_EQ(*shared.ToHandleChecked(), *copy);
+ }
+
+ {
+ // Lookup with different string should fail:
+ ScriptDetails script_details(
+ isolate->factory()->NewStringFromAsciiChecked("other"));
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK(shared.is_null());
+ }
+
+ {
+    // Lookup with a different line offset should fail:
+ ScriptDetails script_details(src);
+ script_details.line_offset = 0xFF;
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK(shared.is_null());
+ }
+
+ {
+    // Lookup with a different column offset should fail:
+ ScriptDetails script_details(src);
+ script_details.column_offset = 0xFF;
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK(shared.is_null());
+ }
+
+ {
+ // Lookup with different language mode should fail:
+ ScriptDetails script_details(src);
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kStrict);
+ CHECK(shared.is_null());
+ }
+
+ {
+ // Lookup with different script_options should fail
+ ScriptOriginOptions origin_options(false, true);
+ CHECK_NE(ScriptOriginOptions().Flags(), origin_options.Flags());
+ ScriptDetails script_details(src, origin_options);
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK(shared.is_null());
+ }
+
+ {
+ // Lookup with different host_defined_options should fail:
+ ScriptDetails script_details(src);
+ script_details.host_defined_options = isolate->factory()->NewFixedArray(5);
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(src, script_details,
+ LanguageMode::kSloppy);
+ CHECK(shared.is_null());
+ }
delete cache;
}
@@ -1761,9 +1867,9 @@ TEST(CodeSerializerInternalizedString) {
Handle<JSObject> global(isolate->context().global_object(), isolate);
- i::ScriptData* script_data = nullptr;
+ i::AlignedCachedData* cached_data = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, orig_source, Handle<String>(), &script_data,
+ isolate, orig_source, ScriptDetails(), &cached_data,
v8::ScriptCompiler::kNoCompileOptions);
Handle<JSFunction> orig_fun =
Factory::JSFunctionBuilder{isolate, orig, isolate->native_context()}
@@ -1777,7 +1883,7 @@ TEST(CodeSerializerInternalizedString) {
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, copy_source, Handle<String>(), script_data,
+ copy = CompileScript(isolate, copy_source, ScriptDetails(), cached_data,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1796,7 +1902,7 @@ TEST(CodeSerializerInternalizedString) {
CHECK(Handle<String>::cast(copy_result)->Equals(*expected));
CHECK_EQ(builtins_count, CountBuiltins());
- delete script_data;
+ delete cached_data;
}
TEST(CodeSerializerLargeCodeObject) {
@@ -1820,18 +1926,18 @@ TEST(CodeSerializerLargeCodeObject) {
isolate->factory()->NewStringFromUtf8(source).ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kNoCompileOptions);
+ Handle<SharedFunctionInfo> orig =
+ CompileScriptAndProduceCache(isolate, source_str, ScriptDetails(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
CHECK(isolate->heap()->InSpace(orig->abstract_code(isolate), LO_SPACE));
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_str, Handle<String>(), cache,
+ copy = CompileScript(isolate, source_str, ScriptDetails(), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1887,11 +1993,11 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
}
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kNoCompileOptions);
+ Handle<SharedFunctionInfo> orig =
+ CompileScriptAndProduceCache(isolate, source_str, ScriptDetails(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
CHECK(heap->InSpace(orig->abstract_code(isolate), LO_SPACE));
@@ -1906,7 +2012,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_str, Handle<String>(), cache,
+ copy = CompileScript(isolate, source_str, ScriptDetails(), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1952,16 +2058,16 @@ TEST(CodeSerializerLargeStrings) {
.ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kNoCompileOptions);
+ Handle<SharedFunctionInfo> orig =
+ CompileScriptAndProduceCache(isolate, source_str, ScriptDetails(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_str, Handle<String>(), cache,
+ copy = CompileScript(isolate, source_str, ScriptDetails(), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -2026,16 +2132,16 @@ TEST(CodeSerializerThreeBigStrings) {
.ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kNoCompileOptions);
+ Handle<SharedFunctionInfo> orig =
+ CompileScriptAndProduceCache(isolate, source_str, ScriptDetails(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_str, Handle<String>(), cache,
+ copy = CompileScript(isolate, source_str, ScriptDetails(), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -2145,16 +2251,16 @@ TEST(CodeSerializerExternalString) {
.ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, source_string, Handle<String>(), &cache,
+ isolate, source_string, ScriptDetails(), &cache,
v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_string, Handle<String>(), cache,
+ copy = CompileScript(isolate, source_string, ScriptDetails(), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -2209,16 +2315,16 @@ TEST(CodeSerializerLargeExternalString) {
.ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
- isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kNoCompileOptions);
+ Handle<SharedFunctionInfo> orig =
+ CompileScriptAndProduceCache(isolate, source_str, ScriptDetails(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_str, Handle<String>(), cache,
+ copy = CompileScript(isolate, source_str, ScriptDetails(), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -2263,16 +2369,16 @@ TEST(CodeSerializerExternalScriptName) {
CHECK(!name->IsInternalizedString());
Handle<JSObject> global(isolate->context().global_object(), isolate);
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScriptAndProduceCache(isolate, source_string, name, &cache,
- v8::ScriptCompiler::kNoCompileOptions);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_string, ScriptDetails(name), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, source_string, name, cache,
+ copy = CompileScript(isolate, source_string, ScriptDetails(name), cache,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -2631,11 +2737,11 @@ TEST(Regress503552) {
HandleScope scope(isolate);
Handle<String> source = isolate->factory()->NewStringFromAsciiChecked(
"function f() {} function g() {}");
- ScriptData* script_data = nullptr;
+ AlignedCachedData* cached_data = nullptr;
Handle<SharedFunctionInfo> shared = CompileScriptAndProduceCache(
- isolate, source, Handle<String>(), &script_data,
+ isolate, source, ScriptDetails(), &cached_data,
v8::ScriptCompiler::kNoCompileOptions);
- delete script_data;
+ delete cached_data;
heap::SimulateIncrementalMarking(isolate->heap());
@@ -4020,14 +4126,16 @@ TEST(WeakArraySerializationInCodeCache) {
Handle<String> src = isolate->factory()
->NewStringFromUtf8(base::CStrVector(source))
.ToHandleChecked();
- ScriptData* cache = nullptr;
+ AlignedCachedData* cache = nullptr;
- CompileScriptAndProduceCache(isolate, src, src, &cache,
+ ScriptDetails script_details(src);
+ CompileScriptAndProduceCache(isolate, src, script_details, &cache,
v8::ScriptCompiler::kNoCompileOptions);
DisallowCompilation no_compile_expected(isolate);
- Handle<SharedFunctionInfo> copy = CompileScript(
- isolate, src, src, cache, v8::ScriptCompiler::kConsumeCodeCache);
+ Handle<SharedFunctionInfo> copy =
+ CompileScript(isolate, src, script_details, cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
// Verify that the pointers in shared_function_infos are weak.
WeakFixedArray sfis = Script::cast(copy->script()).shared_function_infos();
diff --git a/deps/v8/test/cctest/test-stack-unwinding-win64.cc b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
index cd0243723b..138f4822fa 100644
--- a/deps/v8/test/cctest/test-stack-unwinding-win64.cc
+++ b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
@@ -12,6 +12,11 @@
#define CONTEXT_PC(context) (context.Pc)
#endif
+#include <windows.h>
+
+// This has to come after windows.h.
+#include <versionhelpers.h> // For IsWindows8OrGreater().
+
class UnwindingWin64Callbacks {
public:
UnwindingWin64Callbacks() = default;
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index dabd7b0dfe..93f899d559 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -239,6 +239,14 @@ TEST(TerminateBigIntToString) {
"fail();");
}
+TEST(TerminateBigIntFromString) {
+ TestTerminatingSlowOperation(
+ "var a = '12344567890'.repeat(10000);\n"
+ "terminate();\n"
+ "BigInt(a);\n"
+ "fail();\n");
+}
+
int call_count = 0;
diff --git a/deps/v8/test/cctest/test-web-snapshots.cc b/deps/v8/test/cctest/test-web-snapshots.cc
index c616b8428e..56c79d075a 100644
--- a/deps/v8/test/cctest/test-web-snapshots.cc
+++ b/deps/v8/test/cctest/test-web-snapshots.cc
@@ -366,8 +366,65 @@ TEST(SFIDeduplication) {
}
}
+TEST(SFIDeduplicationClasses) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ WebSnapshotData snapshot_data;
+ {
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ const char* snapshot_source =
+ "let foo = {};\n"
+ "foo.create = function(a) {\n"
+ " return class {\n"
+ " constructor(x) {this.x = x;};\n"
+ " }\n"
+ "}\n"
+ "foo.class = foo.create('hi');";
+
+ CompileRun(snapshot_source);
+ v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
+ v8::Local<v8::String> str =
+ v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
+ exports->Set(isolate, 0, str);
+ WebSnapshotSerializer serializer(isolate);
+ CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
+ CHECK(!serializer.has_error());
+ CHECK_NOT_NULL(snapshot_data.buffer);
+ }
+
+ {
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ WebSnapshotDeserializer deserializer(isolate);
+ CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
+ snapshot_data.buffer_size));
+ CHECK(!deserializer.has_error());
+
+ const char* get_class = "foo.class";
+ const char* create_new_class = "foo.create()";
+
+    // Verify that foo.class and the class which is the result of calling
+    // foo.create() after deserialization share the SFI.
+ v8::Local<v8::Function> v8_class1 =
+ CompileRun(get_class).As<v8::Function>();
+ v8::Local<v8::Function> v8_class2 =
+ CompileRun(create_new_class).As<v8::Function>();
+
+ Handle<JSFunction> class1 =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class1));
+ Handle<JSFunction> class2 =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class2));
+
+ CHECK_EQ(class1->shared(), class2->shared());
+ }
+}
+
TEST(SFIDeduplicationAfterBytecodeFlushing) {
- FLAG_stress_flush_bytecode = true;
+ FLAG_stress_flush_code = true;
+ FLAG_flush_bytecode = true;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -451,6 +508,92 @@ TEST(SFIDeduplicationAfterBytecodeFlushing) {
}
}
+TEST(SFIDeduplicationAfterBytecodeFlushingClasses) {
+ FLAG_stress_flush_code = true;
+ FLAG_flush_bytecode = true;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+
+ WebSnapshotData snapshot_data;
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+
+ const char* snapshot_source =
+ "let foo = {};\n"
+ "foo.create = function(a) {\n"
+ " return class {\n"
+ " constructor(x) {this.x = x;};\n"
+ " }\n"
+ "}\n"
+ "foo.class = foo.create('hi');";
+
+ CompileRun(snapshot_source);
+
+ v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
+ v8::Local<v8::String> str =
+ v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
+ exports->Set(isolate, 0, str);
+ WebSnapshotSerializer serializer(isolate);
+ CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
+ CHECK(!serializer.has_error());
+ CHECK_NOT_NULL(snapshot_data.buffer);
+ }
+
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ WebSnapshotDeserializer deserializer(isolate);
+ CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
+ snapshot_data.buffer_size));
+ CHECK(!deserializer.has_error());
+
+ const char* get_create = "foo.create";
+ const char* get_class = "foo.class";
+ const char* create_new_class = "foo.create()";
+
+ v8::Local<v8::Function> v8_create =
+ CompileRun(get_create).As<v8::Function>();
+ Handle<JSFunction> create =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_create));
+ CHECK(!create->shared().is_compiled());
+
+ v8::Local<v8::Function> v8_class1 =
+ CompileRun(get_class).As<v8::Function>();
+ v8::Local<v8::Function> v8_class2 =
+ CompileRun(create_new_class).As<v8::Function>();
+
+ Handle<JSFunction> class1 =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class1));
+ Handle<JSFunction> class2 =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class2));
+
+ CHECK(create->shared().is_compiled());
+ CHECK_EQ(class1->shared(), class2->shared());
+
+    // Force bytecode flushing of "foo.create".
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+
+ CHECK(!create->shared().is_compiled());
+
+    // Create another class.
+ v8::Local<v8::Function> v8_class3 =
+ CompileRun(create_new_class).As<v8::Function>();
+ Handle<JSFunction> class3 =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class3));
+
+    // Check that it shares the SFI with the original class which is in the
+    // snapshot.
+ CHECK_EQ(class1->shared(), class3->shared());
+ }
+}
+
TEST(SFIDeduplicationOfFunctionsNotInSnapshot) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index f819fac2ca..e671d247ce 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -159,8 +159,8 @@ void CompileJumpTableThunk(Address thunk, Address jump_target) {
FlushInstructionCache(thunk, kThunkBufferSize);
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// MacOS on arm64 refuses {mprotect} calls to toggle permissions of RWX
- // memory. Simply do nothing here, and rely on
- // {SwitchMemoryPermissionsToExecutable} in the JumpTableRunner.
+ // memory. Simply do nothing here, as the space will by default be executable
+ // and non-writable for the JumpTableRunner.
#else
CHECK(SetPermissions(GetPlatformPageAllocator(), thunk, kThunkBufferSize,
v8::PageAllocator::kReadExecute));
@@ -176,7 +176,6 @@ class JumpTableRunner : public v8::base::Thread {
void Run() override {
TRACE("Runner #%d is starting ...\n", runner_id_);
- SwitchMemoryPermissionsToExecutable();
GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
TRACE("Runner #%d is stopping ...\n", runner_id_);
USE(runner_id_);
@@ -199,7 +198,10 @@ class JumpTablePatcher : public v8::base::Thread {
void Run() override {
TRACE("Patcher %p is starting ...\n", this);
- SwitchMemoryPermissionsToWritable();
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+ // Make sure to switch memory to writable on M1 hardware.
+ CodeSpaceWriteScope code_space_write_scope(nullptr);
+#endif
Address slot_address =
slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
// First, emit code to the two thunks.
@@ -250,7 +252,6 @@ TEST(JumpTablePatchingStress) {
std::bitset<kAvailableBufferSlots> used_thunk_slots;
buffer->MakeWritableAndExecutable();
- SwitchMemoryPermissionsToWritable();
// Iterate through jump-table slots to hammer at different alignments within
// the jump-table, thereby increasing stress for variable-length ISAs.
@@ -259,22 +260,29 @@ TEST(JumpTablePatchingStress) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
- // Patch the jump table slot to jump to itself. This will later be patched
- // by the patchers.
- Address slot_addr =
- slot_start + JumpTableAssembler::JumpSlotIndexToOffset(slot);
- JumpTableAssembler::PatchJumpTableSlot(slot_addr, kNullAddress, slot_addr);
- // For each patcher, generate two thunks where this patcher can emit code
- // which finally jumps back to {slot} in the jump table.
std::vector<Address> patcher_thunks;
- for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
- Address thunk =
- AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
- &used_thunk_slots, &thunk_buffers);
- ZapCode(thunk, kThunkBufferSize);
- patcher_thunks.push_back(thunk);
- TRACE(" generated jump thunk: " V8PRIxPTR_FMT "\n",
- patcher_thunks.back());
+ {
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+ // Make sure to switch memory to writable on M1 hardware.
+ CodeSpaceWriteScope code_space_write_scope(nullptr);
+#endif
+ // Patch the jump table slot to jump to itself. This will later be patched
+ // by the patchers.
+ Address slot_addr =
+ slot_start + JumpTableAssembler::JumpSlotIndexToOffset(slot);
+ JumpTableAssembler::PatchJumpTableSlot(slot_addr, kNullAddress,
+ slot_addr);
+ // For each patcher, generate two thunks where this patcher can emit code
+ // which finally jumps back to {slot} in the jump table.
+ for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
+ Address thunk =
+ AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
+ &used_thunk_slots, &thunk_buffers);
+ ZapCode(thunk, kThunkBufferSize);
+ patcher_thunks.push_back(thunk);
+ TRACE(" generated jump thunk: " V8PRIxPTR_FMT "\n",
+ patcher_thunks.back());
+ }
}
// Start multiple runner threads that execute the jump table slot
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index ee93220bb1..2e3cdf48ca 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -46,10 +46,10 @@ class LiftoffCompileEnvironment {
WasmFeatures detected2;
WasmCompilationResult result1 = ExecuteLiftoffCompilation(
&env, test_func.body, test_func.code->index(), kNoDebugging,
- isolate_->counters(), &detected1);
+ LiftoffOptions{}.set_detected_features(&detected1));
WasmCompilationResult result2 = ExecuteLiftoffCompilation(
&env, test_func.body, test_func.code->index(), kNoDebugging,
- isolate_->counters(), &detected2);
+ LiftoffOptions{}.set_detected_features(&detected2));
CHECK(result1.succeeded());
CHECK(result2.succeeded());
@@ -71,11 +71,12 @@ class LiftoffCompileEnvironment {
auto test_func = AddFunction(return_types, param_types, raw_function_bytes);
CompilationEnv env = wasm_runner_.builder().CreateCompilationEnv();
- WasmFeatures detected;
std::unique_ptr<DebugSideTable> debug_side_table_via_compilation;
auto result = ExecuteLiftoffCompilation(
- &env, test_func.body, 0, kForDebugging, nullptr, &detected,
- base::VectorOf(breakpoints), &debug_side_table_via_compilation);
+ &env, test_func.body, 0, kForDebugging,
+ LiftoffOptions{}
+ .set_breakpoints(base::VectorOf(breakpoints))
+ .set_debug_sidetable(&debug_side_table_via_compilation));
CHECK(result.succeeded());
    // If there are no breakpoints, then {ExecuteLiftoffCompilation} should
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 1eddee2e6d..366a614cc0 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -1053,35 +1053,41 @@ WASM_EXEC_TEST(BrTable_loop_target) {
CHECK_EQ(1, r.Call(0));
}
-WASM_EXEC_TEST(F32ReinterpretI32) {
+WASM_EXEC_TEST(I32ReinterpretF32) {
WasmRunner<int32_t> r(execution_tier);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ float* memory =
+ r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
BUILD(r, WASM_I32_REINTERPRET_F32(
WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)));
- FOR_INT32_INPUTS(i) {
- int32_t expected = i;
- r.builder().WriteMemory(&memory[0], expected);
+ FOR_FLOAT32_INPUTS(i) {
+ float input = i;
+ int32_t expected = bit_cast<int32_t, float>(input);
+ r.builder().WriteMemory(&memory[0], input);
CHECK_EQ(expected, r.Call());
}
}
-WASM_EXEC_TEST(I32ReinterpretF32) {
- WasmRunner<int32_t, int32_t> r(execution_tier);
+WASM_EXEC_TEST(F32ReinterpretI32) {
+ WasmRunner<float> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r,
- WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
- WASM_F32_REINTERPRET_I32(WASM_LOCAL_GET(0))),
- WASM_I32V_2(107));
+ BUILD(r, WASM_F32_REINTERPRET_I32(
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
FOR_INT32_INPUTS(i) {
- int32_t expected = i;
- CHECK_EQ(107, r.Call(expected));
- CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+ int32_t input = i;
+ float expected = bit_cast<float, int32_t>(input);
+ r.builder().WriteMemory(&memory[0], input);
+ float result = r.Call();
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(result));
+ CHECK(IsSameNan(expected, result));
+ } else {
+ CHECK_EQ(expected, result);
+ }
}
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 940a42beaa..3c3ba34d5a 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -21,6 +21,25 @@ namespace v8 {
namespace internal {
namespace wasm {
+// Helper Functions.
+bool IsSameNan(float expected, float actual) {
+ // Sign is non-deterministic.
+ uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
+ uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
+ // Some implementations convert signaling NaNs to quiet NaNs.
+ return (expected_bits == actual_bits) ||
+ ((expected_bits | 0x00400000) == actual_bits);
+}
+
+bool IsSameNan(double expected, double actual) {
+ // Sign is non-deterministic.
+ uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
+ uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
+ // Some implementations convert signaling NaNs to quiet NaNs.
+ return (expected_bits == actual_bits) ||
+ ((expected_bits | 0x0008000000000000) == actual_bits);
+}
+
TestingModuleBuilder::TestingModuleBuilder(
Zone* zone, ManuallyImportedJSFunction* maybe_import,
TestExecutionTier tier, RuntimeExceptionSupport exception_support,
@@ -246,12 +265,12 @@ uint32_t TestingModuleBuilder::AddBytes(base::Vector<const byte> bytes) {
uint32_t TestingModuleBuilder::AddException(const FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
- uint32_t index = static_cast<uint32_t>(test_module_->exceptions.size());
- test_module_->exceptions.push_back(WasmException{sig});
+ uint32_t index = static_cast<uint32_t>(test_module_->tags.size());
+ test_module_->tags.push_back(WasmTag{sig});
Handle<WasmExceptionTag> tag = WasmExceptionTag::New(isolate_, index);
- Handle<FixedArray> table(instance_object_->exceptions_table(), isolate_);
+ Handle<FixedArray> table(instance_object_->tags_table(), isolate_);
table = isolate_->factory()->CopyFixedArrayAndGrow(table, 1);
- instance_object_->set_exceptions_table(*table);
+ instance_object_->set_tags_table(*table);
table->set(index, *tag);
return index;
}
@@ -351,7 +370,7 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
native_module_->ReserveCodeTableForTesting(kMaxFunctions);
auto instance = WasmInstanceObject::New(isolate_, module_object);
- instance->set_exceptions_table(*isolate_->factory()->empty_fixed_array());
+ instance->set_tags_table(*isolate_->factory()->empty_fixed_array());
instance->set_globals_start(globals_data_);
return instance;
}
@@ -549,21 +568,20 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
ForDebugging for_debugging =
native_module->IsTieredDown() ? kForDebugging : kNoDebugging;
- WasmFeatures unused_detected_features;
-
base::Optional<WasmCompilationResult> result;
if (builder_->test_execution_tier() ==
TestExecutionTier::kLiftoffForFuzzing) {
result.emplace(ExecuteLiftoffCompilation(
&env, func_body, function_->func_index, kForDebugging,
- isolate()->counters(), &unused_detected_features, {}, nullptr, 0,
- builder_->max_steps_ptr(), builder_->non_determinism_ptr()));
+ LiftoffOptions{}
+ .set_max_steps(builder_->max_steps_ptr())
+ .set_nondeterminism(builder_->non_determinism_ptr())));
} else {
WasmCompilationUnit unit(function_->func_index, builder_->execution_tier(),
for_debugging);
result.emplace(unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage().get(),
- isolate()->counters(), &unused_detected_features));
+ nullptr, nullptr));
}
WasmCode* code = native_module->PublishCode(
native_module->AddCompiledCode(std::move(*result)));
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 27e78a1737..8f6bb6074f 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -96,6 +96,10 @@ struct ManuallyImportedJSFunction {
Handle<JSFunction> js_function;
};
+// Helper Functions.
+bool IsSameNan(float expected, float actual);
+bool IsSameNan(double expected, double actual);
+
// A Wasm module builder. Globals are pre-set, however, memory and code may be
// progressively added by a test. In turn, we piecemeal update the runtime
// objects, i.e. {WasmInstanceObject}, {WasmModuleObject} and, if necessary,
@@ -439,7 +443,8 @@ class WasmRunnerBase : public InitializedHandleScope {
const char* name = nullptr) {
functions_.emplace_back(
new WasmFunctionCompiler(&zone_, sig, &builder_, name));
- builder().AddSignature(sig);
+ byte sig_index = builder().AddSignature(sig);
+ functions_.back()->SetSigIndex(sig_index);
return *functions_.back();
}
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
index 43b73f4bd0..aa6b755d0d 100644
--- a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
@@ -401,15 +401,6 @@ bool IsExtreme(float x) {
(abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}
-bool IsSameNan(float expected, float actual) {
- // Sign is non-deterministic.
- uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
- uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
- // Some implementations convert signaling NaNs to quiet NaNs.
- return (expected_bits == actual_bits) ||
- ((expected_bits | 0x00400000) == actual_bits);
-}
-
bool IsCanonical(float actual) {
uint32_t actual_bits = bit_cast<uint32_t>(actual);
// Canonical NaN has quiet bit and no payload.
@@ -574,15 +565,6 @@ bool IsExtreme(double x) {
(abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}
-bool IsSameNan(double expected, double actual) {
- // Sign is non-deterministic.
- uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
- uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
- // Some implementations convert signaling NaNs to quiet NaNs.
- return (expected_bits == actual_bits) ||
- ((expected_bits | 0x0008000000000000) == actual_bits);
-}
-
bool IsCanonical(double actual) {
uint64_t actual_bits = bit_cast<uint64_t>(actual);
// Canonical NaN has quiet bit and no payload.
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.h b/deps/v8/test/cctest/wasm/wasm-simd-utils.h
index 140acda05c..d7698f2060 100644
--- a/deps/v8/test/cctest/wasm/wasm-simd-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.h
@@ -139,13 +139,11 @@ bool PlatformCanRepresent(T x) {
// Returns true for very small and very large numbers. We skip these test
// values for the approximation instructions, which don't work at the extremes.
bool IsExtreme(float x);
-bool IsSameNan(float expected, float actual);
bool IsCanonical(float actual);
void CheckFloatResult(float x, float y, float expected, float actual,
bool exact = true);
bool IsExtreme(double x);
-bool IsSameNan(double expected, double actual);
bool IsCanonical(double actual);
void CheckDoubleResult(double x, double y, double expected, double actual,
bool exact = true);
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index cb12fa87cb..84871cccb6 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -607,7 +607,7 @@ class SideTable : public ZoneObject {
const int32_t stack_height;
};
struct CatchTarget {
- int exception_index;
+ int tag_index;
int target_control_index;
const byte* pc;
};
@@ -628,8 +628,8 @@ class SideTable : public ZoneObject {
target = pc;
}
- void Bind(const byte* pc, int exception_index, int target_control_index) {
- catch_targets.push_back({exception_index, target_control_index, pc});
+ void Bind(const byte* pc, int tag_index, int target_control_index) {
+ catch_targets.push_back({tag_index, target_control_index, pc});
}
// Reference this label from the given location.
@@ -665,12 +665,12 @@ class SideTable : public ZoneObject {
"control transfer @%zu: Δpc %d, stack %u->%u, exn: %d = "
"-%u\n",
offset, pcdiff, ref.stack_height, target_stack_height,
- p.exception_index, spdiff);
+ p.tag_index, spdiff);
CatchControlTransferEntry entry;
entry.pc_diff = pcdiff;
entry.sp_diff = spdiff;
entry.target_arity = arity;
- entry.exception_index = p.exception_index;
+ entry.tag_index = p.tag_index;
entry.target_control_index = p.target_control_index;
catch_entries.emplace_back(entry);
}
@@ -731,10 +731,9 @@ class SideTable : public ZoneObject {
};
int max_exception_arity = 0;
if (module) {
- for (auto& exception : module->exceptions) {
- max_exception_arity =
- std::max(max_exception_arity,
- static_cast<int>(exception.sig->parameter_count()));
+ for (auto& tag : module->tags) {
+ max_exception_arity = std::max(
+ max_exception_arity, static_cast<int>(tag.sig->parameter_count()));
}
}
for (BytecodeIterator i(code->start, code->end, &code->locals);
@@ -898,7 +897,7 @@ class SideTable : public ZoneObject {
// Only pop the exception stack once when we enter the first catch.
exception_stack.pop_back();
}
- ExceptionIndexImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ TagIndexImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
Control* c = &control_stack.back();
copy_unreachable();
TRACE("control @%u: Catch\n", i.pc_offset());
@@ -911,9 +910,8 @@ class SideTable : public ZoneObject {
DCHECK_IMPLIES(!unreachable,
stack_height >= c->end_label->target_stack_height);
- const FunctionSig* exception_sig = module->exceptions[imm.index].sig;
- int catch_in_arity =
- static_cast<int>(exception_sig->parameter_count());
+ const FunctionSig* tag_sig = module->tags[imm.index].sig;
+ int catch_in_arity = static_cast<int>(tag_sig->parameter_count());
stack_height = c->end_label->target_stack_height + catch_in_arity;
break;
}
@@ -1459,24 +1457,22 @@ class WasmInterpreterInternals {
}
CatchControlTransferEntry* handler = nullptr;
for (auto& entry : it->second) {
- if (entry.exception_index < 0) {
+ if (entry.tag_index < 0) {
ResetStack(StackHeight() - entry.sp_diff);
*pc += entry.pc_diff;
- if (entry.exception_index == kRethrowOrDelegateExceptionIndex) {
+ if (entry.tag_index == kRethrowOrDelegateExceptionIndex) {
// Recursively try to find a handler in the next enclosing try block
// (for the implicit rethrow) or in the delegate target.
return JumpToHandlerDelta(code, exception_object, pc);
}
handler = &entry;
break;
- } else if (MatchingExceptionTag(exception_object,
- entry.exception_index)) {
+ } else if (MatchingExceptionTag(exception_object, entry.tag_index)) {
handler = &entry;
- const WasmException* exception =
- &module()->exceptions[entry.exception_index];
- const FunctionSig* sig = exception->sig;
+ const WasmTag* tag = &module()->tags[entry.tag_index];
+ const FunctionSig* sig = tag->sig;
int catch_in_arity = static_cast<int>(sig->parameter_count());
- DoUnpackException(exception, exception_object);
+ DoUnpackException(tag, exception_object);
DoStackTransfer(entry.sp_diff + catch_in_arity, catch_in_arity);
*pc += handler->pc_diff;
break;
@@ -2169,7 +2165,7 @@ class WasmInterpreterInternals {
*len += 1;
break;
case kExprI32AtomicWait: {
- if (!module()->has_shared_memory) {
+ if (!module()->has_shared_memory || !isolate_->allow_atomics_wait()) {
DoTrap(kTrapUnreachable, pc);
return false;
}
@@ -2189,7 +2185,7 @@ class WasmInterpreterInternals {
break;
}
case kExprI64AtomicWait: {
- if (!module()->has_shared_memory) {
+ if (!module()->has_shared_memory || !isolate_->allow_atomics_wait()) {
DoTrap(kTrapUnreachable, pc);
return false;
}
@@ -3083,30 +3079,16 @@ class WasmInterpreterInternals {
return false;
}
- void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint32_t value) {
- encoded_values->set((*encoded_index)++, Smi::FromInt(value >> 16));
- encoded_values->set((*encoded_index)++, Smi::FromInt(value & 0xffff));
- }
-
- void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint64_t value) {
- EncodeI32ExceptionValue(encoded_values, encoded_index,
- static_cast<uint32_t>(value >> 32));
- EncodeI32ExceptionValue(encoded_values, encoded_index,
- static_cast<uint32_t>(value));
- }
-
// Allocate, initialize and throw a new exception. The exception values are
// being popped off the operand stack. Returns true if the exception is being
// handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoThrowException(const WasmException* exception,
+ bool DoThrowException(const WasmTag* tag,
uint32_t index) V8_WARN_UNUSED_RESULT {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<WasmExceptionTag> exception_tag(
- WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
+ WasmExceptionTag::cast(instance_object_->tags_table().get(index)),
isolate_);
- uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
+ uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(tag);
Handle<WasmExceptionPackage> exception_object =
WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
@@ -3114,7 +3096,7 @@ class WasmInterpreterInternals {
// Encode the exception values on the operand stack into the exception
// package allocated above. This encoding has to be in sync with other
// backends so that exceptions can be passed between them.
- const WasmExceptionSig* sig = exception->sig;
+ const WasmTagSig* sig = tag->sig;
uint32_t encoded_index = 0;
sp_t base_index = StackHeight() - sig->parameter_count();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
@@ -3199,38 +3181,22 @@ class WasmInterpreterInternals {
Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag(
isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
Handle<Object> expected_tag =
- handle(instance_object_->exceptions_table().get(index), isolate_);
+ handle(instance_object_->tags_table().get(index), isolate_);
DCHECK(expected_tag->IsWasmExceptionTag());
return expected_tag.is_identical_to(caught_tag);
}
- void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint32_t* value) {
- uint32_t msb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
- uint32_t lsb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
- *value = (msb << 16) | (lsb & 0xffff);
- }
-
- void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint64_t* value) {
- uint32_t lsb = 0, msb = 0;
- DecodeI32ExceptionValue(encoded_values, encoded_index, &msb);
- DecodeI32ExceptionValue(encoded_values, encoded_index, &lsb);
- *value = (static_cast<uint64_t>(msb) << 32) | static_cast<uint64_t>(lsb);
- }
-
// Unpack the values encoded in the given exception. The exception values are
// pushed onto the operand stack. Callers must perform a tag check to ensure
// the encoded values match the expected signature of the exception.
- void DoUnpackException(const WasmException* exception,
- Handle<Object> exception_object) {
+ void DoUnpackException(const WasmTag* tag, Handle<Object> exception_object) {
Handle<FixedArray> encoded_values =
Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
// Decode the exception values from the given exception package and push
// them onto the operand stack. This encoding has to be in sync with other
// backends so that exceptions can be passed between them.
- const WasmExceptionSig* sig = exception->sig;
+ const WasmTagSig* sig = tag->sig;
uint32_t encoded_index = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
WasmValue value;
@@ -3299,7 +3265,7 @@ class WasmInterpreterInternals {
}
Push(value);
}
- DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(exception), encoded_index);
+ DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(tag), encoded_index);
}
void Execute(InterpreterCode* code, pc_t pc, int max) {
@@ -3389,11 +3355,11 @@ class WasmInterpreterInternals {
break;
}
case kExprThrow: {
- ExceptionIndexImmediate<Decoder::kNoValidation> imm(&decoder,
- code->at(pc + 1));
+ TagIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
CommitPc(pc); // Needed for local unwinding.
- const WasmException* exception = &module()->exceptions[imm.index];
- if (!DoThrowException(exception, imm.index)) return;
+ const WasmTag* tag = &module()->tags[imm.index];
+ if (!DoThrowException(tag, imm.index)) return;
ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
continue; // Do not bump pc.
}
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.h b/deps/v8/test/common/wasm/wasm-interpreter.h
index ab89f5dc15..97fdaa426f 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.h
+++ b/deps/v8/test/common/wasm/wasm-interpreter.h
@@ -40,7 +40,7 @@ struct ControlTransferEntry {
};
struct CatchControlTransferEntry : public ControlTransferEntry {
- int exception_index;
+ int tag_index;
int target_control_index;
};
diff --git a/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js b/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
index 6820dc05de..962ceeca58 100644
--- a/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
+++ b/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --noalways-opt --stress-flush-bytecode
-// Flags: --expose-gc
+// Flags: --allow-natives-syntax --opt --noalways-opt --stress-flush-code
+// Flags: --expose-gc --flush-bytecode
Debug = debug.Debug
diff --git a/deps/v8/test/debugger/debug/regress/regress-9067.js b/deps/v8/test/debugger/debug/regress/regress-9067.js
index 300c1d25df..ed91811309 100644
--- a/deps/v8/test/debugger/debug/regress/regress-9067.js
+++ b/deps/v8/test/debugger/debug/regress/regress-9067.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --stress-flush-bytecode
+// Flags: --expose-gc --stress-flush-code --flush-bytecode
var Debug = debug.Debug
var bp;
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 5bc6c6c30e..608e4875ca 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -26,6 +26,7 @@ FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator_;
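+  // Disallow Atomics.wait so fuzzer executions can never block indefinitely.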
+ create_params.allow_atomics_wait = false;
isolate_ = v8::Isolate::New(create_params);
{
diff --git a/deps/v8/test/fuzzer/inspector-fuzzer.cc b/deps/v8/test/fuzzer/inspector-fuzzer.cc
index c617248db6..348e79820c 100644
--- a/deps/v8/test/fuzzer/inspector-fuzzer.cc
+++ b/deps/v8/test/fuzzer/inspector-fuzzer.cc
@@ -254,9 +254,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
inspector->Set(ToV8String(isolate, "callWithScheduledBreak"),
v8::FunctionTemplate::New(
isolate, &InspectorExtension::CallWithScheduledBreak));
- inspector->Set(ToV8String(isolate, "allowAccessorFormatting"),
- v8::FunctionTemplate::New(
- isolate, &InspectorExtension::AllowAccessorFormatting));
inspector->Set(
ToV8String(isolate, "markObjectAsNotInspectable"),
v8::FunctionTemplate::New(
@@ -389,21 +386,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
data->CancelPauseOnNextStatement(context_group_id);
}
- static void AllowAccessorFormatting(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 1 || !args[0]->IsObject()) {
- return;
- }
- v8::Local<v8::Object> object = args[0].As<v8::Object>();
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::Private> shouldFormatAccessorsPrivate = v8::Private::ForApi(
- isolate, ToV8String(isolate, "allowAccessorFormatting"));
- object
- ->SetPrivate(isolate->GetCurrentContext(), shouldFormatAccessorsPrivate,
- v8::Null(isolate))
- .ToChecked();
- }
-
static void MarkObjectAsNotInspectable(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsObject()) {
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index 58642bb49c..fc353e51ee 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -19,8 +19,8 @@ namespace fuzzer {
class WasmCodeFuzzer : public WasmExecutionFuzzer {
bool GenerateModule(Isolate* isolate, Zone* zone,
- base::Vector<const uint8_t> data,
- ZoneBuffer* buffer) override {
+ base::Vector<const uint8_t> data, ZoneBuffer* buffer,
+ bool liftoff_as_reference) override {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index a8685ed09c..b39d49e500 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -89,7 +89,8 @@ class DataRange {
}
};
-ValueType GetValueType(DataRange* data) {
+ValueType GetValueType(uint32_t num_types, DataRange* data,
+ bool liftoff_as_reference) {
constexpr ValueType types[] = {
kWasmI32, kWasmI64,
kWasmF32, kWasmF64,
@@ -97,6 +98,13 @@ ValueType GetValueType(DataRange* data) {
kWasmFuncRef, kWasmEqRef,
kWasmAnyRef, ValueType::Ref(HeapType(HeapType::kData), kNullable)};
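+  // With liftoff_as_reference enabled, also allow references to the module's
+  // own indexed types: ids past the built-in list above map to a nullable
+  // reference to type (id - arraysize(types)).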
+ if (liftoff_as_reference) {
+ uint32_t id = data->get<uint8_t>() % (arraysize(types) + num_types);
+ if (id >= arraysize(types)) {
+ return ValueType::Ref(id - arraysize(types), kNullable);
+ }
+ return types[id];
+ }
return types[data->get<uint8_t>() % arraysize(types)];
}
@@ -484,7 +492,9 @@ class WasmGenerator {
}
void drop(DataRange* data) {
- Generate(GetValueType(data), data);
+ Generate(GetValueType(builder_->builder()->NumTypes(), data,
+ liftoff_as_reference_),
+ data);
builder_->Emit(kExprDrop);
}
@@ -740,7 +750,77 @@ class WasmGenerator {
ref_null(type, data);
}
}
+ void new_object(HeapType type, DataRange* data) {
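+    // For indexed struct/array types, generate the required operands plus an
+    // rtt.canon and emit the corresponding *_new_default / *_new_with_rtt
+    // instruction; for anything else fall back to a null reference.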
+ if (liftoff_as_reference_ && type.is_index()) {
+ bool new_default = data->get<uint8_t>() % 2;
+ uint32_t index = type.ref_index();
+ if (builder_->builder()->IsStructType(index)) {
+ if (new_default) {
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprStructNewDefault);
+ builder_->EmitU32V(index);
+ } else {
+ StructType* struct_gen = builder_->builder()->GetStructType(index);
+ int field_count = struct_gen->field_count();
+ for (int i = 0; i < field_count; i++) {
+ Generate(struct_gen->field(i), data);
+ }
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprStructNewWithRtt);
+ builder_->EmitU32V(index);
+ }
+ return;
+ } else if (builder_->builder()->IsArrayType(index)) {
+ if (new_default) {
+ Generate(kWasmI32, data);
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprArrayNewDefault);
+ builder_->EmitU32V(index);
+ } else {
+ Generate(builder_->builder()->GetArrayType(index)->element_type(),
+ data);
+ Generate(kWasmI32, data);
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprArrayNewWithRtt);
+ builder_->EmitU32V(index);
+ }
+ return;
+ }
+ }
+ ref_null(type, data);
+ }
+ template <ValueKind wanted_kind>
+ void struct_get(DataRange* data) {
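+    // Collect every (struct type, field index) pair in the module whose field
+    // kind matches {wanted_kind}. If there is none, generate a plain value of
+    // that kind; otherwise pick a pair at random, generate a reference to the
+    // struct and emit a struct.get.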
+ WasmModuleBuilder* builder = builder_->builder();
+ int num_types = builder->NumTypes();
+ ZoneVector<uint32_t> field_index(builder->zone());
+ ZoneVector<uint32_t> struct_index(builder->zone());
+ for (int i = 0; i < num_types; i++) {
+ if (builder->IsStructType(i)) {
+ int field_count = builder->GetStructType(i)->field_count();
+ for (int index = 0; index < field_count; index++) {
+ if (builder->GetStructType(i)->field(index).kind() == wanted_kind) {
+ field_index.push_back(index);
+ struct_index.push_back(i);
+ }
+ }
+ }
+ }
+ if (field_index.empty()) {
+ Generate<wanted_kind>(data);
+ return;
+ }
+ int index = data->get<uint8_t>() % static_cast<int>(field_index.size());
+ GenerateOptRef(HeapType(struct_index[index]), data);
+ builder_->EmitWithPrefix(kExprStructGet);
+ builder_->EmitU32V(struct_index[index]);
+ builder_->EmitU32V(field_index[index]);
+ }
using GenerateFn = void (WasmGenerator::*const)(DataRange*);
using GenerateFnWithHeap = void (WasmGenerator::*const)(HeapType, DataRange*);
@@ -780,11 +860,13 @@ class WasmGenerator {
public:
WasmGenerator(WasmFunctionBuilder* fn, const std::vector<uint32_t>& functions,
const std::vector<ValueType>& globals,
- const std::vector<uint8_t>& mutable_globals, DataRange* data)
+ const std::vector<uint8_t>& mutable_globals, DataRange* data,
+ bool liftoff_as_reference)
: builder_(fn),
functions_(functions),
globals_(globals),
- mutable_globals_(mutable_globals) {
+ mutable_globals_(mutable_globals),
+ liftoff_as_reference_(liftoff_as_reference) {
FunctionSig* sig = fn->signature();
blocks_.emplace_back();
for (size_t i = 0; i < sig->return_count(); ++i) {
@@ -794,7 +876,8 @@ class WasmGenerator {
constexpr uint32_t kMaxLocals = 32;
locals_.resize(data->get<uint8_t>() % kMaxLocals);
for (ValueType& local : locals_) {
- local = GetValueType(data);
+ local = GetValueType(builder_->builder()->NumTypes(), data,
+ liftoff_as_reference_);
fn->AddLocal(local);
}
}
@@ -832,7 +915,7 @@ class WasmGenerator {
std::vector<int> try_blocks_;
std::vector<int> catch_blocks_;
bool has_simd_;
-
+ bool liftoff_as_reference_;
static constexpr uint32_t kMaxRecursionDepth = 64;
bool recursion_limit_reached() {
@@ -1045,7 +1128,9 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
&WasmGenerator::call<kI32>,
&WasmGenerator::call_indirect<kI32>,
- &WasmGenerator::try_block<kI32>};
+ &WasmGenerator::try_block<kI32>,
+
+ &WasmGenerator::struct_get<kI32>};
GenerateOneOf(alternatives, data);
}
@@ -1159,7 +1244,9 @@ void WasmGenerator::Generate<kI64>(DataRange* data) {
&WasmGenerator::call<kI64>,
&WasmGenerator::call_indirect<kI64>,
- &WasmGenerator::try_block<kI64>};
+ &WasmGenerator::try_block<kI64>,
+
+ &WasmGenerator::struct_get<kI64>};
GenerateOneOf(alternatives, data);
}
@@ -1216,7 +1303,9 @@ void WasmGenerator::Generate<kF32>(DataRange* data) {
&WasmGenerator::call<kF32>,
&WasmGenerator::call_indirect<kF32>,
- &WasmGenerator::try_block<kF32>};
+ &WasmGenerator::try_block<kF32>,
+
+ &WasmGenerator::struct_get<kF32>};
GenerateOneOf(alternatives, data);
}
@@ -1273,7 +1362,9 @@ void WasmGenerator::Generate<kF64>(DataRange* data) {
&WasmGenerator::call<kF64>,
&WasmGenerator::call_indirect<kF64>,
- &WasmGenerator::try_block<kF64>};
+ &WasmGenerator::try_block<kF64>,
+
+ &WasmGenerator::struct_get<kF64>};
GenerateOneOf(alternatives, data);
}
@@ -1552,7 +1643,8 @@ void WasmGenerator::Generate(ValueType type, DataRange* data) {
void WasmGenerator::GenerateOptRef(HeapType type, DataRange* data) {
constexpr GenerateFnWithHeap alternatives[] = {
- &WasmGenerator::ref_null, &WasmGenerator::get_local_opt_ref};
+ &WasmGenerator::ref_null, &WasmGenerator::get_local_opt_ref,
+ &WasmGenerator::new_object};
GenerateOneOf(alternatives, type, data);
}
@@ -1561,7 +1653,8 @@ std::vector<ValueType> WasmGenerator::GenerateTypes(DataRange* data) {
std::vector<ValueType> types;
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
for (int i = 0; i < num_params; ++i) {
- types.push_back(GetValueType(data));
+ types.push_back(GetValueType(builder_->builder()->NumTypes(), data,
+ liftoff_as_reference_));
}
return types;
}
@@ -1615,7 +1708,8 @@ void WasmGenerator::ConsumeAndGenerate(
enum SigKind { kFunctionSig, kExceptionSig };
-FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind) {
+FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
+ uint32_t num_types, bool liftoff_as_reference) {
// Generate enough parameters to spill some to the stack.
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
int num_returns = sig_kind == kFunctionSig
@@ -1623,8 +1717,12 @@ FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind) {
: 0;
FunctionSig::Builder builder(zone, num_returns, num_params);
- for (int i = 0; i < num_returns; ++i) builder.AddReturn(GetValueType(data));
- for (int i = 0; i < num_params; ++i) builder.AddParam(GetValueType(data));
+ for (int i = 0; i < num_returns; ++i) {
+ builder.AddReturn(GetValueType(num_types, data, liftoff_as_reference));
+ }
+ for (int i = 0; i < num_params; ++i) {
+ builder.AddParam(GetValueType(num_types, data, liftoff_as_reference));
+ }
return builder.Build();
}
@@ -1632,21 +1730,44 @@ FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind) {
class WasmCompileFuzzer : public WasmExecutionFuzzer {
bool GenerateModule(Isolate* isolate, Zone* zone,
- base::Vector<const uint8_t> data,
- ZoneBuffer* buffer) override {
+ base::Vector<const uint8_t> data, ZoneBuffer* buffer,
+ bool liftoff_as_reference) override {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
DataRange range(data);
std::vector<uint32_t> function_signatures;
+
+    // Add struct and array types first so that we get a chance to generate
+    // these types in function signatures.
+ if (liftoff_as_reference) {
+ uint32_t count = 4;
+ StructType::Builder struct_builder(zone, count);
+ struct_builder.AddField(kWasmI32, false);
+ struct_builder.AddField(kWasmI64, false);
+ struct_builder.AddField(kWasmF32, false);
+ struct_builder.AddField(kWasmF64, false);
+ StructType* struct_fuz = struct_builder.Build();
+ builder.AddStructType(struct_fuz);
+ ArrayType* array_fuzI32 = zone->New<ArrayType>(kWasmI32, true);
+ ArrayType* array_fuzI64 = zone->New<ArrayType>(kWasmI64, true);
+ ArrayType* array_fuzF32 = zone->New<ArrayType>(kWasmF32, true);
+ ArrayType* array_fuzF64 = zone->New<ArrayType>(kWasmF64, true);
+ builder.AddArrayType(array_fuzI32);
+ builder.AddArrayType(array_fuzI64);
+ builder.AddArrayType(array_fuzF32);
+ builder.AddArrayType(array_fuzF64);
+ }
+
function_signatures.push_back(builder.AddSignature(sigs.i_iii()));
static_assert(kMaxFunctions >= 1, "need min. 1 function");
int num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
for (int i = 1; i < num_functions; ++i) {
- FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig);
+ FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig,
+ builder.NumTypes(), liftoff_as_reference);
uint32_t signature_index = builder.AddSignature(sig);
function_signatures.push_back(signature_index);
}
@@ -1659,12 +1780,14 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
int num_exceptions = 1 + (range.get<uint8_t>() % kMaxExceptions);
for (int i = 0; i < num_exceptions; ++i) {
- FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig);
+ FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig,
+ builder.NumTypes(), liftoff_as_reference);
builder.AddException(sig);
}
for (int i = 0; i < num_globals; ++i) {
- ValueType type = GetValueType(&range);
+ ValueType type =
+ GetValueType(builder.NumTypes(), &range, liftoff_as_reference);
// 1/8 of globals are immutable.
const bool mutability = (range.get<uint8_t>() % 8) != 0;
builder.AddGlobal(type, mutability, WasmInitExpr());
@@ -1680,7 +1803,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmFunctionBuilder* f = builder.AddFunction(sig);
WasmGenerator gen(f, function_signatures, globals, mutable_globals,
- &function_range);
+ &function_range, liftoff_as_reference);
base::Vector<const ValueType> return_types(sig->returns().begin(),
sig->return_count());
gen.Generate(return_types, &function_range);
@@ -1693,12 +1816,10 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
for (int i = 0; i < num_functions; ++i) {
builder.SetIndirectFunction(i, i);
}
-
builder.SetMaxMemorySize(32);
// We enable shared memory to be able to test atomics.
builder.SetHasSharedMemory();
builder.WriteTo(buffer);
-
return true;
}
};
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 991c1fb461..3948743f94 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -11,6 +11,7 @@
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
+#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-feature-flags.h"
@@ -59,12 +60,12 @@ Handle<WasmModuleObject> CompileReferenceModule(Zone* zone, Isolate* isolate,
++i) {
auto& func = module->functions[i];
base::Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
- WasmFeatures unused_detected_features;
FunctionBody func_body(func.sig, func.code.offset(), func_code.begin(),
func_code.end());
auto result = ExecuteLiftoffCompilation(
- &env, func_body, func.func_index, kForDebugging, isolate->counters(),
- &unused_detected_features, {}, nullptr, 0, max_steps, nondeterminism);
+ &env, func_body, func.func_index, kForDebugging,
+ LiftoffOptions{}.set_max_steps(max_steps).set_nondeterminism(
+ nondeterminism));
native_module->PublishCode(
native_module->AddCompiledCode(std::move(result)));
}
@@ -198,7 +199,7 @@ PrintSig PrintParameters(const FunctionSig* sig) {
PrintSig PrintReturns(const FunctionSig* sig) {
return {sig->return_count(), [=](size_t i) { return sig->GetReturn(i); }};
}
-const char* ValueTypeToConstantName(ValueType type) {
+std::string ValueTypeToConstantName(ValueType type) {
switch (type.kind()) {
case kI32:
return "kWasmI32";
@@ -216,17 +217,44 @@ const char* ValueTypeToConstantName(ValueType type) {
return "kWasmExternRef";
case HeapType::kFunc:
return "kWasmFuncRef";
+ case HeapType::kEq:
+ return "kWasmEqRef";
case HeapType::kAny:
+ return "kWasmAnyRef";
+ case HeapType::kData:
+ return "wasmOptRefType(kWasmDataRef)";
case HeapType::kI31:
+ return "wasmOptRefType(kWasmI31Ref)";
case HeapType::kBottom:
default:
- // TODO(7748): Implement these if fuzzing for them is enabled.
- UNREACHABLE();
+ return "wasmOptRefType(" + std::to_string(type.ref_index()) + ")";
}
default:
UNREACHABLE();
}
}
+
+std::string HeapTypeToConstantName(HeapType heap_type) {
+ switch (heap_type.representation()) {
+ case HeapType::kFunc:
+ return "kWasmFuncRef";
+ case HeapType::kExtern:
+ return "kWasmExternRef";
+ case HeapType::kEq:
+ return "kWasmEqRef";
+ case HeapType::kI31:
+ return "kWasmI31Ref";
+ case HeapType::kData:
+ return "kWasmDataRef";
+ case HeapType::kAny:
+ return "kWasmAnyRef";
+ case HeapType::kBottom:
+ UNREACHABLE();
+ default:
+ return std::to_string(heap_type.ref_index());
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const PrintSig& print) {
os << "[";
for (size_t i = 0; i < print.num; ++i) {
@@ -292,10 +320,28 @@ void AppendInitExpr(std::ostream& os, ModuleWireBytes wire_bytes,
os << "F64Const(" << bit_cast<double>(result);
break;
}
+ case kSimdPrefix: {
+ DCHECK_LE(2 + kSimd128Size, expr.length());
+ DCHECK_EQ(static_cast<WasmOpcode>(pc[1]), kExprS128Const & 0xff);
+ os << "S128Const([";
+ for (int i = 0; i < kSimd128Size; i++) {
+ os << int(decoder.read_u8<Decoder::kNoValidation>(pc + 2 + i));
+ if (i + 1 < kSimd128Size) os << ", ";
+ }
+ os << "]";
+ break;
+ }
case kExprRefFunc:
os << "RefFunc("
<< decoder.read_u32v<Decoder::kNoValidation>(pc + 1, &length);
break;
+ case kExprRefNull: {
+ HeapType heap_type =
+ value_type_reader::read_heap_type<Decoder::kNoValidation>(
+ &decoder, pc + 1, &length, nullptr, WasmFeatures::All());
+ os << "RefNull(" << HeapTypeToConstantName(heap_type);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -334,7 +380,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
"can be\n"
"// found in the LICENSE file.\n"
"\n"
- "// Flags: --wasm-staging\n"
+ "// Flags: --wasm-staging --experimental-wasm-gc\n"
"\n"
"load('test/mjsunit/wasm/wasm-module-builder.js');\n"
"\n"
@@ -361,16 +407,38 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
os << ");\n";
}
- // TODO(7748): Support array/struct types.
#if DEBUG
for (uint8_t kind : module->type_kinds) {
- DCHECK_EQ(kWasmFunctionTypeCode, kind);
+ DCHECK(kWasmArrayTypeCode == kind || kWasmStructTypeCode == kind ||
+ kWasmFunctionTypeCode == kind);
}
#endif
- for (TypeDefinition type : module->types) {
- const FunctionSig* sig = type.function_sig;
- os << "builder.addType(makeSig(" << PrintParameters(sig) << ", "
- << PrintReturns(sig) << "));\n";
+
+ for (int i = 0; i < static_cast<int>(module->types.size()); i++) {
+ if (module->has_struct(i)) {
+ const StructType* struct_type = module->types[i].struct_type;
+ os << "builder.addStruct([";
+ int field_count = struct_type->field_count();
+ for (int index = 0; index < field_count; index++) {
+ os << "makeField(" << ValueTypeToConstantName(struct_type->field(index))
+ << ", " << (struct_type->mutability(index) ? "true" : "false")
+ << ")";
+ if (index + 1 < field_count)
+ os << ", ";
+ else
+ os << "]);\n";
+ }
+ } else if (module->has_array(i)) {
+ const ArrayType* array_type = module->types[i].array_type;
+ os << "builder.addArray("
+ << ValueTypeToConstantName(array_type->element_type()) << ","
+ << (array_type->mutability() ? "true" : "false") << ");\n";
+ } else {
+ DCHECK(module->has_signature(i));
+ const FunctionSig* sig = module->types[i].function_sig;
+ os << "builder.addType(makeSig(" << PrintParameters(sig) << ", "
+ << PrintReturns(sig) << "));\n";
+ }
}
Zone tmp_zone(isolate->allocator(), ZONE_NAME);
@@ -521,7 +589,7 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
bool liftoff_as_reference = false;
#endif
if (!data.empty()) data += 1;
- if (!GenerateModule(i_isolate, &zone, data, &buffer)) {
+ if (!GenerateModule(i_isolate, &zone, data, &buffer, liftoff_as_reference)) {
return;
}
@@ -530,6 +598,10 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
+ if (require_valid && FLAG_wasm_fuzzer_gen_test) {
+ GenerateTestCase(i_isolate, wire_bytes, true);
+ }
+
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
MaybeHandle<WasmModuleObject> compiled_module;
{
@@ -544,8 +616,7 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
i_isolate, enabled_features, &interpreter_thrower, wire_bytes);
}
bool compiles = !compiled_module.is_null();
-
- if (FLAG_wasm_fuzzer_gen_test) {
+ if (!require_valid && FLAG_wasm_fuzzer_gen_test) {
GenerateTestCase(i_isolate, wire_bytes, compiles);
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 5ef35a0349..b7c565cd27 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -48,7 +48,8 @@ class WasmExecutionFuzzer {
protected:
virtual bool GenerateModule(Isolate* isolate, Zone* zone,
base::Vector<const uint8_t> data,
- ZoneBuffer* buffer) = 0;
+ ZoneBuffer* buffer,
+ bool liftoff_as_reference) = 0;
};
} // namespace fuzzer
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index 14c1704daa..4fce54abfc 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -44,21 +44,7 @@ v8_executable("inspector-test") {
data_deps = [ "../../tools:v8_testrunner" ]
- data = [
- "console/",
- "counters/",
- "cpu-profiler/",
- "debugger/",
- "heap-profiler/",
- "inspector.status",
- "protocol-test.js",
- "runtime/",
- "runtime-call-stats/",
- "sessions/",
- "testcfg.py",
- "type-profiler/",
- "wasm-inspector-test.js",
- ]
+ data = [ "./" ]
cflags = []
ldflags = []
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage-block.js b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
index 5dbd6ae819..867991f0f1 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage-block.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
// Flags: --no-stress-incremental-marking
var source =
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage.js b/deps/v8/test/inspector/cpu-profiler/coverage.js
index 02b9897100..76e692c6f4 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage.js
@@ -3,8 +3,9 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
// Flags: --no-stress-incremental-marking
+// Flags: --no-baseline-batch-compilation
var source =
`
diff --git a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
index 001f393148..a37b08bab7 100644
--- a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
@@ -9,12 +9,12 @@ test (test.js:21:2)
foo (test.js:10:2)
-- Promise.then --
-test (test.js:19:14)
+test (test.js:12:14)
(anonymous) (expr1.js:0:0)
foo (test.js:12:2)
-- Promise.then --
-test (test.js:19:14)
+test (test.js:12:14)
(anonymous) (expr1.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
index ab08d3d69b..a2b4b96439 100644
--- a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
@@ -1,16 +1,16 @@
Checks async stack for late .then handlers with gc
foo1 (test.js:11:2)
-- Promise.then --
-test (test.js:18:14)
+test (test.js:10:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
-- Promise.then --
-test (test.js:22:14)
+test (test.js:14:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
-- Promise.then --
-test (test.js:24:14)
+test (test.js:16:14)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt b/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt
index 80cbb1f317..c7cbea72c5 100644
--- a/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-set-timeout-expected.txt
@@ -2,10 +2,10 @@ Checks that async stack contains setTimeout
inner1 (test.js:11:4)
foo1 (test.js:14:2)
-- setTimeout --
-inner2 (test.js:18:4)
-foo2 (test.js:20:2)
+inner2 (test.js:11:4)
+foo2 (test.js:13:2)
-- setTimeout --
-inner3 (test.js:25:4)
-foo3 (test.js:27:2)
+inner3 (test.js:18:4)
+foo3 (test.js:20:2)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
index 21e7dc1632..176ed99f2a 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
@@ -3,78 +3,78 @@ Checks created frame for async call chain
Running test: testPromise
foo1 (test.js:10:2)
-- Promise.then --
-promise (test.js:20:14)
+promise (test.js:12:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThen
foo1 (test.js:10:2)
-- Promise.then --
-promiseThen (test.js:28:14)
+promiseThen (test.js:20:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
-- Promise.then --
-promiseThen (test.js:29:14)
+promiseThen (test.js:21:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThenThen
foo1 (test.js:10:2)
-- Promise.then --
-promiseThenThen (test.js:37:14)
+promiseThenThen (test.js:29:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:10:2)
-- Promise.then --
-promiseThenThen (test.js:38:14)
+promiseThenThen (test.js:30:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
-- Promise.then --
-promiseThenThen (test.js:37:25)
+promiseThenThen (test.js:29:25)
(anonymous) (expr.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:10:2)
-- Promise.then --
-promiseResolve (test.js:44:27)
+promiseResolve (test.js:36:27)
(anonymous) (expr.js:0:0)
Running test: testPromiseReject
foo1 (test.js:10:2)
-- Promise.catch --
-promiseReject (test.js:48:31)
+promiseReject (test.js:40:31)
(anonymous) (expr.js:0:0)
Running test: testPromiseAll
foo1 (test.js:10:2)
-- Promise.then --
-promiseAll (test.js:52:44)
+promiseAll (test.js:44:44)
(anonymous) (expr.js:0:0)
Running test: testPromiseRace
foo1 (test.js:10:2)
-- Promise.then --
-promiseRace (test.js:56:45)
+promiseRace (test.js:48:45)
(anonymous) (expr.js:0:0)
Running test: testThenableJob1
foo1 (test.js:10:2)
-- Promise.then --
-thenableJob1 (test.js:60:72)
+thenableJob1 (test.js:52:72)
(anonymous) (expr.js:0:0)
Running test: testThenableJob2
foo1 (test.js:10:2)
-- Promise.then --
-thenableJob2 (test.js:64:57)
+thenableJob2 (test.js:56:57)
(anonymous) (expr.js:0:0)
@@ -82,10 +82,10 @@ Running test: testSetTimeouts
foo1 (test.js:10:2)
(anonymous) (test.js:72:25)
-- setTimeout --
-(anonymous) (test.js:72:6)
+(anonymous) (test.js:64:6)
-- setTimeout --
-(anonymous) (test.js:71:4)
+(anonymous) (test.js:63:4)
-- setTimeout --
-setTimeouts (test.js:70:2)
+setTimeouts (test.js:62:2)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
index 86860fdb39..4de838252e 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
@@ -3,94 +3,94 @@ Checks that async chains for promises are correct.
Running test: testPromise
foo1 (test.js:9:2)
-- Promise.then --
-promise (test.js:19:14)
+promise (test.js:12:14)
(anonymous) (testPromise.js:0:0)
Running test: testPromiseResolvedBySetTimeout
foo1 (test.js:9:2)
-- Promise.then --
-promiseResolvedBySetTimeout (test.js:27:14)
+promiseResolvedBySetTimeout (test.js:20:14)
(anonymous) (testPromiseResolvedBySetTimeout.js:0:0)
Running test: testPromiseAll
foo1 (test.js:9:2)
-- Promise.then --
-promiseAll (test.js:37:35)
+promiseAll (test.js:30:35)
(anonymous) (testPromiseAll.js:0:0)
Running test: testPromiseAllReverseOrder
foo1 (test.js:9:2)
-- Promise.then --
-promiseAllReverseOrder (test.js:48:35)
+promiseAllReverseOrder (test.js:41:35)
(anonymous) (testPromiseAllReverseOrder.js:0:0)
Running test: testPromiseRace
foo1 (test.js:9:2)
-- Promise.then --
-promiseRace (test.js:59:36)
+promiseRace (test.js:52:36)
(anonymous) (testPromiseRace.js:0:0)
Running test: testTwoChainedCallbacks
foo1 (test.js:9:2)
-- Promise.then --
-twoChainedCallbacks (test.js:68:14)
+twoChainedCallbacks (test.js:61:14)
(anonymous) (testTwoChainedCallbacks.js:0:0)
foo2 (test.js:13:2)
-- Promise.then --
-twoChainedCallbacks (test.js:68:25)
+twoChainedCallbacks (test.js:61:25)
(anonymous) (testTwoChainedCallbacks.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:9:2)
-- Promise.then --
-promiseResolve (test.js:74:27)
+promiseResolve (test.js:67:27)
(anonymous) (testPromiseResolve.js:0:0)
foo2 (test.js:13:2)
-- Promise.then --
-promiseResolve (test.js:74:38)
+promiseResolve (test.js:67:38)
(anonymous) (testPromiseResolve.js:0:0)
Running test: testThenableJobResolvedInSetTimeout
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedInSetTimeout (test.js:86:40)
+thenableJobResolvedInSetTimeout (test.js:79:40)
(anonymous) (testThenableJobResolvedInSetTimeout.js:0:0)
Running test: testThenableJobResolvedInSetTimeoutWithStack
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedInSetTimeoutWithStack (test.js:104:40)
+thenableJobResolvedInSetTimeoutWithStack (test.js:97:40)
(anonymous) (testThenableJobResolvedInSetTimeoutWithStack.js:0:0)
Running test: testThenableJobResolvedByPromise
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedByPromise (test.js:118:40)
+thenableJobResolvedByPromise (test.js:111:40)
(anonymous) (testThenableJobResolvedByPromise.js:0:0)
Running test: testThenableJobResolvedByPromiseWithStack
foo1 (test.js:9:2)
-- Promise.then --
-thenableJobResolvedByPromiseWithStack (test.js:136:40)
+thenableJobResolvedByPromiseWithStack (test.js:129:40)
(anonymous) (testThenableJobResolvedByPromiseWithStack.js:0:0)
Running test: testLateThenCallback
foo1 (test.js:9:2)
-- Promise.then --
-lateThenCallback (test.js:145:12)
+lateThenCallback (test.js:138:12)
(anonymous) (testLateThenCallback.js:0:0)
@@ -98,36 +98,36 @@ Running test: testComplex
inner1 (test.js:154:6)
foo1 (test.js:156:4)
-- Promise.then --
-complex (test.js:202:5)
+complex (test.js:195:5)
(anonymous) (testComplex.js:0:0)
(anonymous) (test.js:207:8)
-- Promise.then --
-(anonymous) (test.js:206:8)
+(anonymous) (test.js:199:8)
-- Promise.then --
-(anonymous) (test.js:205:6)
+(anonymous) (test.js:198:6)
-- setTimeout --
-complex (test.js:204:2)
+complex (test.js:197:2)
(anonymous) (testComplex.js:0:0)
Running test: testReject
foo1 (test.js:9:2)
-- Promise.catch --
-reject (test.js:217:31)
+reject (test.js:210:31)
(anonymous) (testReject.js:0:0)
Running test: testFinally1
foo1 (test.js:9:2)
-- Promise.finally --
-finally1 (test.js:221:33)
+finally1 (test.js:214:33)
(anonymous) (testFinally1.js:0:0)
Running test: testFinally2
foo1 (test.js:9:2)
-- Promise.finally --
-finally2 (test.js:225:34)
+finally2 (test.js:218:34)
(anonymous) (testFinally2.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
index a894c0e810..e61a4e117e 100644
--- a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
+++ b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
@@ -8,10 +8,10 @@ asyncFact (test.js:9:2)
asyncFact (test.js:11:2)
-- async function --
-asyncFact (test.js:10:20)
-asyncFact (test.js:10:20)
-asyncFact (test.js:10:20)
-asyncFact (test.js:10:20)
+asyncFact (test.js:3:20)
+asyncFact (test.js:3:20)
+asyncFact (test.js:3:20)
+asyncFact (test.js:3:20)
(anonymous) (expr.js:0:0)
@@ -23,7 +23,7 @@ asyncFact (test.js:9:2)
asyncFact (test.js:11:2)
-- async function --
-asyncFact (test.js:10:20)
+asyncFact (test.js:3:20)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index be4e104424..6d941018b1 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -74,6 +74,11 @@ expression: (function* foo() { yield 1 })()
Running test: entriesInMapAndSet
expression: new Map([[1,2]])
+{
+ name : size
+ type : number
+ value : 1
+}
[[Entries]]:
[
[0] : {
@@ -95,6 +100,11 @@ expression: new Map([[1,2]])
]
expression: new Set([1])
+{
+ name : size
+ type : number
+ value : 1
+}
[[Entries]]:
[
[0] : {
diff --git a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
index c89a99a9db..4e2a5356bf 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
@@ -164,7 +164,7 @@ Runtime.evaluate compiled script with stack trace
[0] : {
columnNumber : 2
functionName : fooTop
- lineNumber : 10
+ lineNumber : 2
scriptId : <scriptId>
url : top-frame.js
}
@@ -247,7 +247,7 @@ Runtime.evaluate compile script error with stack trace
[0] : {
columnNumber : 2
functionName : fooTopFail
- lineNumber : 20
+ lineNumber : 2
scriptId : <scriptId>
url : top-frame-fail.js
}
diff --git a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
index ab47c245ee..f9ff354770 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
@@ -12,8 +12,8 @@ Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01 (kExprNop)
Getting current stack trace via "new Error().stack".
Error
at v8://test/getStack:1:1
- at func (<anonymous>:wasm-function[0]:0x21)
- at main (<anonymous>:wasm-function[1]:0x2f)
+ at func (wasm://wasm/8c388106:wasm-function[0]:0x21)
+ at main (wasm://wasm/24ba77a6:wasm-function[1]:0x2f)
at v8://test/runWasm:1:22
exports.main returned.
Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
index ff2955caee..dd9a73bd45 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
@@ -12,8 +12,8 @@ Result of evaluate (string):
Error: this is your stack trace:
-- skipped --
at call_debugger (<anonymous>:3:5)
- at call_func (<anonymous>:wasm-function[1]:0x37)
- at main (<anonymous>:wasm-function[2]:0x3e)
+ at call_func (wasm://wasm/37655946:wasm-function[1]:0x37)
+ at main (wasm://wasm/37655946:wasm-function[2]:0x3e)
at testFunction (<anonymous>:15:20)
at <anonymous>:1:1
Finished!
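The expectation updates above reflect that wasm frames in Error.stack now carry a wasm://wasm/<module-hash> script URL instead of <anonymous>. A minimal sketch of how that surfaces (not part of this patch; assumes d8 and the usual test/mjsunit/wasm/wasm-module-builder.js helpers):
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
// Import a JS function that throws, and call it from an exported wasm function.
const imp = builder.addImport('m', 'fail', kSig_v_v);
builder.addFunction('main', kSig_v_v)
    .addBody([kExprCallFunction, imp])
    .exportFunc();
const instance = builder.instantiate(
    {m: {fail: () => { throw new Error('this is your stack trace:'); }}});
try {
  instance.exports.main();
} catch (e) {
  // Wasm frames now print as
  //   at main (wasm://wasm/<hash>:wasm-function[1]:0x..)
  // rather than
  //   at main (<anonymous>:wasm-function[1]:0x..)
  print(e.stack);
}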
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index b04d2958ae..976ab4be68 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -445,9 +445,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
inspector->Set(isolate, "callWithScheduledBreak",
v8::FunctionTemplate::New(
isolate, &InspectorExtension::CallWithScheduledBreak));
- inspector->Set(isolate, "allowAccessorFormatting",
- v8::FunctionTemplate::New(
- isolate, &InspectorExtension::AllowAccessorFormatting));
inspector->Set(
isolate, "markObjectAsNotInspectable",
v8::FunctionTemplate::New(
@@ -583,21 +580,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
data->CancelPauseOnNextStatement(context_group_id);
}
- static void AllowAccessorFormatting(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 1 || !args[0]->IsObject()) {
- FATAL("Internal error: allowAccessorFormatting('object').");
- }
- v8::Local<v8::Object> object = args[0].As<v8::Object>();
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::Private> shouldFormatAccessorsPrivate = v8::Private::ForApi(
- isolate, ToV8String(isolate, "allowAccessorFormatting"));
- object
- ->SetPrivate(isolate->GetCurrentContext(), shouldFormatAccessorsPrivate,
- v8::Null(isolate))
- .ToChecked();
- }
-
static void MarkObjectAsNotInspectable(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsObject()) {
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index c693398c48..6f1e661b6a 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -43,12 +43,6 @@
}], # 'system == android'
##############################################################################
-['system == macos', {
- # Bug(v8:11399): Deadlock on Mac.
- 'cpu-profiler/console-profile-wasm': [SKIP],
-}],
-
-##############################################################################
['variant != default', {
# Issue 6167.
'debugger/eval-scopes': [PASS, FAIL],
@@ -532,4 +526,10 @@
'regress/regress-crbug-1195927': [SKIP],
}], # third_party_heap
+##############################################################################
+['variant == turboprop or variant == turboprop_as_toptier or variant == future or (tsan and not concurrent_marking)', {
+
+ 'cpu-profiler/coverage-block': [SKIP],
+}], # variant == turboprop or variant == turboprop_as_toptier or variant == future or (tsan and not concurrent_marking)
+
]
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index df4e62951e..8de62cee45 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -367,17 +367,6 @@ std::vector<int> IsolateData::GetSessionIds(int context_group_id) {
return result;
}
-bool IsolateData::formatAccessorsAsProperties(v8::Local<v8::Value> object) {
- v8::Local<v8::Context> context = isolate()->GetCurrentContext();
- v8::Local<v8::Private> shouldFormatAccessorsPrivate = v8::Private::ForApi(
- isolate(),
- v8::String::NewFromUtf8Literal(isolate(), "allowAccessorFormatting"));
- CHECK(object->IsObject());
- return object.As<v8::Object>()
- ->HasPrivate(context, shouldFormatAccessorsPrivate)
- .FromMaybe(false);
-}
-
bool IsolateData::isInspectableHeapObject(v8::Local<v8::Object> object) {
v8::Local<v8::Context> context = isolate()->GetCurrentContext();
v8::MicrotasksScope microtasks_scope(
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index eb6ed56ef7..921edfb462 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -114,7 +114,6 @@ class IsolateData : public v8_inspector::V8InspectorClient {
std::vector<int> GetSessionIds(int context_group_id);
// V8InspectorClient implementation.
- bool formatAccessorsAsProperties(v8::Local<v8::Value>) override;
v8::Local<v8::Context> ensureDefaultContextInGroup(
int context_group_id) override;
double currentTimeMS() override;
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 2b0f571e16..ac6e1405f4 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -188,7 +188,7 @@ InspectorTest.ContextGroup = class {
"PropertyDescriptor","object","get","set","value","configurable",
"enumerable","symbol","getPrototypeOf","nativeAccessorDescriptor",
"isBuiltin","hasGetter","hasSetter","getOwnPropertyDescriptor",
- "description","formatAccessorsAsProperties","isOwn","name",
+ "description","isOwn","name",
"typedArrayProperties","keys","getOwnPropertyNames",
"getOwnPropertySymbols","isPrimitiveValue","com","toLowerCase",
"ELEMENT","trim","replace","DOCUMENT","size","byteLength","toString",
diff --git a/deps/v8/test/inspector/runtime/command-line-api-expected.txt b/deps/v8/test/inspector/runtime/command-line-api-expected.txt
index ba5f08d885..efb7ab1861 100644
--- a/deps/v8/test/inspector/runtime/command-line-api-expected.txt
+++ b/deps/v8/test/inspector/runtime/command-line-api-expected.txt
@@ -40,6 +40,7 @@ Running test: testInspect
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -53,6 +54,7 @@ Running test: testInspect
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -65,6 +67,7 @@ Running test: testInspect
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -77,6 +80,7 @@ Running test: testInspect
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
copyToClipboard : true
}
@@ -97,6 +101,7 @@ Running test: testInspect
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -123,6 +128,7 @@ Running test: testQueryObjects
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
queryObjects : true
}
@@ -138,6 +144,7 @@ Is Promise.prototype: true
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
queryObjects : true
}
@@ -153,6 +160,7 @@ Is Promise.prototype: true
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
queryObjects : true
}
@@ -168,6 +176,7 @@ Is p: true
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
queryObjects : true
}
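These expectation changes come from Runtime.inspectRequested now reporting the executionContextId of the context that invoked inspect(). A hedged sketch of a protocol test reading the new field (InspectorTest and Protocol come from protocol-test.js; the test itself is illustrative, not from this patch):
const {session, contextGroup, Protocol} =
    InspectorTest.start('Logs the context id attached to inspectRequested');
Protocol.Runtime.enable();
Protocol.Runtime.onInspectRequested(message => {
  // params now carries executionContextId alongside object and hints.
  InspectorTest.log('inspect() called from context ' +
                    message.params.executionContextId);
  InspectorTest.completeTest();
});
Protocol.Runtime.evaluate(
    {expression: 'inspect({answer: 42})', includeCommandLineAPI: true});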
diff --git a/deps/v8/test/inspector/runtime/console-methods-expected.txt b/deps/v8/test/inspector/runtime/console-methods-expected.txt
index 0bffa5fc7a..f832f37e15 100644
--- a/deps/v8/test/inspector/runtime/console-methods-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-methods-expected.txt
@@ -14,7 +14,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 9
+ lineNumber : 2
scriptId : <scriptId>
url : test.js
}
@@ -46,7 +46,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 10
+ lineNumber : 3
scriptId : <scriptId>
url : test.js
}
@@ -78,7 +78,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 11
+ lineNumber : 4
scriptId : <scriptId>
url : test.js
}
@@ -110,7 +110,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 12
+ lineNumber : 5
scriptId : <scriptId>
url : test.js
}
@@ -142,7 +142,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 13
+ lineNumber : 6
scriptId : <scriptId>
url : test.js
}
@@ -174,7 +174,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 14
+ lineNumber : 7
scriptId : <scriptId>
url : test.js
}
@@ -206,7 +206,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 15
+ lineNumber : 8
scriptId : <scriptId>
url : test.js
}
@@ -297,7 +297,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 16
+ lineNumber : 9
scriptId : <scriptId>
url : test.js
}
@@ -378,7 +378,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 17
+ lineNumber : 10
scriptId : <scriptId>
url : test.js
}
@@ -410,7 +410,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 18
+ lineNumber : 11
scriptId : <scriptId>
url : test.js
}
@@ -442,7 +442,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 19
+ lineNumber : 12
scriptId : <scriptId>
url : test.js
}
@@ -474,7 +474,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 20
+ lineNumber : 13
scriptId : <scriptId>
url : test.js
}
@@ -506,7 +506,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 21
+ lineNumber : 14
scriptId : <scriptId>
url : test.js
}
@@ -538,7 +538,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 22
+ lineNumber : 15
scriptId : <scriptId>
url : test.js
}
@@ -570,7 +570,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 23
+ lineNumber : 16
scriptId : <scriptId>
url : test.js
}
@@ -602,7 +602,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 24
+ lineNumber : 17
scriptId : <scriptId>
url : test.js
}
@@ -634,7 +634,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 25
+ lineNumber : 18
scriptId : <scriptId>
url : test.js
}
@@ -666,14 +666,14 @@ Checks console methods
[0] : {
columnNumber : 12
functionName : foo
- lineNumber : 27
+ lineNumber : 20
scriptId : <scriptId>
url : test.js
}
[1] : {
columnNumber : 2
functionName : testFunction
- lineNumber : 29
+ lineNumber : 22
scriptId : <scriptId>
url : test.js
}
@@ -705,14 +705,14 @@ Checks console methods
[0] : {
columnNumber : 12
functionName : foo
- lineNumber : 27
+ lineNumber : 20
scriptId : <scriptId>
url : test.js
}
[1] : {
columnNumber : 2
functionName : testFunction
- lineNumber : 30
+ lineNumber : 23
scriptId : <scriptId>
url : test.js
}
@@ -744,7 +744,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 31
+ lineNumber : 24
scriptId : <scriptId>
url : test.js
}
@@ -776,7 +776,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 32
+ lineNumber : 25
scriptId : <scriptId>
url : test.js
}
@@ -808,7 +808,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 33
+ lineNumber : 26
scriptId : <scriptId>
url : test.js
}
@@ -840,7 +840,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 35
+ lineNumber : 28
scriptId : <scriptId>
url : test.js
}
@@ -872,7 +872,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 37
+ lineNumber : 30
scriptId : <scriptId>
url : test.js
}
@@ -904,7 +904,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 39
+ lineNumber : 32
scriptId : <scriptId>
url : test.js
}
@@ -936,7 +936,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 40
+ lineNumber : 33
scriptId : <scriptId>
url : test.js
}
@@ -968,7 +968,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 41
+ lineNumber : 34
scriptId : <scriptId>
url : test.js
}
@@ -1001,7 +1001,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 42
+ lineNumber : 35
scriptId : <scriptId>
url : test.js
}
@@ -1034,7 +1034,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 43
+ lineNumber : 36
scriptId : <scriptId>
url : test.js
}
@@ -1067,7 +1067,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 44
+ lineNumber : 37
scriptId : <scriptId>
url : test.js
}
@@ -1099,7 +1099,7 @@ Checks console methods
[0] : {
columnNumber : 10
functionName : testFunction
- lineNumber : 45
+ lineNumber : 38
scriptId : <scriptId>
url : test.js
}
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview-expected.txt b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview-expected.txt
index a8427b3d9b..8afa7df2a8 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview-expected.txt
+++ b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview-expected.txt
@@ -121,6 +121,27 @@ Running test: testShortTypedArrayPropertiesPreview
type : number
value : 0
}
+ [3] : {
+ name : buffer
+ subtype : arraybuffer
+ type : object
+ value : ArrayBuffer(3)
+ }
+ [4] : {
+ name : byteLength
+ type : number
+ value : 3
+ }
+ [5] : {
+ name : byteOffset
+ type : number
+ value : 0
+ }
+ [6] : {
+ name : length
+ type : number
+ value : 3
+ }
]
subtype : typedarray
type : object
@@ -670,6 +691,11 @@ Running test: testSetPropertiesPreview
]
overflow : false
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 3
+ }
]
subtype : set
type : object
@@ -727,6 +753,11 @@ Running test: testBigSetPropertiesPreview
]
overflow : true
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 10
+ }
]
subtype : set
type : object
@@ -814,20 +845,6 @@ Running test: testMixedSetPropertiesPreview
type : object
}
-Running test: testObjInheritsGetterProperty
-{
- description : Object
- overflow : false
- properties : [
- [0] : {
- name : propNotNamedProto
- type : number
- value : NaN
- }
- ]
- type : object
-}
-
Running test: testObjWithArrayAsProto
{
description : Array
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
index 22fc49d9bf..5d94fe72db 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
+++ b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
@@ -55,16 +55,6 @@ for (var i = 0; i < 10; i++) {
mixedSet.add(i);
}
-var deterministicNativeFunction = Math.log;
-var parentObj = {};
-Object.defineProperty(parentObj, 'propNotNamedProto', {
- get: deterministicNativeFunction,
- set: function() {}
-});
-inspector.allowAccessorFormatting(parentObj);
-var objInheritsGetterProperty = {__proto__: parentObj};
-inspector.allowAccessorFormatting(objInheritsGetterProperty);
-
var arrayWithLongValues = ["a".repeat(101), 2n**401n];
`);
@@ -134,13 +124,6 @@ InspectorTest.runTestSuite([
.then(next);
},
- function testObjInheritsGetterProperty(next)
- {
- Protocol.Runtime.evaluate({ "expression": "objInheritsGetterProperty", "generatePreview": true })
- .then(result => InspectorTest.logMessage(result.result.result.preview))
- .then(next);
- },
-
function testObjWithArrayAsProto(next)
{
Protocol.Runtime.evaluate({ "expression": "Object.create([1,2])", "generatePreview": true })
diff --git a/deps/v8/test/inspector/runtime/internal-properties-entries.js b/deps/v8/test/inspector/runtime/internal-properties-entries.js
index d955a19ac0..e2d165767c 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-entries.js
+++ b/deps/v8/test/inspector/runtime/internal-properties-entries.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
let {session, contextGroup, Protocol} = InspectorTest.start('Checks internal [[Entries]] in Runtime.getProperties output');
diff --git a/deps/v8/test/inspector/runtime/remote-object-expected.txt b/deps/v8/test/inspector/runtime/remote-object-expected.txt
index f95cec7249..bbbde00235 100644
--- a/deps/v8/test/inspector/runtime/remote-object-expected.txt
+++ b/deps/v8/test/inspector/runtime/remote-object-expected.txt
@@ -623,6 +623,11 @@ Running test: testMap
description : Map(0)
overflow : false
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 0
+ }
]
subtype : map
type : object
@@ -659,6 +664,11 @@ Running test: testMap
]
overflow : false
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 1
+ }
]
subtype : map
type : object
@@ -681,6 +691,11 @@ Running test: testMap
description : Map(1)
overflow : true
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 1
+ }
]
subtype : map
type : object
@@ -689,6 +704,11 @@ Running test: testMap
description : Map(1)
overflow : true
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 1
+ }
]
subtype : map
type : object
@@ -697,6 +717,11 @@ Running test: testMap
]
overflow : false
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 1
+ }
]
subtype : map
type : object
@@ -777,6 +802,11 @@ Running test: testMap
type : number
value : 42
}
+ [1] : {
+ name : size
+ type : number
+ value : 1
+ }
]
subtype : map
type : object
@@ -966,6 +996,11 @@ Running test: testSet
]
overflow : false
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 1
+ }
]
subtype : set
type : object
@@ -1039,6 +1074,11 @@ Running test: testSet
]
overflow : true
properties : [
+ [0] : {
+ name : size
+ type : number
+ value : 7
+ }
]
subtype : set
type : object
@@ -1728,6 +1768,27 @@ Running test: testTypedArray
type : number
value : 2
}
+ [3] : {
+ name : buffer
+ subtype : arraybuffer
+ type : object
+ value : ArrayBuffer(2)
+ }
+ [4] : {
+ name : byteLength
+ type : number
+ value : 2
+ }
+ [5] : {
+ name : byteOffset
+ type : number
+ value : 0
+ }
+ [6] : {
+ name : length
+ type : number
+ value : 2
+ }
]
subtype : typedarray
type : object
@@ -2266,6 +2327,11 @@ Running test: testArrayBuffer
description : ArrayBuffer(0)
overflow : false
properties : [
+ [0] : {
+ name : byteLength
+ type : number
+ value : 0
+ }
]
subtype : arraybuffer
type : object
@@ -2284,6 +2350,11 @@ Running test: testArrayBuffer
description : ArrayBuffer(400)
overflow : false
properties : [
+ [0] : {
+ name : byteLength
+ type : number
+ value : 400
+ }
]
subtype : arraybuffer
type : object
@@ -2304,6 +2375,22 @@ Running test: testDataView
description : DataView(16)
overflow : false
properties : [
+ [0] : {
+ name : buffer
+ subtype : arraybuffer
+ type : object
+ value : ArrayBuffer(16)
+ }
+ [1] : {
+ name : byteLength
+ type : number
+ value : 16
+ }
+ [2] : {
+ name : byteOffset
+ type : number
+ value : 0
+ }
]
subtype : dataview
type : object
@@ -2322,6 +2409,22 @@ Running test: testDataView
description : DataView(4)
overflow : false
properties : [
+ [0] : {
+ name : buffer
+ subtype : arraybuffer
+ type : object
+ value : ArrayBuffer(16)
+ }
+ [1] : {
+ name : byteLength
+ type : number
+ value : 4
+ }
+ [2] : {
+ name : byteOffset
+ type : number
+ value : 12
+ }
]
subtype : dataview
type : object
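The blocks added above reflect that RemoteObject previews now list the native size, byteLength, byteOffset, buffer, and length accessors as ordinary preview properties for Map, Set, ArrayBuffer, typed arrays, and DataView. A sketch of how a test would observe this (harness names from protocol-test.js; the snippet is not part of the patch):
InspectorTest.runAsyncTestSuite([
  async function testSizeAndBufferInPreview() {
    for (const expression of ['new Map([[1, 2]])', 'new Set([1])',
                              'new ArrayBuffer(8)', 'new Uint8Array(2)',
                              'new DataView(new ArrayBuffer(16))']) {
      const result = await Protocol.Runtime.evaluate(
          {expression, generatePreview: true});
      // preview.properties now includes e.g. "size" for Map/Set,
      // "byteLength" for ArrayBuffer, and "buffer"/"byteLength"/
      // "byteOffset"/"length" for typed arrays and DataView.
      InspectorTest.logMessage(result.result.result.preview);
    }
  }
]);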
diff --git a/deps/v8/test/inspector/sessions/runtime-command-line-api-expected.txt b/deps/v8/test/inspector/sessions/runtime-command-line-api-expected.txt
index 98583a530f..003d9e6949 100644
--- a/deps/v8/test/inspector/sessions/runtime-command-line-api-expected.txt
+++ b/deps/v8/test/inspector/sessions/runtime-command-line-api-expected.txt
@@ -92,6 +92,7 @@ inspectRequested from 1
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -106,6 +107,7 @@ inspectRequested from 1
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -120,6 +122,7 @@ inspectRequested from 2
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -134,6 +137,7 @@ inspectRequested from 2
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -148,6 +152,7 @@ inspectRequested from 1
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -185,6 +190,7 @@ inspectRequested from 2
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
@@ -200,6 +206,7 @@ inspectRequested from 2
{
method : Runtime.inspectRequested
params : {
+ executionContextId : <executionContextId>
hints : {
}
object : {
diff --git a/deps/v8/test/intl/regress-1224869.js b/deps/v8/test/intl/regress-1224869.js
new file mode 100644
index 0000000000..2d30ac5dc5
--- /dev/null
+++ b/deps/v8/test/intl/regress-1224869.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var l = new Intl.Locale("en-US-4106-4104-4102-4100-4098-4096-4094-4092-4090-4088-4086-4084-4082-4080-4078-4076-4074-4072-4070-4068-4066-4064-4062-4060-4058-4056-4054-4052-4050-4048-4049");
+
+// Ensure won't DCHECK in debug build
+try {
+ l.maximize();
+} catch(e) {
+}
+
+l2 = new Intl.Locale("en-US-4106-4104-4102-4100-4098-4096-4094-4092-4090-4088-4086-4084-4082-4080-4078-4076-4074-4072-4070-4068-4066-4064-4062-4060-4058-4056-4054-4052-4050-4048-4049");
+// Ensure won't DCHECK in debug build
+try {
+ l2.minimize();
+} catch(e) {
+}
diff --git a/deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.js b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.js
new file mode 100644
index 0000000000..bca9bf7397
--- /dev/null
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.js
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var [a] = (() => ({a: 1}))();
diff --git a/deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.out b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.out
new file mode 100644
index 0000000000..683fbf09eb
--- /dev/null
+++ b/deps/v8/test/message/fail/destructuring-array-non-iterable-object-computed.out
@@ -0,0 +1,5 @@
+*%(basename)s:5: TypeError: object is not iterable (cannot read property Symbol(Symbol.iterator))
+var [a] = (() => ({a: 1}))();
+ ^
+TypeError: object is not iterable (cannot read property Symbol(Symbol.iterator))
+ at *%(basename)s:5:11
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.js b/deps/v8/test/message/fail/wasm-exception-rethrow.js
index d7b615653c..fb600726f1 100644
--- a/deps/v8/test/message/fail/wasm-exception-rethrow.js
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.js
@@ -7,7 +7,7 @@
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
-let except = builder.addException(kSig_v_i);
+let except = builder.addTag(kSig_v_i);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
kExprTry, kWasmVoid,
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.out b/deps/v8/test/message/fail/wasm-exception-rethrow.out
index 28fd825866..657c4279f1 100644
--- a/deps/v8/test/message/fail/wasm-exception-rethrow.out
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.out
@@ -1,4 +1,4 @@
-wasm-function[0]:0x32: RuntimeError: wasm exception
-RuntimeError: wasm exception
- at rethrow0 (<anonymous>:wasm-function[0]:0x32)
+wasm-function[0]:0x32: WebAssembly.Exception: wasm exception
+WebAssembly.Exception: wasm exception
+ at rethrow0 (wasm://wasm/f019909a:wasm-function[0]:0x32)
at *%(basename)s:21:18
diff --git a/deps/v8/test/message/fail/wasm-exception-throw.js b/deps/v8/test/message/fail/wasm-exception-throw.js
index 63ed2f76a0..827ff86dd4 100644
--- a/deps/v8/test/message/fail/wasm-exception-throw.js
+++ b/deps/v8/test/message/fail/wasm-exception-throw.js
@@ -7,7 +7,7 @@
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
-let except = builder.addException(kSig_v_i);
+let except = builder.addTag(kSig_v_i);
builder.addFunction("throw0", kSig_v_v)
.addBody([
kExprI32Const, 23,
diff --git a/deps/v8/test/message/fail/wasm-exception-throw.out b/deps/v8/test/message/fail/wasm-exception-throw.out
index af45212f3f..eb1f260a7b 100644
--- a/deps/v8/test/message/fail/wasm-exception-throw.out
+++ b/deps/v8/test/message/fail/wasm-exception-throw.out
@@ -1,4 +1,4 @@
-wasm-function[0]:0x2e: RuntimeError: wasm exception
-RuntimeError: wasm exception
- at throw0 (<anonymous>:wasm-function[0]:0x2e)
+wasm-function[0]:0x2e: WebAssembly.Exception: wasm exception
+WebAssembly.Exception: wasm exception
+ at throw0 (wasm://wasm/e4cabeba:wasm-function[0]:0x2e)
at *%(basename)s:17:18
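Two renames drive the expectation changes above: the module-builder helper addException is now addTag, and uncaught wasm exceptions report as WebAssembly.Exception rather than RuntimeError. A hedged sketch, assuming wasm exception handling is enabled and the standard wasm-module-builder.js helpers:
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
const tag = builder.addTag(kSig_v_i);   // previously: builder.addException(kSig_v_i)
builder.addFunction('throw0', kSig_v_v)
    .addBody([kExprI32Const, 23, kExprThrow, tag])
    .exportFunc();
// Letting the exception escape to d8 now prints
//   WebAssembly.Exception: wasm exception
//     at throw0 (wasm://wasm/<hash>:wasm-function[0]:0x..)
// where it used to print RuntimeError with an <anonymous> frame.
builder.instantiate().exports.throw0();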
diff --git a/deps/v8/test/message/fail/wasm-function-name.out b/deps/v8/test/message/fail/wasm-function-name.out
index 182b15dc87..cfeb5f3c44 100644
--- a/deps/v8/test/message/fail/wasm-function-name.out
+++ b/deps/v8/test/message/fail/wasm-function-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at main (<anonymous>:wasm-function[0]:0x22)
+ at main (wasm://wasm/d578b3d2:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-and-function-name.out b/deps/v8/test/message/fail/wasm-module-and-function-name.out
index 969ac55cbb..f3462b2cc7 100644
--- a/deps/v8/test/message/fail/wasm-module-and-function-name.out
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at test-module.main (<anonymous>:wasm-function[0]:0x22)
+ at test-module.main (wasm://wasm/test-module-712dabfa:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-name.out b/deps/v8/test/message/fail/wasm-module-name.out
index 969ac55cbb..f3462b2cc7 100644
--- a/deps/v8/test/message/fail/wasm-module-name.out
+++ b/deps/v8/test/message/fail/wasm-module-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at test-module.main (<anonymous>:wasm-function[0]:0x22)
+ at test-module.main (wasm://wasm/test-module-712dabfa:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-no-name.out b/deps/v8/test/message/fail/wasm-no-name.out
index c39dff6310..0147ad988a 100644
--- a/deps/v8/test/message/fail/wasm-no-name.out
+++ b/deps/v8/test/message/fail/wasm-no-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at <anonymous>:wasm-function[0]:0x22
+ at wasm://wasm/44ee3bce:wasm-function[0]:0x22
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-trap.out b/deps/v8/test/message/fail/wasm-trap.out
index 85eb9845b4..03c00afc32 100644
--- a/deps/v8/test/message/fail/wasm-trap.out
+++ b/deps/v8/test/message/fail/wasm-trap.out
@@ -1,4 +1,4 @@
wasm-function[1]:0x30: RuntimeError: divide by zero
RuntimeError: divide by zero
- at main (<anonymous>:wasm-function[1]:0x30)
+ at main (wasm://wasm/7b123b7e:wasm-function[1]:0x30)
at *%(basename)s:{NUMBER}:16
diff --git a/deps/v8/test/message/wasm-function-name-async.out b/deps/v8/test/message/wasm-function-name-async.out
index ad003ef227..84a083c032 100644
--- a/deps/v8/test/message/wasm-function-name-async.out
+++ b/deps/v8/test/message/wasm-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at main (<anonymous>:wasm-function[0]:0x22)
+ at main (wasm://wasm/d578b3d2:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-function-name-streaming.out b/deps/v8/test/message/wasm-function-name-streaming.out
index 4e8b7d5a5a..82dcda0256 100644
--- a/deps/v8/test/message/wasm-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at main (<anonymous>:wasm-function[0]:0x22)
+ at main (wasm://wasm/d578b3d2:wasm-function[0]:0x22)
at test/message/wasm-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.out b/deps/v8/test/message/wasm-module-and-function-name-async.out
index f2d044245b..d8645e3817 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (<anonymous>:wasm-function[0]:0x22)
+ at test-module.main (wasm://wasm/test-module-712dabfa:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-streaming.out b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
index 8e6eebad7f..ae24d7f168 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (<anonymous>:wasm-function[0]:0x22)
+ at test-module.main (wasm://wasm/test-module-712dabfa:wasm-function[0]:0x22)
at test/message/wasm-module-and-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-async.out b/deps/v8/test/message/wasm-module-name-async.out
index f2d044245b..d8645e3817 100644
--- a/deps/v8/test/message/wasm-module-name-async.out
+++ b/deps/v8/test/message/wasm-module-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (<anonymous>:wasm-function[0]:0x22)
+ at test-module.main (wasm://wasm/test-module-712dabfa:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-streaming.out b/deps/v8/test/message/wasm-module-name-streaming.out
index e7435267e9..aae29270b2 100644
--- a/deps/v8/test/message/wasm-module-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (<anonymous>:wasm-function[0]:0x22)
+ at test-module.main (wasm://wasm/test-module-712dabfa:wasm-function[0]:0x22)
at test/message/wasm-module-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-async.out b/deps/v8/test/message/wasm-no-name-async.out
index b35991d7fa..1bcdcfa082 100644
--- a/deps/v8/test/message/wasm-no-name-async.out
+++ b/deps/v8/test/message/wasm-no-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at <anonymous>:wasm-function[0]:0x22
+ at wasm://wasm/44ee3bce:wasm-function[0]:0x22
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-streaming.out b/deps/v8/test/message/wasm-no-name-streaming.out
index 182d3c552b..759174fefc 100644
--- a/deps/v8/test/message/wasm-no-name-streaming.out
+++ b/deps/v8/test/message/wasm-no-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at <anonymous>:wasm-function[0]:0x22
+ at wasm://wasm/44ee3bce:wasm-function[0]:0x22
at test/message/wasm-no-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/mjsunit/baseline/batch-compilation.js b/deps/v8/test/mjsunit/baseline/batch-compilation.js
index 827d6dcc81..279a24f4f0 100644
--- a/deps/v8/test/mjsunit/baseline/batch-compilation.js
+++ b/deps/v8/test/mjsunit/baseline/batch-compilation.js
@@ -4,8 +4,8 @@
// Flags: --sparkplug --no-always-sparkplug --sparkplug-filter="test*"
// Flags: --allow-natives-syntax --expose-gc --no-always-opt
-// Flags: --baseline-batch-compilation --baseline-batch-compilation-threshold=200
-// Flags: --scale-factor-for-feedback-allocation=4
+// Flags: --baseline-batch-compilation --baseline-batch-compilation-threshold=500
+// Flags: --scale-factor-for-feedback-allocation=2
// Flags to drive Fuzzers into the right direction
// TODO(v8:11853): Remove these flags once fuzzers handle flag implications
diff --git a/deps/v8/test/mjsunit/baseline/flush-baseline-code.js b/deps/v8/test/mjsunit/baseline/flush-baseline-code.js
new file mode 100644
index 0000000000..8599fcdadf
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/flush-baseline-code.js
@@ -0,0 +1,83 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-flush-code --allow-natives-syntax
+// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
+// Flags: --no-always-sparkplug --lazy-feedback-allocation
+// Flags: --flush-baseline-code --flush-bytecode
+
+function HasBaselineCode(f) {
+ let opt_status = %GetOptimizationStatus(f);
+ return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
+}
+
+function HasByteCode(f) {
+ let opt_status = %GetOptimizationStatus(f);
+ return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
+}
+
+var x = {b:20, c:30};
+function f() {
+ return x.b + 10;
+}
+
+// Test bytecode gets flushed
+f();
+assertTrue(HasByteCode(f));
+gc();
+assertFalse(HasByteCode(f));
+
+// Test baseline code and bytecode gets flushed
+for (i = 1; i < 50; i++) {
+ f();
+}
+assertTrue(HasBaselineCode(f));
+gc();
+assertFalse(HasBaselineCode(f));
+assertFalse(HasByteCode(f));
+
+// Check bytecode isn't flushed if it's held strongly from somewhere but
+// baseline code is flushed.
+function f1(should_recurse) {
+ if (should_recurse) {
+ assertTrue(HasByteCode(f1));
+ for (i = 1; i < 50; i++) {
+ f1(false);
+ }
+ assertTrue(HasBaselineCode(f1));
+ gc();
+ assertFalse(HasBaselineCode(f1));
+ assertTrue(HasByteCode(f1));
+ }
+ return x.b + 10;
+}
+
+f1(false);
+// Recurse first time so we have bytecode array on the stack that keeps
+// bytecode alive.
+f1(true);
+
+// Flush bytecode
+gc();
+assertFalse(HasBaselineCode(f1));
+assertFalse(HasByteCode(f1));
+
+// Check baseline code and bytecode aren't flushed if baseline code is on
+// stack.
+function f2(should_recurse) {
+ if (should_recurse) {
+ assertTrue(HasBaselineCode(f2));
+ f2(false);
+ gc();
+ assertTrue(HasBaselineCode(f2));
+ }
+ return x.b + 10;
+}
+
+for (i = 1; i < 50; i++) {
+ f2(false);
+}
+assertTrue(HasBaselineCode(f2));
+// Recurse with baseline code on stack
+f2(true);
diff --git a/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js b/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js
new file mode 100644
index 0000000000..4b4dde93c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js
@@ -0,0 +1,57 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-flush-code --allow-natives-syntax
+// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
+// Flags: --no-always-sparkplug --lazy-feedback-allocation
+// Flags: --flush-baseline-code --no-flush-bytecode
+
+function HasBaselineCode(f) {
+ let opt_status = %GetOptimizationStatus(f);
+ return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
+}
+
+function HasByteCode(f) {
+ let opt_status = %GetOptimizationStatus(f);
+ return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
+}
+
+var x = {b:20, c:30};
+function f() {
+ return x.b + 10;
+}
+
+// Test bytecode gets flushed
+f();
+assertTrue(HasByteCode(f));
+gc();
+assertTrue(HasByteCode(f));
+
+// Test baseline code gets flushed but not bytecode.
+for (i = 1; i < 50; i++) {
+ f();
+}
+assertTrue(HasBaselineCode(f));
+gc();
+assertFalse(HasBaselineCode(f));
+assertTrue(HasByteCode(f));
+
+// Check baseline code and bytecode aren't flushed if baseline code is on
+// stack.
+function f2(should_recurse) {
+ if (should_recurse) {
+ assertTrue(HasBaselineCode(f2));
+ f2(false);
+ gc();
+ assertTrue(HasBaselineCode(f2));
+ }
+ return x.b + 10;
+}
+
+for (i = 1; i < 50; i++) {
+ f2(false);
+}
+assertTrue(HasBaselineCode(f2));
+// Recurse with baseline code on stack
+f2(true);
diff --git a/deps/v8/test/mjsunit/check-bounds-array-index.js b/deps/v8/test/mjsunit/check-bounds-array-index.js
index 8367a4c81c..ce2396ff08 100644
--- a/deps/v8/test/mjsunit/check-bounds-array-index.js
+++ b/deps/v8/test/mjsunit/check-bounds-array-index.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --opt --no-always-opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
let arr = [1, 2, 3];
diff --git a/deps/v8/test/mjsunit/check-bounds-string-from-char-code-at.js b/deps/v8/test/mjsunit/check-bounds-string-from-char-code-at.js
index 210edf944e..024a40a6c6 100644
--- a/deps/v8/test/mjsunit/check-bounds-string-from-char-code-at.js
+++ b/deps/v8/test/mjsunit/check-bounds-string-from-char-code-at.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --opt --no-always-opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
let string = "foobar";
diff --git a/deps/v8/test/mjsunit/code-coverage-ad-hoc.js b/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
index b6ae4620ea..e74d61fe78 100644
--- a/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
+++ b/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-code
// Files: test/mjsunit/code-coverage-utils.js
// Test code coverage without explicitly activating it upfront.
diff --git a/deps/v8/test/mjsunit/code-coverage-block-async.js b/deps/v8/test/mjsunit/code-coverage-block-async.js
index 2876816c95..58a2a2fb20 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-async.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-code
// Flags: --no-stress-incremental-marking
// Files: test/mjsunit/code-coverage-utils.js
diff --git a/deps/v8/test/mjsunit/code-coverage-block-noopt.js b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
index 301a05ad5d..069798439b 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-noopt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-code
// Flags: --no-opt
// Files: test/mjsunit/code-coverage-utils.js
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index 7c87dd4113..76bfb7f206 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
-// Flags: --no-stress-flush-bytecode --turbo-inlining
+// Flags: --no-stress-flush-code --turbo-inlining
// Files: test/mjsunit/code-coverage-utils.js
if (isNeverOptimizeLiteMode()) {
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index e9d38d7146..8640e4e7f1 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-code
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
diff --git a/deps/v8/test/mjsunit/code-coverage-class-fields.js b/deps/v8/test/mjsunit/code-coverage-class-fields.js
index 15b5478fca..cb0aeeaf6e 100644
--- a/deps/v8/test/mjsunit/code-coverage-class-fields.js
+++ b/deps/v8/test/mjsunit/code-coverage-class-fields.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --harmony-public-fields
-// Flags: --harmony-static-fields --no-stress-flush-bytecode
+// Flags: --harmony-static-fields --no-stress-flush-code
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
diff --git a/deps/v8/test/mjsunit/code-coverage-precise.js b/deps/v8/test/mjsunit/code-coverage-precise.js
index 3c70408174..4d8dcbb8f3 100644
--- a/deps/v8/test/mjsunit/code-coverage-precise.js
+++ b/deps/v8/test/mjsunit/code-coverage-precise.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-code
// Flags: --no-stress-incremental-marking
// Files: test/mjsunit/code-coverage-utils.js
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
index 1388de7c10..f230dad80a 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
@@ -14,7 +14,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertFalse(foo());
})();
@@ -28,7 +28,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertFalse(foo());
})();
@@ -42,7 +42,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertFalse(foo());
})();
@@ -56,7 +56,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertFalse(foo());
})();
@@ -70,7 +70,7 @@
%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertFalse(foo());
})();
@@ -86,7 +86,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(foo(b));
assertFalse(foo(a));
@@ -107,7 +107,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(foo(b));
assertFalse(foo(a));
@@ -128,7 +128,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(foo(b));
assertFalse(foo(a));
@@ -149,7 +149,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(null));
@@ -173,7 +173,7 @@
assertFalse(foo(a));
assertTrue(foo(b));
assertFalse(foo(a));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(null));
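The recurring change in these compiler tests swaps %OptimizeFunctionOnNextCall for %OptimizeFunctionForTopTier, which requests top-tier (TurboFan) compilation directly and keeps the tests meaningful under tiering variants. A minimal sketch of the updated idiom (assumes d8 with --allow-natives-syntax and the mjsunit assert helpers):
function foo(a) { return a == null; }

%PrepareFunctionForOptimization(foo);
assertTrue(foo(null));
assertFalse(foo({}));
// Explicitly request the top tier instead of the generic "on next call" hint.
%OptimizeFunctionForTopTier(foo);
assertTrue(foo(null));
assertOptimized(foo);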
diff --git a/deps/v8/test/mjsunit/compiler/array-slice-clone.js b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
index c6294b85b5..a693afb156 100644
--- a/deps/v8/test/mjsunit/compiler/array-slice-clone.js
+++ b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --opt --no-stress-flush-code
// Test CloneFastJSArray inserted by JSCallReducer for Array.prototype.slice.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js
index 5afef40f44..d9513aa23e 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-2.js
@@ -34,7 +34,7 @@
assertTrue(sum_js_got_interpreted);
// The protector should be invalidated, which prevents inlining.
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('AxB', foo('A', 'B'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js
index 5c2f0ebf70..9a0e857565 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-3.js
@@ -34,7 +34,7 @@
assertEquals('AundefinedB', foo('A', 'B'));
assertTrue(sum_js_got_interpreted);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('AundefinedB', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
assertOptimized(foo);
@@ -45,7 +45,7 @@
// Now the call will not be inlined.
%PrepareFunctionForOptimization(foo);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('AxB', foo('A', 'B'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
index 092118d73e..49da8832f0 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
@@ -38,7 +38,7 @@
assertTrue(sum_js_got_interpreted);
// Compile function foo; inlines 'sum_js' into 'foo'.
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(78, foo(26, 6, 46, null));
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js
index e74295c361..805e86dd30 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-5.js
@@ -45,8 +45,8 @@
assertTrue(log_got_interpreted);
// Compile foo.
- %OptimizeFunctionOnNextCall(log);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(log);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(42, foo());
// The call with spread should not have been inlined, because of the
// generator/iterator.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js
index 61884b4b28..5e3456fea4 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-6.js
@@ -45,8 +45,8 @@
assertTrue(log_got_interpreted);
// Compile foo.
- %OptimizeFunctionOnNextCall(log);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(log);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(42, foo());
// The call with spread should not have been inlined, because of the
// generator/iterator.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
index 147ba5bddb..96e50dd906 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
@@ -37,8 +37,8 @@
assertTrue(log_got_interpreted);
// Compile foo.
- %OptimizeFunctionOnNextCall(log);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(log);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
// The call with spread should have been inlined.
assertFalse(log_got_interpreted);
@@ -58,7 +58,7 @@
// Recompile 'foo'.
%PrepareFunctionForOptimization(foo);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(42, foo());
// The call with spread will not be inlined because we have redefined the
// array iterator.
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js
index 372115b774..b077131bc7 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread.js
@@ -30,13 +30,31 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertOptimized(foo);
assertFalse(sum_js_got_interpreted);
})();
+// Test using receiver
+(function () {
+ function bar() {
+ return this.gaga;
+ }
+ function foo(receiver) {
+ return bar.apply(receiver, [""]);
+ }
+
+ %PrepareFunctionForOptimization(bar);
+ %PrepareFunctionForOptimization(foo);
+ var receiver = { gaga: 42 };
+ assertEquals(42, foo(receiver));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(42, foo(receiver));
+ assertOptimized(foo);
+})();
+
// Test with holey array.
(function () {
"use strict";
@@ -52,7 +70,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('AundefinedB', foo('A', 'B'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('AundefinedB', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
@@ -74,7 +92,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals(45.31, foo(16.11, 26.06));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
// This is expected to deoptimize
@@ -85,7 +103,7 @@
// Optimize again
%PrepareFunctionForOptimization(foo);
assertEquals(45.31, foo(16.11, 26.06));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
// This should stay optimized, but with the call not inlined.
@@ -112,7 +130,7 @@
%PrepareFunctionForOptimization(foo);
// Here array size changes.
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
// Here it should deoptimize.
assertEquals('abc', foo('a', 'b', 'c'));
@@ -120,7 +138,7 @@
assertTrue(sum_js_got_interpreted);
// Now speculation mode prevents the optimization.
%PrepareFunctionForOptimization(foo);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('abc', foo('a', 'b', 'c'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
@@ -141,7 +159,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals(56.34, foo(11.03, 16.11, 26.06));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals(56.34, foo(11.03, 16.11, 26.06));
assertFalse(sum_js_got_interpreted);
@@ -163,7 +181,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(42, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(got_interpreted);
assertEquals(42, foo());
assertFalse(got_interpreted);
@@ -190,7 +208,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(44, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(got_interpreted);
assertEquals(44, foo());
assertTrue(got_interpreted);
@@ -217,7 +235,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -239,7 +257,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(42, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(got_interpreted);
assertEquals(42, foo());
assertFalse(got_interpreted);
@@ -266,7 +284,7 @@
%PrepareFunctionForOptimization(fortytwo);
%PrepareFunctionForOptimization(foo);
assertEquals(44, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(got_interpreted);
assertEquals(44, foo());
assertTrue(got_interpreted);
@@ -294,7 +312,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -317,7 +335,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -343,7 +361,7 @@
%PrepareFunctionForOptimization(max);
%PrepareFunctionForOptimization(foo);
assertEquals(5, foo(1, 2, 3));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(max_got_interpreted);
assertEquals(5, foo(1, 2, 3));
assertTrue(max_got_interpreted);
@@ -372,7 +390,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abccba', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abccba', foo('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
@@ -399,7 +417,7 @@
len = 0;
%PrepareFunctionForOptimization(foo);
assertEquals(3, foo(1, 2, 3));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(3, foo(1, 2, 3));
assertOptimized(foo);
// Deoptimize when input of Math.max is not number
@@ -410,7 +428,7 @@
len = 2;
%PrepareFunctionForOptimization(foo1);
assertEquals(3, foo1(1, 2, 3));
- %OptimizeFunctionOnNextCall(foo1);
+ %OptimizeFunctionForTopTier(foo1);
assertEquals(3, foo1(1, 2, 3));
// Deoptimize when array length changes
assertUnoptimized(foo1);
@@ -436,7 +454,7 @@
len = 0;
%PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1, 2, 3));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(2, foo(1, 2, 3));
assertOptimized(foo);
// Deoptimize when input of Math.max is not number
@@ -447,7 +465,7 @@
len = 2;
%PrepareFunctionForOptimization(foo1);
assertEquals(3, foo1(1, 2, 3));
- %OptimizeFunctionOnNextCall(foo1);
+ %OptimizeFunctionForTopTier(foo1);
assertEquals(3, foo1(1, 2, 3));
assertOptimized(foo1);
// No Deoptimization when array length changes
@@ -475,8 +493,8 @@
%PrepareFunctionForOptimization(foo_closure);
%PrepareFunctionForOptimization(foo);
assertEquals('abc', foo('a', 'b', 'c'));
- %OptimizeFunctionOnNextCall(foo_closure);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo_closure);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_got_interpreted);
assertEquals('abc', foo('a', 'b', 'c'));
assertFalse(sum_got_interpreted);
@@ -501,7 +519,7 @@
assertEquals(166, foo(40, 42, 44));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(166, foo(40, 42, 44));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -525,7 +543,7 @@
assertEquals(166, foo(40, 42, 44));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(166, foo(40, 42, 44));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -548,7 +566,7 @@
assertEquals('42abc', foo('a', 'b', 'c'));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('42abc', foo('a', 'b', 'c'));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -571,7 +589,7 @@
assertEquals('45abc', foo('a', 'b', 'c'));
assertTrue(sum_got_interpreted);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('45abc', foo('a', 'b', 'c'));
assertFalse(sum_got_interpreted);
assertOptimized(foo);
@@ -592,7 +610,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('ABundefined3', foo('A', 'B'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('ABundefined3', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
@@ -614,7 +632,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
assertFalse(sum_js_got_interpreted);
@@ -637,7 +655,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('ABundefined3', foo('A', 'B'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('ABundefined3', foo('A', 'B'));
assertFalse(sum_js_got_interpreted);
@@ -660,7 +678,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abc6', foo('a', 'b', 'c', 'd', 'e'));
assertFalse(sum_js_got_interpreted);
@@ -682,7 +700,7 @@
%PrepareFunctionForOptimization(sum_js);
%PrepareFunctionForOptimization(foo);
assertEquals('abcde', foo('a', 'b', 'c', 'd', 'e'));
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertTrue(sum_js_got_interpreted);
assertEquals('abcde', foo('a', 'b', 'c', 'd', 'e'));
assertFalse(sum_js_got_interpreted);
@@ -707,7 +725,7 @@
assertTrue(sum_js_got_interpreted);
// The call is not inlined with CreateArguments.
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals('abc', foo('a', 'b', 'c'));
assertTrue(sum_js_got_interpreted);
assertOptimized(foo);
@@ -735,7 +753,7 @@
assertTrue(sum_js_got_interpreted);
// Optimization also works if the call is in an inlined function.
- %OptimizeFunctionOnNextCall(bar);
+ %OptimizeFunctionForTopTier(bar);
assertEquals('cba', bar('a', 'b', 'c'));
assertFalse(sum_js_got_interpreted);
assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/compiler/catch-block-load.js b/deps/v8/test/mjsunit/compiler/catch-block-load.js
index c753b2aaa0..573195d44e 100644
--- a/deps/v8/test/mjsunit/compiler/catch-block-load.js
+++ b/deps/v8/test/mjsunit/compiler/catch-block-load.js
@@ -31,7 +31,7 @@ function boom() {
foo();
foo();
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
foo();
})();
@@ -62,6 +62,6 @@ function boom() {
foo();
foo();
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
foo();
})();
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js b/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
index 9cbdbc863f..b5cdafd26f 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-inlining-1.js
@@ -22,5 +22,5 @@ function foo() { return %TurbofanStaticAssert(bar(global)); }
bar({gaga() {}});
foo();
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js b/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
index e3e63d195c..4c35303986 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-inlining-2.js
@@ -22,5 +22,5 @@ function foo(obj) { obj.gaga; %TurbofanStaticAssert(bar(obj)); }
bar({gaga() {}});
foo(global);
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(global);
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 9e482d2a8d..941944119b 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -25,9 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
-// Flags: --no-always-opt --no-turbo-concurrent-get-property-access-info
+// Flags: --allow-natives-syntax --concurrent-recompilation --no-always-opt
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -51,25 +49,25 @@ var obj1 = new_object();
var obj2 = new_object();
add_field(obj1);
add_field(obj2);
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(add_field, "concurrent");
var o = new_object();
// Kick off recompilation.
add_field(o);
// Invalidate transition map after compile graph has been created.
+%WaitForBackgroundOptimization();
o.c = 2.2;
-// In the mean time, concurrent recompiling is still blocked.
assertUnoptimized(add_field, "no sync");
-// Let concurrent recompilation proceed.
-%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization that bailed out.
+%FinalizeOptimization();
if (!%IsDictPropertyConstTrackingEnabled()) {
// TODO(v8:11457) Currently, we cannot inline property stores if there is a
// dictionary mode prototype on the prototype chain. Therefore, if
// v8_dict_property_const_tracking is enabled, the optimized code only
// contains a call to the IC handler and doesn't get invalidated when the
// transition map changes.
- assertUnoptimized(add_field, "sync");
+ assertUnoptimized(add_field);
}
// Clear type info for stress runs.
%ClearFunctionFeedback(add_field);
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
index 6ee45ef342..8de72c0cd9 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
@@ -25,9 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --no-always-opt
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
-// Flags: --no-always-opt --no-turbo-concurrent-get-property-access-info
+// Flags: --allow-natives-syntax --no-always-opt --concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -44,18 +42,17 @@ o.__proto__ = { __proto__: { bar: function() { return 1; } } };
assertEquals(1, f(o));
assertEquals(1, f(o));
-// Mark for concurrent optimization.
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(f, "concurrent");
// Kick off recompilation.
assertEquals(1, f(o));
// Change the prototype chain after compile graph has been created.
+%WaitForBackgroundOptimization();
o.__proto__.__proto__ = { bar: function() { return 2; } };
-// At this point, concurrent recompilation thread has not yet done its job.
assertUnoptimized(f, "no sync");
-// Let the background thread proceed.
-%UnblockConcurrentRecompilation();
-// Optimization eventually bails out due to map dependency.
-assertUnoptimized(f, "sync");
+%FinalizeOptimization();
+// Optimization failed due to map dependency.
+assertUnoptimized(f);
assertEquals(2, f(o));
-//Clear type info for stress runs.
+// Clear type info for stress runs.
%ClearFunctionFeedback(f);
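The two concurrent-recompilation tests above are converted from the old --block-concurrent-recompilation / %UnblockConcurrentRecompilation handshake to explicit synchronization intrinsics. A minimal sketch of the resulting pattern, assembled only from calls that appear in this patch (the precise semantics of the intrinsics are assumed from the surrounding comments, not documented here):

// Flags: --allow-natives-syntax --concurrent-recompilation --no-always-opt
function f(o) { return o.bar(); }
%PrepareFunctionForOptimization(f);
f(obj); f(obj);                                   // gather feedback
%DisableOptimizationFinalization();               // keep the background result pending
%OptimizeFunctionOnNextCall(f, "concurrent");
f(obj);                                           // kick off the background compile
%WaitForBackgroundOptimization();                 // compile graph has been created
obj.__proto__.__proto__ = { bar() { return 2; } };  // invalidate a compile-time assumption
%FinalizeOptimization();                          // conclude the optimization, which bails out
assertUnoptimized(f);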
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
index e824cabda6..875f9be756 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
@@ -12,6 +12,6 @@ function foo(x) {
%PrepareFunctionForOptimization(foo);
foo(121);
foo(122);
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(123);
})();
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
index 0f8891769b..15c5aab560 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
@@ -12,7 +12,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a.length = 1;
@@ -28,7 +28,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -44,7 +44,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -58,7 +58,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -72,7 +72,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -87,7 +87,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -101,7 +101,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
@@ -115,7 +115,7 @@
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertEquals(1, foo());
assertOptimized(foo);
a[0] = 42;
diff --git a/deps/v8/test/mjsunit/compiler/construct-bound-function.js b/deps/v8/test/mjsunit/compiler/construct-bound-function.js
index 94abd80cfb..6f7f5696ff 100644
--- a/deps/v8/test/mjsunit/compiler/construct-bound-function.js
+++ b/deps/v8/test/mjsunit/compiler/construct-bound-function.js
@@ -30,5 +30,5 @@ bar({aaaa:1});
bar({aaaaa:1});
foo();
foo();
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/construct-object.js b/deps/v8/test/mjsunit/compiler/construct-object.js
index f074781bfc..5d3b8a7952 100644
--- a/deps/v8/test/mjsunit/compiler/construct-object.js
+++ b/deps/v8/test/mjsunit/compiler/construct-object.js
@@ -26,5 +26,5 @@ bar({aaaa:1});
bar({aaaaa:1});
foo();
foo();
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/construct-receiver.js b/deps/v8/test/mjsunit/compiler/construct-receiver.js
index e030745a25..be937748fd 100644
--- a/deps/v8/test/mjsunit/compiler/construct-receiver.js
+++ b/deps/v8/test/mjsunit/compiler/construct-receiver.js
@@ -25,5 +25,5 @@ new class extends C { constructor() { super(); this.c = 1 } }
new class extends C { constructor() { super(); this.d = 1 } }
foo();
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/dataview-deopt.js b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
index 3868fbbd2c..cd7a8ca9da 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --no-always-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --opt --no-always-opt --no-stress-flush-code
// Check that there are no deopt loops for DataView methods.
diff --git a/deps/v8/test/mjsunit/compiler/dataview-detached.js b/deps/v8/test/mjsunit/compiler/dataview-detached.js
index b5fe3102c2..1ca51b2497 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-detached.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-detached.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --noalways-opt --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --opt --noalways-opt --no-stress-flush-code
// Invalidate the detaching protector.
%ArrayBufferDetach(new ArrayBuffer(1));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js b/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
index 3f32293d3b..eec17503d6 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --no-flush-bytecode --no-stress-flush-bytecode
+// Flags: --allow-natives-syntax --opt --no-flush-bytecode --no-stress-flush-code
/* Test MapCheck behavior */
diff --git a/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js b/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
index e69a1cbeda..9772291280 100644
--- a/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
+++ b/deps/v8/test/mjsunit/compiler/diamond-followedby-branch.js
@@ -18,5 +18,5 @@ function foo(cond, v1, v2) {
%PrepareFunctionForOptimization(foo);
foo(1, 10, 20); foo(2, 30, 40);
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(1, 10, 20); foo(2, 30, 40);
diff --git a/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js b/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js
index 1cfc0cfe10..e139af2415 100644
--- a/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js
+++ b/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --opt --allow-natives-syntax --expose-gc --flush-bytecode
-// Flags: --stress-flush-bytecode
+// Flags: --stress-flush-code --flush-bytecode
function foo(a) {}
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-calls.js b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
index c6dab813cf..bdda760c19 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-calls.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
@@ -163,7 +163,7 @@ assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
assertOptimized(add_32bit_int_mismatch);
// Test that passing too few arguments falls down the slow path,
-// because it's an argument type mismatch (undefined vs. int).
+// because one of the arguments is undefined.
fast_c_api.reset_counts();
assertEquals(-42, add_32bit_int_mismatch(false, -42));
assertUnoptimized(add_32bit_int_mismatch);
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-helpers.js b/deps/v8/test/mjsunit/compiler/fast-api-helpers.js
new file mode 100644
index 0000000000..587cfbc539
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/fast-api-helpers.js
@@ -0,0 +1,36 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+
+// Helper for sequence tests.
+function optimize_and_check(func, fast_count, slow_count, expected) {
+ %PrepareFunctionForOptimization(func);
+ let result = func();
+ assertEqualsDelta(expected, result, 0.001);
+
+ fast_c_api.reset_counts();
+ %OptimizeFunctionOnNextCall(func);
+ result = func();
+ assertEqualsDelta(expected, result, 0.001);
+ assertOptimized(func);
+ assertEquals(fast_count, fast_c_api.fast_call_count());
+ assertEquals(slow_count, fast_c_api.slow_call_count());
+}
+
+function ExpectFastCall(func, expected) {
+ optimize_and_check(func, 1, 0, expected);
+}
+
+function ExpectSlowCall(func, expected) {
+ optimize_and_check(func, 0, 1, expected);
+}
+
+function assert_throws_and_optimized(func, arg) {
+ fast_c_api.reset_counts();
+ assertThrows(() => func(arg));
+ assertOptimized(func);
+ assertEquals(0, fast_c_api.fast_call_count());
+ assertEquals(1, fast_c_api.slow_call_count());
+}
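Note that fast_c_api is referenced by these helpers but not defined here; each including test is expected to construct it. A minimal usage sketch, mirroring the rewritten fast-api-sequences.js later in this patch (d8.test.FastCAPI and add_all_sequence are the d8 test hooks used throughout these tests):

// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
d8.file.execute('test/mjsunit/compiler/fast-api-helpers.js');
const fast_c_api = new d8.test.FastCAPI();

(function () {
  function add_all_sequence() {
    // One fast call, zero slow calls, expected sum 3.
    return fast_c_api.add_all_sequence(false /* should_fallback */, [-42, 45]);
  }
  ExpectFastCall(add_all_sequence, 3);
})();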
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js b/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js
new file mode 100644
index 0000000000..7bc8db4ec7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js
@@ -0,0 +1,55 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file adds x64-specific tests to the ones in fast-api-sequences.js.
+
+// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// --always-opt is disabled because we rely on particular feedback for
+// optimizing to the fastest path.
+// Flags: --no-always-opt
+// The test relies on optimizing/deoptimizing at predictable moments, so
+// it's not suitable for deoptimization fuzzing.
+// Flags: --deopt-every-n-times=0
+
+d8.file.execute('test/mjsunit/compiler/fast-api-helpers.js');
+
+const fast_c_api = new d8.test.FastCAPI();
+
+assertTrue(fast_c_api.supports_fp_params);
+
+(function () {
+ const max_safe_float = 2 ** 24 - 1;
+ const add_all_result = -42 + 45 +
+ Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
+ max_safe_float * 0.5 + Math.PI;
+
+ function add_all_sequence() {
+ const arr = [-42, 45,
+ Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
+ max_safe_float * 0.5, Math.PI];
+ return fast_c_api.add_all_sequence(false /* should_fallback */, arr);
+ }
+ ExpectFastCall(add_all_sequence, add_all_result);
+})();
+
+const max_safe_as_bigint = BigInt(Number.MAX_SAFE_INTEGER);
+(function () {
+ function int64_test(should_fallback = false) {
+ let typed_array = new BigInt64Array([-42n, 1n, max_safe_as_bigint]);
+ return fast_c_api.add_all_int64_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ const expected = Number(BigInt.asIntN(64, -42n + 1n + max_safe_as_bigint));
+ ExpectFastCall(int64_test, expected);
+})();
+
+(function () {
+ function uint64_test(should_fallback = false) {
+ let typed_array = new BigUint64Array([max_safe_as_bigint, 1n, 2n]);
+ return fast_c_api.add_all_uint64_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ const expected = Number(BigInt.asUintN(64, max_safe_as_bigint + 1n + 2n));
+ ExpectFastCall(uint64_test, expected);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-sequences.js b/deps/v8/test/mjsunit/compiler/fast-api-sequences.js
index 8d960de1b1..f37c68cb5e 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-sequences.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-sequences.js
@@ -12,89 +12,38 @@
// it's not suitable for deoptimization fuzzing.
// Flags: --deopt-every-n-times=0
+d8.file.execute('test/mjsunit/compiler/fast-api-helpers.js');
+
const fast_c_api = new d8.test.FastCAPI();
// ----------- add_all_sequence -----------
// `add_all_sequence` has the following signature:
// double add_all_sequence(bool /*should_fallback*/, Local<Array>)
-const max_safe_float = 2**24 - 1;
-const add_all_result_full = -42 + 45 +
- Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
- max_safe_float * 0.5 + Math.PI;
-const full_array = [-42, 45,
- Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
- max_safe_float * 0.5, Math.PI];
-
-function add_all_sequence_smi(arg) {
- return fast_c_api.add_all_sequence(false /* should_fallback */, arg);
-}
-
-%PrepareFunctionForOptimization(add_all_sequence_smi);
-assertEquals(3, add_all_sequence_smi([-42, 45]));
-%OptimizeFunctionOnNextCall(add_all_sequence_smi);
-
-function add_all_sequence_full(arg) {
- return fast_c_api.add_all_sequence(false /* should_fallback */, arg);
-}
-
-%PrepareFunctionForOptimization(add_all_sequence_full);
-if (fast_c_api.supports_fp_params) {
- assertEquals(add_all_result_full, add_all_sequence_full(full_array));
-} else {
- assertEquals(3, add_all_sequence_smi([-42, 45]));
-}
-%OptimizeFunctionOnNextCall(add_all_sequence_full);
-
-if (fast_c_api.supports_fp_params) {
- // Test that regular call hits the fast path.
- fast_c_api.reset_counts();
- assertEquals(add_all_result_full, add_all_sequence_full(full_array));
- assertOptimized(add_all_sequence_full);
- assertEquals(1, fast_c_api.fast_call_count());
- assertEquals(0, fast_c_api.slow_call_count());
-} else {
- // Smi only test - regular call hits the fast path.
- fast_c_api.reset_counts();
- assertEquals(3, add_all_sequence_smi([-42, 45]));
- assertOptimized(add_all_sequence_smi);
- assertEquals(1, fast_c_api.fast_call_count());
- assertEquals(0, fast_c_api.slow_call_count());
-}
-
-function add_all_sequence_mismatch(arg) {
- return fast_c_api.add_all_sequence(false /*should_fallback*/, arg);
-}
-
-%PrepareFunctionForOptimization(add_all_sequence_mismatch);
-assertThrows(() => add_all_sequence_mismatch());
-%OptimizeFunctionOnNextCall(add_all_sequence_mismatch);
-
-// Test that passing non-array arguments falls down the slow path.
-fast_c_api.reset_counts();
-assertThrows(() => add_all_sequence_mismatch(42));
-assertOptimized(add_all_sequence_mismatch);
-assertEquals(0, fast_c_api.fast_call_count());
-assertEquals(1, fast_c_api.slow_call_count());
-
-fast_c_api.reset_counts();
-assertThrows(() => add_all_sequence_mismatch({}));
-assertOptimized(add_all_sequence_mismatch);
-assertEquals(0, fast_c_api.fast_call_count());
-assertEquals(1, fast_c_api.slow_call_count());
+// Smi only test - regular call hits the fast path.
+(function () {
+ function add_all_sequence() {
+ const arr = [-42, 45];
+ return fast_c_api.add_all_sequence(false /* should_fallback */, arr);
+ }
+ ExpectFastCall(add_all_sequence, 3);
+})();
-fast_c_api.reset_counts();
-assertThrows(() => add_all_sequence_mismatch('string'));
-assertOptimized(add_all_sequence_mismatch);
-assertEquals(0, fast_c_api.fast_call_count());
-assertEquals(1, fast_c_api.slow_call_count());
+(function () {
+ function add_all_sequence_mismatch(arg) {
+ return fast_c_api.add_all_sequence(false /*should_fallback*/, arg);
+ }
-fast_c_api.reset_counts();
-assertThrows(() => add_all_sequence_mismatch(Symbol()));
-assertOptimized(add_all_sequence_mismatch);
-assertEquals(0, fast_c_api.fast_call_count());
-assertEquals(1, fast_c_api.slow_call_count());
+ %PrepareFunctionForOptimization(add_all_sequence_mismatch);
+ add_all_sequence_mismatch();
+ %OptimizeFunctionOnNextCall(add_all_sequence_mismatch);
+ // Test that passing non-array arguments falls down the slow path.
+ assert_throws_and_optimized(add_all_sequence_mismatch, 42);
+ assert_throws_and_optimized(add_all_sequence_mismatch, {});
+ assert_throws_and_optimized(add_all_sequence_mismatch, 'string');
+ assert_throws_and_optimized(add_all_sequence_mismatch, Symbol());
+})();
//----------- Test function overloads with same arity. -----------
//Only overloads between JSArray and TypedArray are supported
@@ -102,21 +51,26 @@ assertEquals(1, fast_c_api.slow_call_count());
// Test with TypedArray.
(function () {
function overloaded_test(should_fallback = false) {
- let typed_array = new Uint32Array([1, 2, 3]);
+ let typed_array = new Uint32Array([1,2,3]);
return fast_c_api.add_all_overload(false /* should_fallback */,
typed_array);
}
+ ExpectFastCall(overloaded_test, 6);
+})();
- %PrepareFunctionForOptimization(overloaded_test);
- let result = overloaded_test();
- assertEquals(0, result);
+let large_array = [];
+for (let i = 0; i < 100; i++) {
+ large_array.push(i);
+}
- fast_c_api.reset_counts();
- %OptimizeFunctionOnNextCall(overloaded_test);
- result = overloaded_test();
- assertEquals(0, result);
- assertOptimized(overloaded_test);
- assertEquals(1, fast_c_api.fast_call_count());
+// Non-externalized TypedArray.
+(function () {
+ function overloaded_test(should_fallback = false) {
+ let typed_array = new Uint32Array(large_array);
+ return fast_c_api.add_all_overload(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectFastCall(overloaded_test, 4950);
})();
// Mismatched TypedArray.
@@ -126,17 +80,7 @@ assertEquals(1, fast_c_api.slow_call_count());
return fast_c_api.add_all_overload(false /* should_fallback */,
typed_array);
}
-
- %PrepareFunctionForOptimization(overloaded_test);
- let result = overloaded_test();
- assertEquals(0, result);
-
- fast_c_api.reset_counts();
- %OptimizeFunctionOnNextCall(overloaded_test);
- result = overloaded_test();
- assertEquals(0, result);
- assertOptimized(overloaded_test);
- assertEquals(0, fast_c_api.fast_call_count());
+ ExpectSlowCall(overloaded_test, 6.6);
})();
// Test with JSArray.
@@ -145,17 +89,7 @@ assertEquals(1, fast_c_api.slow_call_count());
let js_array = [26, -6, 42];
return fast_c_api.add_all_overload(false /* should_fallback */, js_array);
}
-
- %PrepareFunctionForOptimization(overloaded_test);
- let result = overloaded_test();
- assertEquals(62, result);
-
- fast_c_api.reset_counts();
- %OptimizeFunctionOnNextCall(overloaded_test);
- result = overloaded_test();
- assertEquals(62, result);
- assertOptimized(overloaded_test);
- assertEquals(1, fast_c_api.fast_call_count());
+ ExpectFastCall(overloaded_test, 62);
})();
// Test function overloads with undefined.
@@ -163,15 +97,7 @@ assertEquals(1, fast_c_api.slow_call_count());
function overloaded_test(should_fallback = false) {
return fast_c_api.add_all_overload(false /* should_fallback */, undefined);
}
-
- %PrepareFunctionForOptimization(overloaded_test);
- assertThrows(() => overloaded_test());
-
- fast_c_api.reset_counts();
- %OptimizeFunctionOnNextCall(overloaded_test);
- assertThrows(() => overloaded_test());
- assertOptimized(overloaded_test);
- assertEquals(0, fast_c_api.fast_call_count());
+ ExpectSlowCall(overloaded_test, 0);
})();
// Test function with invalid overloads.
@@ -197,3 +123,94 @@ assertEquals(1, fast_c_api.slow_call_count());
assertUnoptimized(overloaded_test);
assertEquals(0, fast_c_api.fast_call_count());
})();
+
+//----------- Test different TypedArray functions. -----------
+// ----------- add_all_<TYPE>_typed_array -----------
+// `add_all_<TYPE>_typed_array` have the following signature:
+// double add_all_<TYPE>_typed_array(bool /*should_fallback*/, FastApiTypedArray<TYPE>)
+
+(function () {
+ function int32_test(should_fallback = false) {
+ let typed_array = new Int32Array([-42, 1, 2, 3]);
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectFastCall(int32_test, -36);
+})();
+
+(function () {
+ function uint32_test(should_fallback = false) {
+ let typed_array = new Uint32Array([1, 2, 3]);
+ return fast_c_api.add_all_uint32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectFastCall(uint32_test, 6);
+})();
+
+(function () {
+ function detached_typed_array_test(should_fallback = false) {
+ let typed_array = new Int32Array([-42, 1, 2, 3]);
+ %ArrayBufferDetach(typed_array.buffer);
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectSlowCall(detached_typed_array_test, 0);
+})();
+
+(function () {
+ function detached_non_ext_typed_array_test(should_fallback = false) {
+ let typed_array = new Int32Array(large_array);
+ %ArrayBufferDetach(typed_array.buffer);
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectSlowCall(detached_non_ext_typed_array_test, 0);
+})();
+
+(function () {
+ function shared_array_buffer_ta_test(should_fallback = false) {
+ let sab = new SharedArrayBuffer(16);
+ let typed_array = new Int32Array(sab);
+ typed_array.set([-42, 1, 2, 3]);
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectSlowCall(shared_array_buffer_ta_test, -36);
+})();
+
+(function () {
+ function shared_array_buffer_ext_ta_test(should_fallback = false) {
+ let sab = new SharedArrayBuffer(400);
+ let typed_array = new Int32Array(sab);
+ typed_array.set(large_array);
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectSlowCall(shared_array_buffer_ext_ta_test, 4950);
+})();
+
+// Empty TypedArray.
+(function () {
+ function int32_test(should_fallback = false) {
+ let typed_array = new Int32Array(0);
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ typed_array);
+ }
+ ExpectFastCall(int32_test, 0);
+})();
+
+// Invalid argument types instead of a TypedArray.
+(function () {
+ function invalid_test(arg) {
+ return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
+ arg);
+ }
+ %PrepareFunctionForOptimization(invalid_test);
+ invalid_test(new Int32Array(0));
+ %OptimizeFunctionOnNextCall(invalid_test);
+
+ assert_throws_and_optimized(invalid_test, 42);
+ assert_throws_and_optimized(invalid_test, {});
+ assert_throws_and_optimized(invalid_test, 'string');
+ assert_throws_and_optimized(invalid_test, Symbol());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js b/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
index f56cae9c4e..deb914dc20 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
@@ -16,7 +16,10 @@ function inline_polymorphic(f) {
%PrepareFunctionForOptimization(inline_polymorphic);
inline_polymorphic(make_closure());
inline_polymorphic(make_closure());
-%OptimizeFunctionOnNextCall(inline_polymorphic);
+// Compile using the top tier since we need the value numbering phase for
+// TurbofanStaticAssert to deduce that the answer is 42 at compile time. In
+// Turboprop this phase is disabled.
+%OptimizeFunctionForTopTier(inline_polymorphic);
inline_polymorphic(make_closure());
try {
diff --git a/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js b/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js
index cc93eede86..269a1c184e 100644
--- a/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js
+++ b/deps/v8/test/mjsunit/compiler/is-being-interpreted-1.js
@@ -13,5 +13,5 @@ function foo() { return %IsBeingInterpreted(); }
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionOnNextCall(bar);
+%OptimizeFunctionForTopTier(bar);
assertFalse(bar());
diff --git a/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js b/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js
index 9d996eb94a..534b50871d 100644
--- a/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js
+++ b/deps/v8/test/mjsunit/compiler/is-being-interpreted-2.js
@@ -13,5 +13,5 @@ function foo() { return %IsBeingInterpreted(); }
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionOnNextCall(bar);
+%OptimizeFunctionForTopTier(bar);
assertTrue(bar());
diff --git a/deps/v8/test/mjsunit/compiler/js-create-arguments.js b/deps/v8/test/mjsunit/compiler/js-create-arguments.js
index e37ac06a55..dc2f8911b4 100644
--- a/deps/v8/test/mjsunit/compiler/js-create-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/js-create-arguments.js
@@ -37,5 +37,5 @@ function main() {
main();
main();
-%OptimizeFunctionOnNextCall(main);
+%OptimizeFunctionForTopTier(main);
main();
diff --git a/deps/v8/test/mjsunit/compiler/js-create.js b/deps/v8/test/mjsunit/compiler/js-create.js
index 6ddc1d164c..88eff498e8 100644
--- a/deps/v8/test/mjsunit/compiler/js-create.js
+++ b/deps/v8/test/mjsunit/compiler/js-create.js
@@ -29,5 +29,5 @@ bar({aaaa:1});
bar({aaaaa:1});
foo();
foo();
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
index 0f215c2d80..811dc753cc 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
@@ -30,7 +30,7 @@
%PrepareFunctionForOptimization(lit_const_smi);
lit_const_smi(); lit_const_smi();
- %OptimizeFunctionOnNextCall(lit_const_smi); lit_const_smi();
+ %OptimizeFunctionForTopTier(lit_const_smi); lit_const_smi();
function lit_const_object() {
@@ -46,7 +46,7 @@
%PrepareFunctionForOptimization(lit_const_object);
lit_const_object(); lit_const_object();
- %OptimizeFunctionOnNextCall(lit_const_object); lit_const_object();
+ %OptimizeFunctionForTopTier(lit_const_object); lit_const_object();
function lit_computed_smi(k) {
@@ -62,11 +62,11 @@
%PrepareFunctionForOptimization(lit_computed_smi);
lit_computed_smi(1); lit_computed_smi(2);
- %OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3);
+ %OptimizeFunctionForTopTier(lit_computed_smi); lit_computed_smi(3);
// TODO(bmeurer): Fix const tracking for double fields in object literals
// lit_computed_smi(1.1); lit_computed_smi(2.2);
- // %OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3.3);
+ // %OptimizeFunctionForTopTier(lit_computed_smi); lit_computed_smi(3.3);
function lit_param_object(k) {
@@ -81,7 +81,7 @@
%PrepareFunctionForOptimization(lit_param_object);
lit_param_object({x: 1}); lit_param_object({x: 2});
- %OptimizeFunctionOnNextCall(lit_param_object); lit_param_object({x: 3});
+ %OptimizeFunctionForTopTier(lit_param_object); lit_param_object({x: 3});
function nested_lit_param(k) {
@@ -96,11 +96,11 @@
%PrepareFunctionForOptimization(nested_lit_param);
nested_lit_param(1); nested_lit_param(2);
- %OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3);
+ %OptimizeFunctionForTopTier(nested_lit_param); nested_lit_param(3);
// TODO(bmeurer): Fix const tracking for double fields in object literals
// nested_lit_param(1.1); nested_lit_param(2.2);
- // %OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3.3);
+ // %OptimizeFunctionForTopTier(nested_lit_param); nested_lit_param(3.3);
function nested_lit_param_object(k) {
@@ -115,7 +115,7 @@
%PrepareFunctionForOptimization(nested_lit_param_object);
nested_lit_param_object({x: 1}); nested_lit_param_object({x: 2});
- %OptimizeFunctionOnNextCall(nested_lit_param_object);
+ %OptimizeFunctionForTopTier(nested_lit_param_object);
nested_lit_param_object({x: 3});
@@ -138,16 +138,16 @@
%PrepareFunctionForOptimization(inst_param);
inst_param(1); inst_param(2);
- %OptimizeFunctionOnNextCall(inst_param); inst_param(3);
+ %OptimizeFunctionForTopTier(inst_param); inst_param(3);
// TODO(gsps): Reenable once we fully support const field information
// tracking in the presence of pointer compression.
// inst_param(1.1); inst_param(2.2);
- // %OptimizeFunctionOnNextCall(inst_param); inst_param(3.3);
+ // %OptimizeFunctionForTopTier(inst_param); inst_param(3.3);
%PrepareFunctionForOptimization(inst_param);
inst_param({x: 1}); inst_param({x: 2});
- %OptimizeFunctionOnNextCall(inst_param); inst_param({x: 3});
+ %OptimizeFunctionForTopTier(inst_param); inst_param({x: 3});
function inst_computed(k) {
@@ -168,9 +168,9 @@
%PrepareFunctionForOptimization(inst_computed);
inst_computed(1); inst_computed(2);
- %OptimizeFunctionOnNextCall(inst_computed); inst_computed(3);
+ %OptimizeFunctionForTopTier(inst_computed); inst_computed(3);
%PrepareFunctionForOptimization(inst_computed);
inst_computed(1.1); inst_computed(2.2);
- %OptimizeFunctionOnNextCall(inst_computed); inst_computed(3.3);
+ %OptimizeFunctionForTopTier(inst_computed); inst_computed(3.3);
})();
diff --git a/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js b/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js
index 380a6ceac2..b8d1e93602 100644
--- a/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js
+++ b/deps/v8/test/mjsunit/compiler/opt-higher-order-functions.js
@@ -22,7 +22,7 @@ function TestFunctionPrototypeApply(x) {
%PrepareFunctionForOptimization(TestFunctionPrototypeApply);
assertEquals(TestFunctionPrototypeApply(-13), 13);
assertEquals(TestFunctionPrototypeApply(42), 42);
-%OptimizeFunctionOnNextCall(TestFunctionPrototypeApply);
+%OptimizeFunctionForTopTier(TestFunctionPrototypeApply);
assertEquals(TestFunctionPrototypeApply(-13), 13);
assertOptimized(TestFunctionPrototypeApply);
TestFunctionPrototypeApply("abc");
@@ -39,7 +39,7 @@ function TestFunctionPrototypeApplyReceiver(func, x, y) {
%PrepareFunctionForOptimization(TestFunctionPrototypeApplyReceiver);
assertEquals(-13, TestFunctionPrototypeApplyReceiver(MathMin, -13, 42));
assertEquals(-4, TestFunctionPrototypeApplyReceiver(MathMin, 3, -4));
-%OptimizeFunctionOnNextCall(TestFunctionPrototypeApplyReceiver);
+%OptimizeFunctionForTopTier(TestFunctionPrototypeApplyReceiver);
assertEquals(7, TestFunctionPrototypeApplyReceiver(MathMin, 7, 9));
assertOptimized(TestFunctionPrototypeApplyReceiver);
TestFunctionPrototypeApplyReceiver(MathMin, "abc");
@@ -60,14 +60,14 @@ assertUnoptimized(TestFunctionPrototypeApplyReceiver);
%PrepareFunctionForOptimization(foo);
%PrepareFunctionForOptimization(test);
assertEquals(-13, test(-13, 42));
- %OptimizeFunctionOnNextCall(test);
+ %OptimizeFunctionForTopTier(test);
assertEquals(-13, test(-13, 42));
assertOptimized(test);
%PrepareFunctionForOptimization(test);
F = Math.max;
assertEquals(42, test(-13, 42));
assertUnoptimized(test);
- %OptimizeFunctionOnNextCall(test);
+ %OptimizeFunctionForTopTier(test);
assertEquals(42, test(-13, 42));
F = Math.min;
assertEquals(-13, test(-13, 42));
@@ -82,7 +82,7 @@ function TestFunctionPrototypeCall(x) {
%PrepareFunctionForOptimization(TestFunctionPrototypeCall);
TestFunctionPrototypeCall(42);
TestFunctionPrototypeCall(52);
-%OptimizeFunctionOnNextCall(TestFunctionPrototypeCall);
+%OptimizeFunctionForTopTier(TestFunctionPrototypeCall);
TestFunctionPrototypeCall(12);
assertOptimized(TestFunctionPrototypeCall);
TestFunctionPrototypeCall("abc");
@@ -97,7 +97,7 @@ function TestArrayForEach(x) {
%PrepareFunctionForOptimization(TestArrayForEach);
TestArrayForEach([1, 3, -4]);
TestArrayForEach([-9, 9, 0]);
-%OptimizeFunctionOnNextCall(TestArrayForEach);
+%OptimizeFunctionForTopTier(TestArrayForEach);
TestArrayForEach([1, 3, -4]);
assertOptimized(TestArrayForEach);
TestArrayForEach(["abc", "xy"]);
@@ -112,7 +112,7 @@ function TestArrayReduce(x) {
%PrepareFunctionForOptimization(TestArrayReduce);
assertEquals(TestArrayReduce([1, 2, -3, 4]), -24);
assertEquals(TestArrayReduce([3, 5, 7]), 105);
-%OptimizeFunctionOnNextCall(TestArrayReduce);
+%OptimizeFunctionForTopTier(TestArrayReduce);
assertEquals(TestArrayReduce([1, 2, -3, 4]), -24);
assertOptimized(TestArrayReduce);
TestArrayReduce(["abc", "xy"]);
@@ -127,7 +127,7 @@ function TestArrayReduceRight(x) {
%PrepareFunctionForOptimization(TestArrayReduceRight);
assertEquals(TestArrayReduceRight([1, 2, -3, 4]), -24);
assertEquals(TestArrayReduceRight([3, 5, 7]), 105);
-%OptimizeFunctionOnNextCall(TestArrayReduceRight);
+%OptimizeFunctionForTopTier(TestArrayReduceRight);
assertEquals(TestArrayReduceRight([1, 2, -3, 4]), -24);
assertOptimized(TestArrayReduceRight);
TestArrayReduceRight(["abc", "xy"]);
@@ -142,7 +142,7 @@ function TestArrayMap(x) {
%PrepareFunctionForOptimization(TestArrayMap);
assertEquals(TestArrayMap([1, -2, -3, 4]), [1, 2, 3, 4]);
assertEquals(TestArrayMap([5, -5, 5, -5]), [5, 5, 5, 5]);
-%OptimizeFunctionOnNextCall(TestArrayMap);
+%OptimizeFunctionForTopTier(TestArrayMap);
assertEquals(TestArrayMap([1, -2, 3, -4]), [1, 2, 3, 4]);
assertOptimized(TestArrayMap);
TestArrayMap(["abc", "xy"]);
@@ -157,7 +157,7 @@ function TestArrayFilter(x) {
%PrepareFunctionForOptimization(TestArrayFilter);
assertEquals(TestArrayFilter([-2, 0, 3, -4]), [-2, 3, -4]);
assertEquals(TestArrayFilter([0, 1, 1, 0]), [1, 1]);
-%OptimizeFunctionOnNextCall(TestArrayFilter);
+%OptimizeFunctionForTopTier(TestArrayFilter);
assertEquals(TestArrayFilter([-2, 0, 3, -4]), [-2, 3, -4]);
assertOptimized(TestArrayFilter);
TestArrayFilter(["abc", "xy"]);
@@ -172,7 +172,7 @@ function TestArrayFind(x) {
%PrepareFunctionForOptimization(TestArrayFind);
assertEquals(TestArrayFind([0, 0, -3, 12]), -3);
assertEquals(TestArrayFind([0, -18]), -18);
-%OptimizeFunctionOnNextCall(TestArrayFind);
+%OptimizeFunctionForTopTier(TestArrayFind);
assertEquals(TestArrayFind([0, 0, -3, 12]), -3);
assertOptimized(TestArrayFind);
TestArrayFind(["", "abc", "xy"]);
@@ -187,7 +187,7 @@ function TestArrayFindIndex(x) {
%PrepareFunctionForOptimization(TestArrayFindIndex);
assertEquals(TestArrayFindIndex([0, 0, -3, 12]), 2);
assertEquals(TestArrayFindIndex([0, -18]), 1);
-%OptimizeFunctionOnNextCall(TestArrayFindIndex);
+%OptimizeFunctionForTopTier(TestArrayFindIndex);
assertEquals(TestArrayFindIndex([0, 0, -3, 12]), 2);
assertOptimized(TestArrayFindIndex);
TestArrayFindIndex(["", "abc", "xy"]);
@@ -202,7 +202,7 @@ function TestArrayEvery(x) {
%PrepareFunctionForOptimization(TestArrayEvery);
assertEquals(TestArrayEvery([3, 0, -9]), false);
assertEquals(TestArrayEvery([2, 12, -1]), true);
-%OptimizeFunctionOnNextCall(TestArrayEvery);
+%OptimizeFunctionForTopTier(TestArrayEvery);
assertEquals(TestArrayEvery([3, 0, -9]), false);
assertOptimized(TestArrayEvery);
TestArrayEvery(["abc", "xy"]);
@@ -217,7 +217,7 @@ function TestArraySome(x) {
%PrepareFunctionForOptimization(TestArraySome);
assertEquals(TestArraySome([3, 0, -9]), true);
assertEquals(TestArraySome([0, 0]), false);
-%OptimizeFunctionOnNextCall(TestArraySome);
+%OptimizeFunctionForTopTier(TestArraySome);
assertEquals(TestArraySome([3, 0, -9]), true);
assertOptimized(TestArraySome);
TestArraySome(["abc", "xy"]);
@@ -233,7 +233,7 @@ function TestJSCallWithJSFunction(x) {
%PrepareFunctionForOptimization(TestJSCallWithJSFunction);
assertEquals(TestJSCallWithJSFunction(-14), 42);
assertEquals(TestJSCallWithJSFunction(14), -42);
-%OptimizeFunctionOnNextCall(TestJSCallWithJSFunction);
+%OptimizeFunctionForTopTier(TestJSCallWithJSFunction);
assertEquals(TestJSCallWithJSFunction(-14), 42);
assertOptimized(TestJSCallWithJSFunction);
TestJSCallWithJSFunction("abc");
@@ -248,7 +248,7 @@ function TestJSCallWithJSBoundFunction(x) {
%PrepareFunctionForOptimization(TestJSCallWithJSBoundFunction);
assertEquals(TestJSCallWithJSBoundFunction(-14), 42);
assertEquals(TestJSCallWithJSBoundFunction(14), -42);
-%OptimizeFunctionOnNextCall(TestJSCallWithJSBoundFunction);
+%OptimizeFunctionForTopTier(TestJSCallWithJSBoundFunction);
assertEquals(TestJSCallWithJSBoundFunction(-14), 42);
assertOptimized(TestJSCallWithJSBoundFunction);
TestJSCallWithJSBoundFunction("abc");
@@ -268,7 +268,7 @@ function TestReflectApply(x) {
%PrepareFunctionForOptimization(TestReflectApply);
assertEquals(TestReflectApply(-9), 9);
assertEquals(TestReflectApply(7), 7);
-%OptimizeFunctionOnNextCall(TestReflectApply);
+%OptimizeFunctionForTopTier(TestReflectApply);
assertEquals(TestReflectApply(-9), 9);
assertOptimized(TestReflectApply);
TestReflectApply("abc");
@@ -288,7 +288,7 @@ function TestCallWithSpread(x) {
%PrepareFunctionForOptimization(TestCallWithSpread);
assertEquals(TestCallWithSpread(-13), 169);
assertEquals(TestCallWithSpread(7), 49);
-%OptimizeFunctionOnNextCall(TestCallWithSpread);
+%OptimizeFunctionForTopTier(TestCallWithSpread);
assertEquals(TestCallWithSpread(-13), 169);
assertOptimized(TestCallWithSpread);
TestCallWithSpread("abc");
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
index ae7f92a33d..6c86d9327c 100644
--- a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
@@ -21,7 +21,7 @@
%PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), Promise);
assertInstanceof(foo(), Promise);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertInstanceof(foo(), Promise);
assertOptimized(foo);
@@ -57,7 +57,7 @@
%PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), Promise);
assertInstanceof(foo(), Promise);
- %OptimizeFunctionOnNextCall(foo);
+ %OptimizeFunctionForTopTier(foo);
assertInstanceof(foo(), Promise);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1125145.js b/deps/v8/test/mjsunit/compiler/regress-1125145.js
index 58ae8640d8..2c2d2cc7c9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1125145.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1125145.js
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --stack-limit=100
function foo() {}
-for (let i = 0; i < 100000; ++i) {
+for (let i = 0; i < 10000; ++i) {
foo = foo.bind();
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-905555-2.js b/deps/v8/test/mjsunit/compiler/regress-905555-2.js
index f7e2a728c2..b59b6edb30 100644
--- a/deps/v8/test/mjsunit/compiler/regress-905555-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-905555-2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --block-concurrent-recompilation --noalways-opt
+// Flags: --allow-natives-syntax --noalways-opt
global = 1;
@@ -13,12 +13,13 @@ function boom(value) {
%PrepareFunctionForOptimization(boom);
assertEquals(1, boom());
assertEquals(1, boom());
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(boom, "concurrent");
assertEquals(1, boom());
+%WaitForBackgroundOptimization();
delete this.global;
-
-%UnblockConcurrentRecompilation();
+%FinalizeOptimization();
// boom should be deoptimized because the global property cell has changed.
assertUnoptimized(boom, "sync");
diff --git a/deps/v8/test/mjsunit/compiler/regress-905555.js b/deps/v8/test/mjsunit/compiler/regress-905555.js
index 72ccf9aa1d..b52ad0245d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-905555.js
+++ b/deps/v8/test/mjsunit/compiler/regress-905555.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --block-concurrent-recompilation --noalways-opt
+// Flags: --allow-natives-syntax --noalways-opt
global = 1;
@@ -13,14 +13,15 @@ function boom(value) {
%PrepareFunctionForOptimization(boom);
assertEquals(1, boom());
assertEquals(1, boom());
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(boom, "concurrent");
assertEquals(1, boom());
+%WaitForBackgroundOptimization();
this.__defineGetter__("global", () => 42);
-
-%UnblockConcurrentRecompilation();
+%FinalizeOptimization();
// boom should be deoptimized because the global property cell has changed.
-assertUnoptimized(boom, "sync");
+assertUnoptimized(boom);
assertEquals(42, boom());
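Note (not part of the patch): the two regress-905555 hunks above, like several later hunks, replace the old --block-concurrent-recompilation / %UnblockConcurrentRecompilation handshake with explicit finalization natives. A minimal sketch of the updated idiom, in plain mjsunit-style JavaScript; `f` is a placeholder for the function under test, not something taken from the patch:

// Requires --allow-natives-syntax and --concurrent-recompilation.
%PrepareFunctionForOptimization(f);
f();                                           // gather type feedback
%DisableOptimizationFinalization();            // queue optimized code instead of installing it
%OptimizeFunctionOnNextCall(f, "concurrent");  // request a background (concurrent) compile
f();                                           // triggers the concurrent compile
%WaitForBackgroundOptimization();              // background job finished, result not yet installed
// ...invalidate an assumption the compiler relied on (e.g. mutate a global or prototype)...
%FinalizeOptimization();                       // install or discard the queued code
assertUnoptimized(f);                          // compilation bailed out on the changed dependency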
diff --git a/deps/v8/test/mjsunit/compiler/regress-9137-1.js b/deps/v8/test/mjsunit/compiler/regress-9137-1.js
index 5743847dfd..017ca277c2 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9137-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9137-1.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt
-// Flags: --no-flush-bytecode --no-stress-flush-bytecode
+// Flags: --no-flush-bytecode --no-stress-flush-code
function changeMap(obj) {
obj.blub = 42;
diff --git a/deps/v8/test/mjsunit/compiler/regress-9137-2.js b/deps/v8/test/mjsunit/compiler/regress-9137-2.js
index 3642b06df4..5100045687 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9137-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9137-2.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt
-// Flags: --no-flush-bytecode --no-stress-flush-bytecode
+// Flags: --no-flush-bytecode --no-stress-flush-code
function changeMap(obj) {
obj.blub = 42;
diff --git a/deps/v8/test/mjsunit/compiler/regress-9945-1.js b/deps/v8/test/mjsunit/compiler/regress-9945-1.js
index 46ef6aa42e..6421d9bcd7 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9945-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9945-1.js
@@ -45,7 +45,7 @@ assertOptimized(bar);
// Instead we trigger optimization of foo, which will inline bar (this time
// based on the new PACKED_ELEMENTS map.
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(a);
assertOptimized(foo);
%PrepareFunctionForOptimization(foo);
@@ -61,6 +61,6 @@ assertOptimized(bar);
// Now ensure there is no deopt-loop. There used to be a deopt-loop because, as
// a result of over-eager checkpoint elimination, we used to deopt into foo
// (right before the call to bar) rather than into bar (right before the load).
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(b);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-9945-2.js b/deps/v8/test/mjsunit/compiler/regress-9945-2.js
index 005553a3ee..67f4350d42 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9945-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9945-2.js
@@ -33,7 +33,7 @@ foo(a);
foo(a);
// Trigger optimization of bar, based on PACKED_SMI_ELEMENTS feedback.
-%OptimizeFunctionOnNextCall(bar);
+%OptimizeFunctionForTopTier(bar);
bar(a);
assertOptimized(bar);
%PrepareFunctionForOptimization(bar);
@@ -49,7 +49,7 @@ assertOptimized(bar);
// Instead we trigger optimization of foo, which will inline bar (this time
// based on the new PACKED_ELEMENTS map.
assertOptimized(bar);
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
assertOptimized(bar);
foo(a);
assertOptimized(bar);
@@ -66,6 +66,6 @@ assertOptimized(bar);
// Now ensure there is no deopt-loop. There used to be a deopt-loop because, as
// a result of over-eager checkpoint elimination, we used to deopt into foo
// (right before the call to bar) rather than into bar (right before the load).
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(b);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-11977.js b/deps/v8/test/mjsunit/compiler/regress-crbug-11977.js
new file mode 100644
index 0000000000..908ed26b89
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-11977.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(str) {
+ return str.startsWith();
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(f('undefined'), true);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(f('undefined'), true);
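Note (not part of the patch): the new regress-crbug-11977.js test relies on String.prototype.startsWith coercing a missing search string with ToString, so an omitted argument becomes the literal string "undefined". A quick illustration, independent of the test file and runnable in any engine:

// Plain JavaScript; nothing here is specific to the patch.
'undefined'.startsWith();          // true  — omitted search string coerces to "undefined"
'undefined'.startsWith(undefined); // true  — the same coercion, spelled out
'defined'.startsWith();            // false — the receiver does not begin with "undefined"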
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1230260.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1230260.js
new file mode 100644
index 0000000000..0ba91a048f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1230260.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+
+function foo() {
+ String.prototype.startsWith.call(undefined, "");
+}
+%PrepareFunctionForOptimization(foo);
+assertThrows(foo);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo);
+
+function bar() {
+ "bla".startsWith("", Symbol(''));
+}
+%PrepareFunctionForOptimization(bar);
+assertThrows(bar);
+%OptimizeFunctionOnNextCall(bar);
+assertThrows(bar);
diff --git a/deps/v8/test/mjsunit/compiler/serializer-accessors.js b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
index 1281bed3df..2042b7f66a 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-accessors.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
@@ -37,6 +37,6 @@ function foo() {
foo();
foo();
-%OptimizeFunctionOnNextCall(foo);
expect_interpreted = false;
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/serializer-apply.js b/deps/v8/test/mjsunit/compiler/serializer-apply.js
index 20154b09ba..8f438b751c 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-apply.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-apply.js
@@ -23,5 +23,5 @@ function bar() {
%PrepareFunctionForOptimization(apply);
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionOnNextCall(bar);
+%OptimizeFunctionForTopTier(bar);
assertFalse(bar());
diff --git a/deps/v8/test/mjsunit/compiler/serializer-call.js b/deps/v8/test/mjsunit/compiler/serializer-call.js
index d4299a6880..2c62d3e361 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-call.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-call.js
@@ -23,5 +23,5 @@ function bar() {
%PrepareFunctionForOptimization(call);
assertTrue(bar());
assertTrue(bar());
-%OptimizeFunctionOnNextCall(bar);
+%OptimizeFunctionForTopTier(bar);
assertFalse(bar());
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
index 3367a08e3e..e10520da2b 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
@@ -39,5 +39,5 @@ assertTrue(main(true, true));
assertTrue(main(true, true));
assertTrue(main(false, true));
assertTrue(main(false, true));
-%OptimizeFunctionOnNextCall(main);
+%OptimizeFunctionForTopTier(main);
assertFalse(main(false));
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
index 3f24649f04..b5f311ab4d 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
@@ -37,5 +37,5 @@ assertTrue(main(true, true));
assertTrue(main(true, true));
assertTrue(main(false, true));
assertTrue(main(false, true));
-%OptimizeFunctionOnNextCall(main);
+%OptimizeFunctionForTopTier(main);
assertFalse(main(false));
diff --git a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js
index 13e88639db..ab20f06b8a 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-1.js
@@ -23,5 +23,5 @@ bar({bla: 1});
bar({blu: 1});
bar({blo: 1});
foo(obj);
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(obj);
diff --git a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js
index 5622719f7d..f4669bff76 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-feedback-propagation-2.js
@@ -28,5 +28,5 @@ bar({bla: 1});
bar({blu: 1});
bar({blo: 1});
foo(obj);
-%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionForTopTier(foo);
foo(obj);
diff --git a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
index ff7a1c5a2b..b2e2ee9163 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
@@ -54,6 +54,6 @@ var g = new G;
foo();
foo();
-%OptimizeFunctionOnNextCall(foo);
expect_interpreted = false;
+%OptimizeFunctionForTopTier(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/string-startswith.js b/deps/v8/test/mjsunit/compiler/string-startswith.js
index c060a5e67b..97849c3125 100644
--- a/deps/v8/test/mjsunit/compiler/string-startswith.js
+++ b/deps/v8/test/mjsunit/compiler/string-startswith.js
@@ -79,3 +79,16 @@
%OptimizeFunctionOnNextCall(f);
assertEquals(false, f(1073741824));
})();
+
+(function() {
+ function f(str) {
+ return str.startsWith('');
+ }
+
+ %PrepareFunctionForOptimization(f);
+ f('foo');
+ f('');
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f('foo'), true);
+ assertEquals(f(''), true);
+})();
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
index c2135bfaf7..981e8e6bb6 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
@@ -26,11 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --concurrent-recompilation
// Flags: --nostress-opt --no-always-opt
// Flags: --no-turboprop
-// Flags: --no-concurrent-inlining
-// Flags: --no-turbo-concurrent-get-property-access-info
// --nostress-opt is in place because this particular optimization
// (guaranteeing that the Array prototype chain has no elements) is
@@ -54,20 +52,20 @@ assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
// Optimized code of f1 depends on initial object and array maps.
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(f1, "concurrent");
// Kick off recompilation. Note that the NoElements protector is read by the
// compiler in the main-thread phase of compilation, i.e., before the store to
// Object.prototype below.
assertEquals(0.5, f1(arr, 0));
// Invalidate current initial object map after compile graph has been created.
+%WaitForBackgroundOptimization();
Object.prototype[1] = 1.5;
assertEquals(2, f1(arr, 1));
-// Not yet optimized since concurrent recompilation is blocked.
assertUnoptimized(f1, "no sync");
-// Let concurrent recompilation proceed.
-%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization, which bails out
// due to map dependency.
+%FinalizeOptimization();
assertUnoptimized(f1, "sync");
// Clear type info for stress runs.
%ClearFunctionFeedback(f1);
diff --git a/deps/v8/test/mjsunit/const-dict-tracking.js b/deps/v8/test/mjsunit/const-dict-tracking.js
index c737cbda2a..c5f7ee3af3 100644
--- a/deps/v8/test/mjsunit/const-dict-tracking.js
+++ b/deps/v8/test/mjsunit/const-dict-tracking.js
@@ -3,9 +3,7 @@
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --opt --no-always-opt
-// Flags: --no-stress-flush-bytecode
-// Flags: --block-concurrent-recompilation
-// Flags: --no-turbo-concurrent-get-property-access-info
+// Flags: --no-stress-flush-code --concurrent-recompilation
//
// Tests tracking of constness of properties stored in dictionary
// mode prototypes.
@@ -714,14 +712,17 @@ function testbench(o, proto, update_proto, check_constness) {
%PrepareFunctionForOptimization(read_length);
assertEquals(1, read_length(o));
+ %DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(read_length, "concurrent");
assertEquals(1, read_length(o));
assertUnoptimized(read_length, "no sync");
+ %WaitForBackgroundOptimization();
var other_proto1 = [];
Object.setPrototypeOf(proto2, other_proto1);
- %UnblockConcurrentRecompilation();
- assertUnoptimized(read_length, "sync");
+ %FinalizeOptimization();
+
+ assertUnoptimized(read_length);
assertEquals(0, read_length(o));
if (%IsDictPropertyConstTrackingEnabled()) {
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index c855f792af..6587a9690f 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax --nostress-opt --opt
-// Flags: --no-stress-flush-bytecode --no-lazy-feedback-allocation
+// Flags: --no-stress-flush-code --no-lazy-feedback-allocation
function test(f, iterations) {
%PrepareFunctionForOptimization(f);
diff --git a/deps/v8/test/mjsunit/deopt-unlinked.js b/deps/v8/test/mjsunit/deopt-unlinked.js
index 422631450b..1bf27477ea 100644
--- a/deps/v8/test/mjsunit/deopt-unlinked.js
+++ b/deps/v8/test/mjsunit/deopt-unlinked.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
// The deopt count is stored in the feedback vector which gets cleared when
// bytecode is flushed, which --gc-interval can cause in stress modes.
-// Flags: --noflush-bytecode --nostress-flush-bytecode
+// Flags: --no-flush-bytecode --no-stress-flush-code
function foo() {};
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/es6/classes-constructor.js b/deps/v8/test/mjsunit/es6/classes-constructor.js
index faf9404f07..8425725055 100644
--- a/deps/v8/test/mjsunit/es6/classes-constructor.js
+++ b/deps/v8/test/mjsunit/es6/classes-constructor.js
@@ -25,11 +25,12 @@
throw Error('Should not happen!');
}
- // ES6 9.2.1[[Call]] throws a TypeError in the caller context/Realm when the
- // called function is a classConstructor
+ // https://tc39.es/ecma262/#sec-ecmascript-function-objects-call-thisargument-argumentslist
+ // 10.2.1 [[Call]] throws a TypeError created in callee context with F's
+ // associated Realm Record when the called function is a classConstructor
assertThrows(function() { Realm.eval(realmIndex, "A()") }, otherTypeError);
- assertThrows(function() { instance.constructor() }, TypeError);
- assertThrows(function() { A() }, TypeError);
+ assertThrows(function() { instance.constructor() }, otherTypeError);
+ assertThrows(function() { A() }, otherTypeError);
// ES6 9.3.1 call() first activates the callee context before invoking the
// method. The TypeError from the constructor is thus thrown in the other
diff --git a/deps/v8/test/mjsunit/es9/object-rest-basic.js b/deps/v8/test/mjsunit/es9/object-rest-basic.js
index caaee6f40c..b24334540b 100644
--- a/deps/v8/test/mjsunit/es9/object-rest-basic.js
+++ b/deps/v8/test/mjsunit/es9/object-rest-basic.js
@@ -89,7 +89,7 @@ var p = new Proxy({}, {
});
assertThrows(() => { var { ...y } = p });
-var z = { b: 1}
+var z = { b: 1};
var p = new Proxy(z, {
ownKeys() { return Object.keys(z); },
get(_, prop) { return z[prop]; },
@@ -97,9 +97,20 @@ var p = new Proxy(z, {
return Object.getOwnPropertyDescriptor(z, prop);
},
});
-var { ...y } = p ;
+var { ...y } = p;
assertEquals(z, y);
+var z = { 1: 1, 2: 2, 3: 3 };
+var p = new Proxy(z, {
+ ownKeys() { return ['1', '2']; },
+ getOwnPropertyDescriptor(_, prop) {
+ return Object.getOwnPropertyDescriptor(z, prop);
+ },
+});
+var { 1: x, ...y } = p;
+assertEquals(1, x);
+assertEquals({ 2: 2 }, y);
+
var z = { b: 1}
var { ...y } = { ...z} ;
assertEquals(z, y);
diff --git a/deps/v8/test/mjsunit/harmony/array-findlast-unscopables.js b/deps/v8/test/mjsunit/harmony/array-findlast-unscopables.js
new file mode 100644
index 0000000000..ac9ae268fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-findlast-unscopables.js
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-find-last
+
+var findLast = 'local findLast';
+var findLastIndex = 'local findLastIndex';
+
+var array = [];
+
+with (array) {
+ assertEquals('local findLast', findLast);
+ assertEquals('local findLastIndex', findLastIndex);
+}
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-findlast.js b/deps/v8/test/mjsunit/harmony/array-prototype-findlast.js
new file mode 100644
index 0000000000..012dca1b69
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-findlast.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-find-last
+
+(function () {
+ var array = [,];
+
+ function findLast() {
+ return array.findLast(v => v > 0);
+ }
+
+ assertEquals(findLast(), undefined);
+
+ array.__proto__.push(6);
+ assertEquals(findLast(), 6);
+
+ array = [6, -1, 5];
+ assertEquals(findLast(), 5);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-findlastindex.js b/deps/v8/test/mjsunit/harmony/array-prototype-findlastindex.js
new file mode 100644
index 0000000000..dda48f5c5a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-findlastindex.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-find-last
+
+(function () {
+ var array = [,];
+
+ function findLastIndex() {
+ return array.findLastIndex(v => v > 0);
+ }
+
+ assertEquals(findLastIndex(), -1);
+
+ array.__proto__.push(6);
+ assertEquals(findLastIndex(), 0);
+
+ array = [6, -1, 5];
+ assertEquals(findLastIndex(), 2);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/bigint/div-special-cases.js b/deps/v8/test/mjsunit/harmony/bigint/div-special-cases.js
new file mode 100644
index 0000000000..f29cf2f9af
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/div-special-cases.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test for crbug.com/1233397.
+let y_power = 13311n * 64n; // Large enough to choose the Barrett algorithm.
+// A couple of digits and a couple of intra-digit bits larger.
+let x_power = y_power + 50n * 64n + 30n;
+let x = 2n ** x_power;
+let y = 2n ** y_power;
+let q = x / y;
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regress-minuszero.js b/deps/v8/test/mjsunit/harmony/bigint/regress-minuszero.js
new file mode 100644
index 0000000000..164e08d616
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/regress-minuszero.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertTrue(BigInt("-0 ") == -0);
+assertTrue("-0 " == 0n);
+assertTrue(BigInt("-0") == -0);
+assertTrue(-0n == -0);
+assertTrue(-0n == 0n);
+
+assertTrue(BigInt("-0 ") > -1);
+assertTrue("-0 " > -1n);
+assertTrue(BigInt("-0") > -1);
+assertTrue(-0n > -1);
+
+assertEquals(BigInt("-0 ") & 1n, 0n);
+assertEquals(BigInt("-0") & 1n, 0n);
+assertEquals(-0n & 1n, 0n);
+
+var zero = BigInt("-0 ");
+assertEquals(1n, ++zero);
+zero = BigInt("-0");
+assertEquals(1n, ++zero);
+zero = -0n;
+assertEquals(1n, ++zero);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regress-tostring-2.js b/deps/v8/test/mjsunit/harmony/bigint/regress-tostring-2.js
new file mode 100644
index 0000000000..7b8d63ec7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/regress-tostring-2.js
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Specific regression test for crbug.com/1236694.
+let long = '1000000000000000000000000000000000000000000000'.repeat(20) + '0';
+let short = '100000000000000000000000000000000000000000000'.repeat(20) + '0';
+BigInt(long).toLocaleString();
+BigInt(short).toLocaleString();
+
+// Generalized to test a range of similar inputs. Considerations to keep
+// execution times reasonable while testing interesting cases:
+// - The number of zeros should grow large enough to potentially fill two
+// entire digits (i.e. >= 38), which makes the recursion take the early
+// termination path, which is worthy of test coverage.
+// - The number of repeats should grow large enough to shift any bug-triggering
+// bit pattern to any position in a digit, i.e. >= 64.
+// - Fewer repeats may be easier to debug in case of failure, but likely don't
+// provide additional test coverage, so we test very few distinct values.
+// - To test the fast algorithm, (zeros+1)*repeats must be >= 810 or so.
+function test(zeros, repeats) {
+ let chunk = '1' + '0'.repeat(zeros);
+ let input = chunk.repeat(repeats);
+ assertEquals(input, BigInt(input).toString(),
+ `bug for ${zeros} zeros repeated ${repeats} times`);
+}
+for (let zeros = 1; zeros < 50; zeros++) {
+ for (let repeats = 64; repeats > 0; repeats -= 20) {
+ test(zeros, repeats);
+ }
+}
+test(96, 11); // Found to hit the extra-early recursion termination path.
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regress-tostring.js b/deps/v8/test/mjsunit/harmony/bigint/regress-tostring.js
new file mode 100644
index 0000000000..b985f5ba80
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/regress-tostring.js
@@ -0,0 +1,8 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test for crbug.com/1232733
+let big = 10n ** 900n;
+let expected = "1" + "0".repeat(900);
+assertEquals(expected, big.toString());
diff --git a/deps/v8/test/mjsunit/harmony/error-cause.js b/deps/v8/test/mjsunit/harmony/error-cause.js
index eeb1976803..4595b85814 100644
--- a/deps/v8/test/mjsunit/harmony/error-cause.js
+++ b/deps/v8/test/mjsunit/harmony/error-cause.js
@@ -19,6 +19,8 @@
(function () {
const err = Error('message');
assertEquals(undefined, err.cause);
+ assertFalse('cause' in err);
+ assertFalse('cause' in Error.prototype);
})();
// Chained errors
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-indirection.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-indirection.mjs
new file mode 100644
index 0000000000..eda9c56a42
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-indirection.mjs
@@ -0,0 +1,6 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "./modules-skip-top-level-await-cycle-error.mjs";
+await Promise.resolve();
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-throwing.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-throwing.mjs
new file mode 100644
index 0000000000..5c1c408874
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error-throwing.mjs
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+throw new Error("Error in modules-skip-top-level-await-cycle-error-throwing.mjs");
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error.mjs
new file mode 100644
index 0000000000..172b5d81b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-top-level-await-cycle-error.mjs
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "./modules-skip-top-level-await-cycle-error-indirection.mjs";
+import "./modules-skip-top-level-await-cycle-error-throwing.mjs";
+export function error() {}
diff --git a/deps/v8/test/mjsunit/harmony/modules-top-level-await-cycle-error.mjs b/deps/v8/test/mjsunit/harmony/modules-top-level-await-cycle-error.mjs
new file mode 100644
index 0000000000..97dc11b6d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-top-level-await-cycle-error.mjs
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags:--harmony-top-level-await --ignore-unhandled-promises
+
+try {
+ await import('./modules-skip-top-level-await-cycle-error.mjs');
+ assertUnreachable();
+} catch(e) {
+ assertEquals(e.message, 'Error in modules-skip-top-level-await-cycle-error-throwing.mjs');
+}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js b/deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js
deleted file mode 100644
index 06cf89ad06..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-regexp-match-indices
-
-// Redefined hasIndices should not reflect in flags without
-// --harmony-regexp-match-indices
-{
- let re = /./;
- Object.defineProperty(re, "hasIndices", { get: function() { return true; } });
- assertEquals("", re.flags);
-}
diff --git a/deps/v8/test/mjsunit/harmony/typedarray-findlast.js b/deps/v8/test/mjsunit/harmony/typedarray-findlast.js
new file mode 100644
index 0000000000..4275d53c4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/typedarray-findlast.js
@@ -0,0 +1,226 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-array-find-last
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array];
+
+for (var constructor of typedArrayConstructors) {
+
+assertEquals(1, constructor.prototype.findLast.length);
+
+var a = new constructor([21, 22, 23, 24]);
+assertEquals(undefined, a.findLast(function() { return false; }));
+assertEquals(24, a.findLast(function() { return true; }));
+assertEquals(undefined, a.findLast(function(val) { return 121 === val; }));
+assertEquals(24, a.findLast(function(val) { return 24 === val; }));
+assertEquals(23, a.findLast(function(val) { return 23 === val; }), null);
+assertEquals(22, a.findLast(function(val) { return 22 === val; }), undefined);
+
+
+//
+// Test predicate is not called when array is empty
+//
+(function() {
+ var a = new constructor([]);
+ var l = -1;
+ var o = -1;
+ var v = -1;
+ var k = -1;
+
+ a.findLast(function(val, key, obj) {
+ o = obj;
+ l = obj.length;
+ v = val;
+ k = key;
+
+ return false;
+ });
+
+ assertEquals(-1, l);
+ assertEquals(-1, o);
+ assertEquals(-1, v);
+ assertEquals(-1, k);
+})();
+
+
+//
+// Test predicate is called with correct arguments
+//
+(function() {
+ var a = new constructor([5]);
+ var l = -1;
+ var o = -1;
+ var v = -1;
+ var k = -1;
+
+ var found = a.findLast(function(val, key, obj) {
+ o = obj;
+ l = obj.length;
+ v = val;
+ k = key;
+
+ return false;
+ });
+
+ assertArrayEquals(a, o);
+ assertEquals(a.length, l);
+ assertEquals(5, v);
+ assertEquals(0, k);
+ assertEquals(undefined, found);
+})();
+
+
+//
+// Test predicate is called array.length times
+//
+(function() {
+ var a = new constructor([1, 2, 3, 4, 5]);
+ var l = 0;
+ var found = a.findLast(function() {
+ l++;
+ return false;
+ });
+
+ assertEquals(a.length, l);
+ assertEquals(undefined, found);
+})();
+
+
+//
+// Test array modifications
+//
+(function() {
+ a = new constructor([1, 2, 3]);
+ found = a.findLast(function(val, key) { a[key] = ++val; return false; });
+ assertArrayEquals([2, 3, 4], a);
+ assertEquals(3, a.length);
+ assertEquals(undefined, found);
+})();
+
+//
+// Test thisArg
+//
+(function() {
+ // Test String as a thisArg
+ var found = new constructor([1, 2, 3]).findLast(function(val, key) {
+ return this.charAt(Number(key)) === String(val);
+ }, "321");
+ assertEquals(2, found);
+
+ // Test object as a thisArg
+ var thisArg = {
+ elementAt: function(key) {
+ return this[key];
+ }
+ };
+ Array.prototype.push.apply(thisArg, [3, 2, 1]);
+
+ found = new constructor([1, 2, 3]).findLast(function(val, key) {
+ return this.elementAt(key) === val;
+ }, thisArg);
+ assertEquals(2, found);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ new constructor([1, 2]).findLast(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ new constructor([1, 2]).findLast(function() { a.push(this) }, {});
+ assertEquals(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ new constructor([1, 2]).findLast(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
+
+// Test exceptions
+assertThrows('constructor.prototype.findLast.call(null, function() { })',
+ TypeError);
+assertThrows('constructor.prototype.findLast.call(undefined, function() { })',
+ TypeError);
+assertThrows('constructor.prototype.findLast.apply(null, function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLast.apply(undefined, function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLast.apply([], function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLast.apply({}, function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLast.apply("", function() { }, [])',
+ TypeError);
+
+assertThrows('new constructor([]).findLast(null)', TypeError);
+assertThrows('new constructor([]).findLast(undefined)', TypeError);
+assertThrows('new constructor([]).findLast(0)', TypeError);
+assertThrows('new constructor([]).findLast(true)', TypeError);
+assertThrows('new constructor([]).findLast(false)', TypeError);
+assertThrows('new constructor([]).findLast("")', TypeError);
+assertThrows('new constructor([]).findLast({})', TypeError);
+assertThrows('new constructor([]).findLast([])', TypeError);
+assertThrows('new constructor([]).findLast(/\d+/)', TypeError);
+
+// Shadowing length doesn't affect findLast, unlike Array.prototype.findLast
+a = new constructor([1, 2]);
+Object.defineProperty(a, 'length', {value: 1});
+var x = 0;
+assertEquals(a.findLast(function(elt) { x += elt; return false; }), undefined);
+assertEquals(x, 3);
+assertEquals(Array.prototype.findLast.call(a,
+ function(elt) { x += elt; return false; }), undefined);
+assertEquals(x, 4);
+
+// Detached Operation
+var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is detached.");
+ return 0;
+ }
+};
+
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+%ArrayBufferDetach(array.buffer);
+
+assertThrows(() => array.findLast(tmp), TypeError);
+
+//
+// Test detaching in predicate.
+//
+(function() {
+
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+var values = [];
+assertEquals(array.findLast((value) => {
+ values.push(value);
+ if (value === 5) {
+ %ArrayBufferDetach(array.buffer);
+ }
+}), undefined);
+assertEquals(values, [10, 9, 8, 7, 6, 5, undefined, undefined, undefined, undefined]);
+
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+assertEquals(array.findLast((value, idx) => {
+ if (value !== undefined) {
+ %ArrayBufferDetach(array.buffer);
+ }
+ return idx === 0;
+}), undefined);
+})();
+}
diff --git a/deps/v8/test/mjsunit/harmony/typedarray-findlastindex.js b/deps/v8/test/mjsunit/harmony/typedarray-findlastindex.js
new file mode 100644
index 0000000000..9a82a6dd50
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/typedarray-findlastindex.js
@@ -0,0 +1,224 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-array-find-last
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array];
+
+for (var constructor of typedArrayConstructors) {
+
+assertEquals(1, constructor.prototype.findLastIndex.length);
+
+var a = new constructor([21, 22, 23, 24]);
+assertEquals(-1, a.findLastIndex(function() { return false; }));
+assertEquals(-1, a.findLastIndex(function(val) { return 121 === val; }));
+assertEquals(3, a.findLastIndex(function() { return true; }));
+assertEquals(1, a.findLastIndex(function(val) { return 22 === val; }), undefined);
+assertEquals(2, a.findLastIndex(function(val) { return 23 === val; }), null);
+assertEquals(3, a.findLastIndex(function(val) { return 24 === val; }));
+
+
+//
+// Test predicate is not called when array is empty
+//
+(function() {
+ var a = new constructor([]);
+ var l = -1;
+ var o = -1;
+ var v = -1;
+ var k = -1;
+
+ a.findLastIndex(function(val, key, obj) {
+ o = obj;
+ l = obj.length;
+ v = val;
+ k = key;
+
+ return false;
+ });
+
+ assertEquals(-1, l);
+ assertEquals(-1, o);
+ assertEquals(-1, v);
+ assertEquals(-1, k);
+})();
+
+
+//
+// Test predicate is called with correct arguments
+//
+(function() {
+ var a = new constructor([5]);
+ var l = -1;
+ var o = -1;
+ var v = -1;
+ var k = -1;
+
+ var index = a.findLastIndex(function(val, key, obj) {
+ o = obj;
+ l = obj.length;
+ v = val;
+ k = key;
+
+ return false;
+ });
+
+ assertArrayEquals(a, o);
+ assertEquals(a.length, l);
+ assertEquals(5, v);
+ assertEquals(0, k);
+ assertEquals(-1, index);
+})();
+
+
+//
+// Test predicate is called array.length times
+//
+(function() {
+ var a = new constructor([1, 2, 3, 4, 5]);
+ var l = 0;
+
+ a.findLastIndex(function() {
+ l++;
+ return false;
+ });
+
+ assertEquals(a.length, l);
+})();
+
+
+//
+// Test array modifications
+//
+(function() {
+ a = new constructor([1, 2, 3]);
+ a.findLastIndex(function(val, key) { a[key] = ++val; return false; });
+ assertArrayEquals([2, 3, 4], a);
+ assertEquals(3, a.length);
+})();
+
+
+//
+// Test thisArg
+//
+(function() {
+ // Test String as a thisArg
+ var index = new constructor([1, 2, 3]).findLastIndex(function(val, key) {
+ return this.charAt(Number(key)) === String(val);
+ }, "321");
+ assertEquals(1, index);
+
+ // Test object as a thisArg
+ var thisArg = {
+ elementAt: function(key) {
+ return this[key];
+ }
+ };
+ Array.prototype.push.apply(thisArg, [3, 2, 1]);
+
+ index = new constructor([1, 2, 3]).findLastIndex(function(val, key) {
+ return this.elementAt(key) === val;
+ }, thisArg);
+ assertEquals(1, index);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ new constructor([1, 2]).findLastIndex(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ new constructor([1, 2]).findLastIndex(function() { a.push(this) }, {});
+ assertEquals(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ new constructor([1, 2]).findLastIndex(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
+
+// Test exceptions
+assertThrows('constructor.prototype.findLastIndex.call(null, function() { })',
+ TypeError);
+assertThrows('constructor.prototype.findLastIndex.call(undefined, function() { })',
+ TypeError);
+assertThrows('constructor.prototype.findLastIndex.apply(null, function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLastIndex.apply(undefined, function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLastIndex.apply([], function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLastIndex.apply({}, function() { }, [])',
+ TypeError);
+assertThrows('constructor.prototype.findLastIndex.apply("", function() { }, [])',
+ TypeError);
+
+assertThrows('new constructor([]).findLastIndex(null)', TypeError);
+assertThrows('new constructor([]).findLastIndex(undefined)', TypeError);
+assertThrows('new constructor([]).findLastIndex(0)', TypeError);
+assertThrows('new constructor([]).findLastIndex(true)', TypeError);
+assertThrows('new constructor([]).findLastIndex(false)', TypeError);
+assertThrows('new constructor([]).findLastIndex("")', TypeError);
+assertThrows('new constructor([]).findLastIndex({})', TypeError);
+assertThrows('new constructor([]).findLastIndex([])', TypeError);
+assertThrows('new constructor([]).findLastIndex(/\d+/)', TypeError);
+
+// Shadowing length doesn't affect findLastIndex, unlike Array.prototype.findLastIndex
+a = new constructor([1, 2]);
+Object.defineProperty(a, 'length', {value: 1});
+var x = 0;
+assertEquals(a.findLastIndex(function(elt) { x += elt; return false; }), -1);
+assertEquals(x, 3);
+assertEquals(Array.prototype.findLastIndex.call(a,
+ function(elt) { x += elt; return false; }), -1);
+assertEquals(x, 4);
+
+// Detached Operation
+var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is detached.");
+ return 0;
+ }
+};
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+%ArrayBufferDetach(array.buffer);
+assertThrows(() => array.findLastIndex(tmp), TypeError);
+
+//
+// Test detaching in predicate.
+//
+(function() {
+
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+var values = [];
+assertEquals(array.findLastIndex((value, idx) => {
+ values.push(value);
+ if (value === 5) {
+ %ArrayBufferDetach(array.buffer);
+ }
+}), -1);
+assertEquals(values, [10, 9, 8, 7, 6, 5, undefined, undefined, undefined, undefined]);
+
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+assertEquals(array.findLastIndex((value, idx) => {
+ if (value !== undefined) {
+ %ArrayBufferDetach(array.buffer);
+ }
+ return idx === 0;
+}), 0);
+})();
+}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
index 7db4d44a6a..3262442b2f 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --expose-gc --noincremental-marking
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
let cleanup0_call_count = 0;
let cleanup0_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js b/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js
new file mode 100644
index 0000000000..ecc2a239f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// https://bugs.chromium.org/p/v8/issues/detail?id=10816
+// When V8 sees a deprecated map, update the IC with its target.
+
+function A() { this.x = 1 }
+function B() { this.x = 1 }
+function load(o) { return o.x }
+%PrepareFunctionForOptimization(load);
+
+// Initialize the load IC with a map that will not be deprecated.
+load(new A());
+
+const oldB = new B();
+(new B()).x = 1.5; // deprecates map
+
+// Should add the target of the deprecated map to the load IC.
+load(oldB);
+
+%OptimizeFunctionOnNextCall(load);
+load(oldB);
+assertOptimized(load);
diff --git a/deps/v8/test/mjsunit/ic-migrated-map-add-when-uninitialized.js b/deps/v8/test/mjsunit/ic-migrated-map-add-when-uninitialized.js
new file mode 100644
index 0000000000..d94bd2a2f2
--- /dev/null
+++ b/deps/v8/test/mjsunit/ic-migrated-map-add-when-uninitialized.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// https://bugs.chromium.org/p/v8/issues/detail?id=10816
+// When V8 sees a deprecated map, update the IC with its target.
+
+function A() { this.x = 1 }
+function B() { this.x = 1 }
+function load(o) { return o.x }
+%PrepareFunctionForOptimization(load);
+
+const oldB = new B();
+(new B()).x = 1.5; // deprecates map
+
+// Should add the target of the deprecated map to the load IC.
+load(oldB);
+
+%OptimizeFunctionOnNextCall(load);
+load(oldB);
+assertOptimized(load);
diff --git a/deps/v8/test/mjsunit/ic-migrated-map-update-when-deprecated.js b/deps/v8/test/mjsunit/ic-migrated-map-update-when-deprecated.js
new file mode 100644
index 0000000000..3a040c1596
--- /dev/null
+++ b/deps/v8/test/mjsunit/ic-migrated-map-update-when-deprecated.js
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// https://bugs.chromium.org/p/v8/issues/detail?id=10816
+// When V8 sees a deprecated map, update the IC with its target.
+
+function A() { this.x = 1 }
+function B() { this.x = 1 }
+function load(o) { return o.x }
+%PrepareFunctionForOptimization(load);
+
+// Initialize the load IC with the map of A before we deprecate it.
+load(new A());
+
+const oldB = new B();
+(new A()).x = 1.5; // deprecates map already in FV
+(new B()).x = 1.5; // deprecates map for oldB
+
+// Should update the load IC with the target of the deprecated map.
+load(oldB);
+
+%OptimizeFunctionOnNextCall(load);
+load(oldB);
+assertOptimized(load);
diff --git a/deps/v8/test/mjsunit/interrupt-budget-override.js b/deps/v8/test/mjsunit/interrupt-budget-override.js
index 5f83b3ccc5..37d6a13a95 100644
--- a/deps/v8/test/mjsunit/interrupt-budget-override.js
+++ b/deps/v8/test/mjsunit/interrupt-budget-override.js
@@ -16,4 +16,5 @@ function f() {
f();
f();
f();
+%FinalizeOptimization();
assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/json-parser-recursive.js b/deps/v8/test/mjsunit/json-parser-recursive.js
index 0f086e39c3..ad3214bdf6 100644
--- a/deps/v8/test/mjsunit/json-parser-recursive.js
+++ b/deps/v8/test/mjsunit/json-parser-recursive.js
@@ -24,9 +24,11 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --stack-size=100
var str = "[1]";
-for (var i = 0; i < 100000; i++) {
+for (var i = 0; i < 10000; i++) {
str = "[1," + str + "]";
}
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 841b0b80bb..4e4e5ec61b 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -34,11 +34,18 @@
'regress/modules-skip*': [SKIP],
'wasm/exceptions-utils': [SKIP],
'wasm/wasm-module-builder': [SKIP],
+ 'compiler/fast-api-helpers': [SKIP],
+ 'typedarray-helpers': [SKIP],
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
##############################################################################
+ # Temporal tests to be implemented
+ # https://crbug.com/v8/11544
+ 'temporal/*': [FAIL],
+
+ ##############################################################################
# Open bugs.
# BUG(v8:2989).
@@ -73,6 +80,9 @@
# https://crbug.com/1129854
'tools/log': ['arch == arm or arch == arm64', SKIP],
+ # https://crbug.com/v8/10948
+ 'wasm/atomics': [PASS, ['arch == arm and not simulator_run', SKIP]],
+
##############################################################################
# Tests where variants make no sense.
'd8/enable-tracing': [PASS, NO_VARIANTS],
@@ -406,8 +416,6 @@
'es6/unicode-regexp-ignore-case': [FAIL],
'regress/regress-5036': [FAIL],
'es7/regexp-ui-word': [FAIL],
- 'regexp-modifiers-i18n': [FAIL],
- 'regexp-modifiers-autogenerated-i18n': [FAIL],
# Desugaring regexp property class relies on ICU. Anything goes as long as we
# don't crash.
@@ -822,6 +830,7 @@
'regress/regress-490': [SKIP],
'regress/regress-create-exception': [SKIP],
'regress/regress-3247124': [SKIP],
+ 'compiler/regress-1226988': [SKIP],
# Requires bigger stack size in the Genesis and if stack size is increased,
# the test requires too much time to run. However, the problem test covers
@@ -1037,7 +1046,7 @@
'compiler/serializer-transition-propagation': [SKIP],
'opt-elements-kind': [SKIP],
'regress/regress-trap-allocation-memento': [SKIP],
- 'regress/regrtoess-v8-9267-*': [SKIP],
+ 'regress/regress-v8-9267-*': [SKIP],
'shared-function-tier-up-turbo': [SKIP],
# BUG(v8:11656) Skipped until we make progress on NumFuzz.
@@ -1093,6 +1102,7 @@
'wasm/futex': [SKIP],
'regress/regress-1205290': [SKIP],
'regress/regress-1212404': [SKIP],
+ 'regress/regress-1221035': [SKIP],
'regress/wasm/regress-1067621': [SKIP],
# BUG(v8:9975).
@@ -1261,6 +1271,7 @@
'wasm/tier-up-testing-flag': [SKIP],
'wasm/tier-down-to-liftoff': [SKIP],
'wasm/wasm-dynamic-tiering': [SKIP],
+ 'wasm/test-partial-serialization': [SKIP],
}], # arch not in (x64, ia32, arm64, arm)
##############################################################################
@@ -1271,68 +1282,6 @@
}],
##############################################################################
-['variant == turboprop or variant == turboprop_as_toptier', {
- # Deopts differently than TurboFan.
- 'compiler/native-context-specialization-hole-check': [SKIP],
- 'compiler/number-comparison-truncations': [SKIP],
- 'compiler/redundancy-elimination': [SKIP],
- 'compiler/regress-9945-*': [SKIP],
- 'es6/super-ic-opt-no-turboprop': [SKIP],
-
- # Static asserts for optimizations don't hold due to removed optimization
- # phases.
- 'compiler/concurrent-inlining-1': [SKIP],
- 'compiler/concurrent-inlining-2': [SKIP],
- 'compiler/constant-fold-add-static': [SKIP],
- 'compiler/diamond-followedby-branch': [SKIP],
- 'compiler/is-being-interpreted-*': [SKIP],
- 'compiler/load-elimination-const-field': [SKIP],
- 'compiler/serializer-accessors': [SKIP],
- 'compiler/serializer-apply': [SKIP],
- 'compiler/serializer-call': [SKIP],
- 'compiler/serializer-dead-after-jump': [SKIP],
- 'compiler/serializer-dead-after-return': [SKIP],
- 'compiler/serializer-feedback-propagation-*': [SKIP],
- 'compiler/serializer-transition-propagation': [SKIP],
-
- # Some tests rely on inlining.
- 'compiler/call-with-arraylike-or-spread*': [SKIP],
- 'compiler/inlined-call-polymorphic': [SKIP],
- 'compiler/opt-higher-order-functions': [SKIP],
- 'regress/regress-1049982-1': [SKIP],
- 'regress/regress-1049982-2': [SKIP],
- 'es6/iterator-eager-deopt': [SKIP],
-
- # In turboprop we reuse the optimized code on soft deopt. The following tests
- # test for a soft deopt and they won't work in TurboProp.
- 'deopt-recursive-soft-once': [SKIP],
- 'regress/regress-3709': [SKIP],
- 'regress/regress-5790': [SKIP],
-
- # const field tracking is disabled in turboprop
- 'const-field-tracking': [SKIP],
-
- # Dynamic map checks doesn't use information from maps so we don't inline
- # array builtins or track field representation.
- 'compiler/deopt-inlined-from-call': [SKIP],
- 'compiler/field-representation-tracking': [SKIP],
- 'field-type-tracking': [SKIP],
-
- # Tests failing for the lack of function context specialization in Turboprop.
- 'compiler/abstract-equal-receiver': [FAIL],
- 'compiler/constant-fold-cow-array': [FAIL],
- 'compiler/promise-resolve-stable-maps': [FAIL],
-
- # Tests failing due to reduced constant propagation in Turboprop.
- 'compiler/js-create-arguments': [FAIL],
- 'compiler/catch-block-load': [FAIL],
- 'compiler/construct-bound-function': [FAIL],
- 'compiler/construct-object': [FAIL],
- 'compiler/construct-receiver': [FAIL],
- 'compiler/js-create': [FAIL],
-}], # variant == turboprop or variant = turboprop_as_toptier
-
-##############################################################################
['variant == top_level_await', {
# specifically expects to fail on top level await.
'harmony/modules-import-15': [SKIP],
@@ -1483,6 +1432,7 @@
'regress/wasm/regress-1187831': [SKIP],
'regress/wasm/regress-1199662': [SKIP],
'regress/wasm/regress-1231950': [SKIP],
+ 'regress/wasm/regress-1242689': [SKIP],
}], # no_simd_hardware == True
##############################################################################
@@ -1528,6 +1478,13 @@
'compiler/native-context-specialization-hole-check': [SKIP],
'compiler/test-literal-map-migration': [SKIP],
'compiler/deopt-pretenure': [SKIP],
+ 'compiler/fast-api-sequences-x64': [SKIP],
+
+ # TODO(v8:12031): Reimplement elements kinds transitions when concurrent
+ # inlining.
+ 'default-nospec': [SKIP],
+ 'es6/collections-constructor-*': [SKIP],
+ 'es6/map-constructor-entry-side-effect*': [SKIP],
}], # single_generation
################################################################################
@@ -1666,4 +1623,10 @@
'wasm/shared-memory-gc-stress': [SKIP],
}], # third_party_heap
+##############################################################################
+['arch != x64', {
+ # Tests that include types only supported on x64.
+ 'compiler/fast-api-sequences-x64': [SKIP],
+}], # arch != x64
+
]
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
index 95c8c8650a..bf53dd6cc2 100644
--- a/deps/v8/test/mjsunit/never-optimize.js
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -43,5 +43,6 @@ for (let i = 0; i < 1000; ++i) {
u1();
u2();
}
+%FinalizeOptimization();
assertUnoptimized(u1);
assertOptimized(u2);
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js b/deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js
deleted file mode 100644
index aace7113a2..0000000000
--- a/deps/v8/test/mjsunit/regexp-modifiers-autogenerated-i18n.js
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --regexp-mode-modifiers
-
-// These regexps are just grepped out of the other tests we already have
-// and the syntax changed from out-of-line i flag to inline i flag.
-
-// These tests won't all run on the noi18n build of V8.
-
-assertTrue(/(?i)\u00e5/u.test("\u00c5"));
-assertTrue(/(?i)\u00e5/u.test("\u00e5"));
-assertTrue(/(?i)\u00c5/u.test("\u00e5"));
-assertTrue(/(?i)\u00c5/u.test("\u00c5"));
-assertTrue(/(?i)\u212b/u.test("\u212b"));
-assertFalse(/(?i)\u00df/u.test("SS"));
-assertFalse(/(?i)\u1f8d/u.test("\u1f05\u03b9"));
-assertTrue(/(?i)\u1f6b/u.test("\u1f63"));
-assertTrue(/(?i)\u00e5/u.test("\u212b"));
-assertTrue(/(?i)\u00e5/u.test("\u00c5"));
-assertTrue(/(?i)\u00e5/u.test("\u00e5"));
-assertTrue(/(?i)\u00e5/u.test("\u212b"));
-assertTrue(/(?i)\u00c5/u.test("\u00e5"));
-assertTrue(/(?i)\u00c5/u.test("\u212b"));
-assertTrue(/(?i)\u00c5/u.test("\u00c5"));
-assertTrue(/(?i)\u212b/u.test("\u00c5"));
-assertTrue(/(?i)\u212b/u.test("\u00e5"));
-assertTrue(/(?i)\u212b/u.test("\u212b"));
-assertTrue(/(?i)\u{10400}/u.test("\u{10428}"));
-assertTrue(/(?i)\ud801\udc00/u.test("\u{10428}"));
-assertTrue(/(?i)[\u{10428}]/u.test("\u{10400}"));
-assertTrue(/(?i)[\ud801\udc28]/u.test("\u{10400}"));
-assertFalse(/(?i)\u00df/u.test("SS"));
-assertFalse(/(?i)\u1f8d/u.test("\u1f05\u03b9"));
-assertTrue(/(?i)\u1f8d/u.test("\u1f85"));
-assertTrue(/(?i)\u1f6b/u.test("\u1f63"));
-assertTrue(/(?i)\u00e5\u00e5\u00e5/u.test("\u212b\u00e5\u00c5"));
-assertTrue(/(?i)AB\u{10400}/u.test("ab\u{10428}"));
-assertTrue(/(?i)\w/u.test('\u017F'));
-assertTrue(/(?i)\w/u.test('\u212A'));
-assertFalse(/(?i)\W/u.test('\u017F'));
-assertFalse(/(?i)\W/u.test('\u212A'));
-assertFalse(/(?i)\W/u.test('s'));
-assertFalse(/(?i)\W/u.test('S'));
-assertFalse(/(?i)\W/u.test('K'));
-assertFalse(/(?i)\W/u.test('k'));
-assertTrue(/(?i)[\w]/u.test('\u017F'));
-assertTrue(/(?i)[\w]/u.test('\u212A'));
-assertFalse(/(?i)[\W]/u.test('\u017F'));
-assertFalse(/(?i)[\W]/u.test('\u212A'));
-assertFalse(/(?i)[\W]/u.test('s'));
-assertFalse(/(?i)[\W]/u.test('S'));
-assertFalse(/(?i)[\W]/u.test('K'));
-assertFalse(/(?i)[\W]/u.test('k'));
-assertTrue(/(?i)\b/u.test('\u017F'));
-assertTrue(/(?i)\b/u.test('\u212A'));
-assertTrue(/(?i)\b/u.test('s'));
-assertTrue(/(?i)\b/u.test('S'));
-assertFalse(/(?i)\B/u.test('\u017F'));
-assertFalse(/(?i)\B/u.test('\u212A'));
-assertFalse(/(?i)\B/u.test('s'));
-assertFalse(/(?i)\B/u.test('S'));
-assertFalse(/(?i)\B/u.test('K'));
-assertFalse(/(?i)\B/u.test('k'));
-assertTrue(/(?i)\p{Ll}/u.test("a"));
-assertTrue(/(?i)\p{Ll}/u.test("\u{118D4}"));
-assertTrue(/(?i)\p{Ll}/u.test("A"));
-assertTrue(/(?i)\p{Ll}/u.test("\u{118B4}"));
-assertTrue(/(?i)\P{Ll}/u.test("a"));
-assertTrue(/(?i)\P{Ll}/u.test("\u{118D4}"));
-assertTrue(/(?i)\P{Ll}/u.test("A"));
-assertTrue(/(?i)\P{Ll}/u.test("\u{118B4}"));
-assertTrue(/(?i)\p{Lu}/u.test("a"));
-assertTrue(/(?i)\p{Lu}/u.test("\u{118D4}"));
-assertTrue(/(?i)\p{Lu}/u.test("A"));
-assertTrue(/(?i)\p{Lu}/u.test("\u{118B4}"));
-assertTrue(/(?i)\P{Lu}/u.test("a"));
-assertTrue(/(?i)\P{Lu}/u.test("\u{118D4}"));
-assertTrue(/(?i)\P{Lu}/u.test("A"));
-assertTrue(/(?i)\P{Lu}/u.test("\u{118B4}"));
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js b/deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js
deleted file mode 100644
index 82d1341b2a..0000000000
--- a/deps/v8/test/mjsunit/regexp-modifiers-autogenerated.js
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --regexp-mode-modifiers
-
-// These regexps are just grepped out of the other tests we already have
-// and the syntax changed from out-of-line i flag to inline i flag.
-
-assertFalse(/(?i)x(...)\1/.test("x\u03a3\u03c2\u03c3\u03c2\u03c3"));
-assertTrue(/(?i)\u03a3((?:))\1\1x/.test("\u03c2x"), "backref-UC16-empty");
-assertTrue(/(?i)x(?:...|(...))\1x/.test("x\u03a3\u03c2\u03c3x"));
-assertTrue(/(?i)x(?:...|(...))\1x/.test("x\u03c2\u03c3\u039b\u03a3\u03c2\u03bbx"));
-assertFalse(/(?i)\xc1/.test('fooA'), "quickcheck-uc16-pattern-ascii-subject");
-assertFalse(/(?i)x(...)\1/.test("xaaaaa"), "backref-ASCII-short");
-assertTrue(/(?i)x((?:))\1\1x/.test("xx"), "backref-ASCII-empty");
-assertTrue(/(?i)x(?:...|(...))\1x/.test("xabcx"), "backref-ASCII-uncaptured");
-assertTrue(/(?i)x(?:...|(...))\1x/.test("xabcABCx"), "backref-ASCII-backtrack");
-assertFalse(/(?i)f/.test('b'));
-assertFalse(/(?i)[abc]f/.test('x'));
-assertFalse(/(?i)[abc]f/.test('xa'));
-assertFalse(/(?i)[abc]</.test('x'));
-assertFalse(/(?i)[abc]</.test('xa'));
-assertFalse(/(?i)f[abc]/.test('x'));
-assertFalse(/(?i)f[abc]/.test('xa'));
-assertFalse(/(?i)<[abc]/.test('x'));
-assertFalse(/(?i)<[abc]/.test('xa'));
-assertFalse(/(?i)[\u00e5]/.test("\u212b"));
-assertFalse(/(?i)[\u212b]/.test("\u00e5\u1234"));
-assertFalse(/(?i)[\u212b]/.test("\u00e5"));
-assertFalse(/(?i)\u{10400}/.test("\u{10428}"));
-assertFalse(/(?i)[\u00e5]/.test("\u212b"));
-assertFalse(/(?i)[\u212b]/.test("\u00e5\u1234"));
-assertFalse(/(?i)[\u212b]/.test("\u00e5"));
-assertFalse(/(?i)\u{10400}/.test("\u{10428}"));
-assertTrue(/(?i)[@-A]/.test("a"));
-assertTrue(/(?i)[@-A]/.test("A"));
-assertTrue(/(?i)[@-A]/.test("@"));
-assertFalse(/(?i)[¿-À]/.test('¾'));
-assertTrue(/(?i)[¿-À]/.test('¿'));
-assertTrue(/(?i)[¿-À]/.test('À'));
-assertTrue(/(?i)[¿-À]/.test('à'));
-assertFalse(/(?i)[¿-À]/.test('á'));
-assertFalse(/(?i)[¿-À]/.test('Á'));
-assertFalse(/(?i)[¿-À]/.test('Á'));
-assertFalse(/(?i)[Ö-×]/.test('Õ'));
-assertTrue(/(?i)[Ö-×]/.test('Ö'));
-assertTrue(/(?i)[Ö-×]/.test('ö'));
-assertTrue(/(?i)[Ö-×]/.test('×'));
-assertFalse(/(?i)[Ö-×]/.test('Ø'));
-assertTrue(/(?i)(a[\u1000A])+/.test('aa'));
-assertTrue(/(?i)\u0178/.test('\u00ff'));
-assertTrue(/(?i)\u039c/.test('\u00b5'));
-assertTrue(/(?i)\u039c/.test('\u03bc'));
-assertTrue(/(?i)\u00b5/.test('\u03bc'));
-assertTrue(/(?i)[\u039b-\u039d]/.test('\u00b5'));
-assertFalse(/(?i)[^\u039b-\u039d]/.test('\u00b5'));
-
-assertTrue(/(?m)^bar/.test("bar"));
-assertTrue(/(?m)^bar/.test("bar\nfoo"));
-assertTrue(/(?m)^bar/.test("foo\nbar"));
-assertTrue(/(?m)bar$/.test("bar"));
-assertTrue(/(?m)bar$/.test("bar\nfoo"));
-assertTrue(/(?m)bar$/.test("foo\nbar"));
-assertFalse(/(?m)^bxr/.test("bar"));
-assertFalse(/(?m)^bxr/.test("bar\nfoo"));
-assertFalse(/(?m)^bxr/.test("foo\nbar"));
-assertFalse(/(?m)bxr$/.test("bar"));
-assertFalse(/(?m)bxr$/.test("bar\nfoo"));
-assertFalse(/(?m)bxr$/.test("foo\nbar"));
-assertTrue(/(?m)^.*$/.test("\n"));
-assertTrue(/(?m)^([()]|.)*$/.test("()\n()"));
-assertTrue(/(?m)^([()]|.)*$/.test("()\n"));
-assertTrue(/(?m)^[()]*$/.test("()\n."));
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-dotall.js b/deps/v8/test/mjsunit/regexp-modifiers-dotall.js
deleted file mode 100644
index 70c379c2e8..0000000000
--- a/deps/v8/test/mjsunit/regexp-modifiers-dotall.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --regexp-mode-modifiers
-
-// S flag switches dotall mode on and off. Combine with i flag changes to test
-// the parser.
-test(/.(?s).(?i-s).a(?-i)a/);
-test(/.(?s:.)(?i:.a)a/);
-test(/.(?s).(?i-s).a(?-i)a/u);
-test(/.(?s:.)(?i:.a)a/u);
-
-// m flag makes no difference
-test(/.(?sm).(?i-s).a(?-i)a/);
-test(/.(?s:.)(?i:.a)a/);
-test(/.(?sm).(?im-s).a(?m-i)a/u);
-test(/.(?s:.)(?i:.a)a/u);
-
-function test(re) {
- assertTrue(re.test("...aa"));
- assertTrue(re.test(".\n.aa"));
- assertTrue(re.test(".\n.Aa"));
- assertFalse(re.test("\n\n.Aa"));
- assertFalse(re.test(".\n\nAa"));
- assertFalse(re.test(".\n.AA"));
-}
diff --git a/deps/v8/test/mjsunit/regexp-modifiers-i18n.js b/deps/v8/test/mjsunit/regexp-modifiers-i18n.js
deleted file mode 100644
index e9ffe05ac9..0000000000
--- a/deps/v8/test/mjsunit/regexp-modifiers-i18n.js
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --regexp-mode-modifiers
-
-// These tests won't all run on the noi18n build of V8.
-
-aa(/(a)(?i)\1/u);
-aa(/([az])(?i)\1/u);
-
-function aa(re) {
- assertTrue(re.test("aa"));
- assertTrue(re.test("aA"));
- assertFalse(re.test("Aa"));
- assertFalse(re.test("AA"));
-}
-
-aai(/(a)(?-i)\1/iu);
-aai(/([az])(?-i)\1/iu);
-
-function aai(re) {
- assertTrue(re.test("aa"));
- assertFalse(re.test("aA"));
- assertFalse(re.test("Aa"));
- assertTrue(re.test("AA"));
-}
-
-abcd(/a(b(?i)c)d/u);
-abcd(/[aw]([bx](?i)[cy])[dz]/u);
-
-function abcd(re) {
- assertTrue(re.test("abcd"));
- assertFalse(re.test("abcD"));
- assertTrue(re.test("abCd"));
- assertFalse(re.test("abCD"));
- assertFalse(re.test("aBcd"));
- assertFalse(re.test("aBcD"));
- assertFalse(re.test("aBCd"));
- assertFalse(re.test("aBCD"));
- assertFalse(re.test("Abcd"));
- assertFalse(re.test("AbcD"));
- assertFalse(re.test("AbCd"));
- assertFalse(re.test("AbCD"));
- assertFalse(re.test("ABcd"));
- assertFalse(re.test("ABcD"));
- assertFalse(re.test("ABCd"));
- assertFalse(re.test("ABCD"));
-}
-
-abcdei(/a(b(?-i)c)d/iu);
-abcdei(/[aw]([bx](?-i)[cy])[dz]/iu);
-
-function abcdei(re) {
- assertTrue(re.test("abcd"));
- assertTrue(re.test("abcD"));
- assertFalse(re.test("abCd"));
- assertFalse(re.test("abCD"));
- assertTrue(re.test("aBcd"));
- assertTrue(re.test("aBcD"));
- assertFalse(re.test("aBCd"));
- assertFalse(re.test("aBCD"));
- assertTrue(re.test("Abcd"));
- assertTrue(re.test("AbcD"));
- assertFalse(re.test("AbCd"));
- assertFalse(re.test("AbCD"));
- assertTrue(re.test("ABcd"));
- assertTrue(re.test("ABcD"));
- assertFalse(re.test("ABCd"));
- assertFalse(re.test("ABCD"));
-}
-
-abc(/a(?i:b)c/u);
-abc(/[ax](?i:[by])[cz]/u);
-
-function abc(re) {
- assertTrue(re.test("abc"));
- assertFalse(re.test("abC"));
- assertTrue(re.test("aBc"));
- assertFalse(re.test("aBC"));
- assertFalse(re.test("Abc"));
- assertFalse(re.test("AbC"));
- assertFalse(re.test("ABc"));
- assertFalse(re.test("ABC"));
-}
-
-abci(/a(?-i:b)c/iu);
-abci(/[ax](?-i:[by])[cz]/iu);
-
-function abci(re) {
- assertTrue(re.test("abc"));
- assertTrue(re.test("abC"));
- assertFalse(re.test("aBc"));
- assertFalse(re.test("aBC"));
- assertTrue(re.test("Abc"));
- assertTrue(re.test("AbC"));
- assertFalse(re.test("ABc"));
- assertFalse(re.test("ABC"));
-}
-
-// The following tests are taken from test/mjsunit/es7/regexp-ui-word.js but
-// using inline syntax instead of the global /i flag.
-assertTrue(/(?i)\w/u.test('\u017F'));
-assertTrue(/(?i)\w/u.test('\u212A'));
-assertFalse(/(?i)\W/u.test('\u017F'));
-assertFalse(/(?i)\W/u.test('\u212A'));
-assertFalse(/(?i)\W/u.test('s'));
-assertFalse(/(?i)\W/u.test('S'));
-assertFalse(/(?i)\W/u.test('K'));
-assertFalse(/(?i)\W/u.test('k'));
-
-assertTrue(/(?i)[\w]/u.test('\u017F'));
-assertTrue(/(?i)[\w]/u.test('\u212A'));
-assertFalse(/(?i)[\W]/u.test('\u017F'));
-assertFalse(/(?i)[\W]/u.test('\u212A'));
-assertFalse(/(?i)[\W]/u.test('s'));
-assertFalse(/(?i)[\W]/u.test('S'));
-assertFalse(/(?i)[\W]/u.test('K'));
-assertFalse(/(?i)[\W]/u.test('k'));
-
-assertTrue(/(?i)\b/u.test('\u017F'));
-assertFalse(/(?i:)\b/u.test('\u017F'));
-assertTrue(/(?i)\b/u.test('\u212A'));
-assertFalse(/(?i:)\b/u.test('\u212A'));
-assertTrue(/(?i)\b/u.test('s'));
-assertTrue(/(?i)\b/u.test('S'));
-assertFalse(/(?i)\B/u.test('\u017F'));
-assertFalse(/(?i)\B/u.test('\u212A'));
-assertFalse(/(?i)\B/u.test('s'));
-assertFalse(/(?i)\B/u.test('S'));
-assertFalse(/(?i)\B/u.test('K'));
-assertFalse(/(?i)\B/u.test('k'));
-
-assertEquals(["abcd\u017F", "\u017F"], /a.*?(.)(?i)\b/u.exec('abcd\u017F cd'));
-assertEquals(["abcd\u212A", "\u212A"], /a.*?(.)(?i)\b/u.exec('abcd\u212A cd'));
-
-assertEquals(["a\u017F", "\u017F"], /a.*?(?i:\B)(.)/u.exec('a\u017F '));
-assertEquals(["a\u212A", "\u212A"], /a.*?(?i:\B)(.)/u.exec('a\u212A '));
diff --git a/deps/v8/test/mjsunit/regexp-modifiers.js b/deps/v8/test/mjsunit/regexp-modifiers.js
deleted file mode 100644
index 7e76717912..0000000000
--- a/deps/v8/test/mjsunit/regexp-modifiers.js
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --regexp-mode-modifiers
-
-aa(/(a)(?i)\1/);
-aa(/([az])(?i)\1/);
-
-function aa(re) {
- assertTrue(re.test("aa"));
- assertTrue(re.test("aA"));
- assertFalse(re.test("Aa"));
- assertFalse(re.test("AA"));
-}
-
-aai(/(a)(?-i)\1/i);
-aai(/([az])(?-i)\1/i);
-
-function aai(re) {
- assertTrue(re.test("aa"));
- assertFalse(re.test("aA"));
- assertFalse(re.test("Aa"));
- assertTrue(re.test("AA"));
-}
-
-abcd(/a(b(?i)c)d/);
-abcd(/[aw]([bx](?i)[cy])[dz]/);
-
-function abcd(re) {
- assertTrue(re.test("abcd"));
- assertFalse(re.test("abcD"));
- assertTrue(re.test("abCd"));
- assertFalse(re.test("abCD"));
- assertFalse(re.test("aBcd"));
- assertFalse(re.test("aBcD"));
- assertFalse(re.test("aBCd"));
- assertFalse(re.test("aBCD"));
- assertFalse(re.test("Abcd"));
- assertFalse(re.test("AbcD"));
- assertFalse(re.test("AbCd"));
- assertFalse(re.test("AbCD"));
- assertFalse(re.test("ABcd"));
- assertFalse(re.test("ABcD"));
- assertFalse(re.test("ABCd"));
- assertFalse(re.test("ABCD"));
-}
-
-abcdei(/a(b(?-i)c)d/i);
-abcdei(/[aw]([bx](?-i)[cy])[dz]/i);
-
-function abcdei(re) {
- assertTrue(re.test("abcd"));
- assertTrue(re.test("abcD"));
- assertFalse(re.test("abCd"));
- assertFalse(re.test("abCD"));
- assertTrue(re.test("aBcd"));
- assertTrue(re.test("aBcD"));
- assertFalse(re.test("aBCd"));
- assertFalse(re.test("aBCD"));
- assertTrue(re.test("Abcd"));
- assertTrue(re.test("AbcD"));
- assertFalse(re.test("AbCd"));
- assertFalse(re.test("AbCD"));
- assertTrue(re.test("ABcd"));
- assertTrue(re.test("ABcD"));
- assertFalse(re.test("ABCd"));
- assertFalse(re.test("ABCD"));
-}
-
-abc(/a(?i:b)c/);
-abc(/[ax](?i:[by])[cz]/);
-
-function abc(re) {
- assertTrue(re.test("abc"));
- assertFalse(re.test("abC"));
- assertTrue(re.test("aBc"));
- assertFalse(re.test("aBC"));
- assertFalse(re.test("Abc"));
- assertFalse(re.test("AbC"));
- assertFalse(re.test("ABc"));
- assertFalse(re.test("ABC"));
-}
-
-abci(/a(?-i:b)c/i);
-abci(/[ax](?-i:[by])[cz]/i);
-
-function abci(re) {
- assertTrue(re.test("abc"));
- assertTrue(re.test("abC"));
- assertFalse(re.test("aBc"));
- assertFalse(re.test("aBC"));
- assertTrue(re.test("Abc"));
- assertTrue(re.test("AbC"));
- assertFalse(re.test("ABc"));
- assertFalse(re.test("ABC"));
-}
-
-assertThrows(() => new RegExp("foo(?i:"));
-assertThrows(() => new RegExp("foo(?--i)"));
-assertThrows(() => new RegExp("foo(?i-i)"));
-
-assertThrows(() => new RegExp("foo(?m:"));
-assertThrows(() => new RegExp("foo(?--m)"));
-assertThrows(() => new RegExp("foo(?m-m)"));
-
-var re = /^\s(?m)^.$\s(?-m)$/;
-assertTrue(re.test("\n.\n"));
-assertFalse(re.test(" .\n"));
-assertFalse(re.test("\n. "));
-assertFalse(re.test(" . "));
-assertFalse(re.test("_\n.\n"));
-assertFalse(re.test("\n.\n_"));
-assertFalse(re.test("_\n.\n_"));
-
-assertEquals(["abcd", "d"], /a.*?(.)(?i)\b/.exec('abcd\u017F cd'));
-assertEquals(["abcd", "d"], /a.*?(.)(?i)\b/.exec('abcd\u212A cd'));
-
-assertEquals(["a\u017F ", " "], /a.*?(?i)\B(.)/.exec('a\u017F '));
-assertEquals(["a\u212A ", " "], /a.*?(?i)\B(.)/.exec('a\u212A '));
-
-// Nested flags.
-var res = [
- /^a(?i:b(?-i:c(?i:d)e)f)g$/,
- /^a(?i:b(?-i)c(?i)d(?-i)e(?i)f)g$/,
- /^(?-i:a(?i:b(?-i:c(?i:d)e)f)g)$/i,
- /^(?-i:a(?i:b(?-i)c(?i)d(?-i)e(?i)f)g)$/i,
-];
-
-for (var idx = 0; idx < res.length; idx++) {
- var re = res[idx];
- for (var i = 0; i < 128; i++) {
- var s = (i & 1) ? "A" : "a";
- s += (i & 2) ? "B" : "b";
- s += (i & 4) ? "C" : "c";
- s += (i & 8) ? "D" : "d";
- s += (i & 16) ? "E" : "e";
- s += (i & 32) ? "F" : "f";
- s += (i & 64) ? "G" : "g";
- if ((i & (1 | 4 | 16 | 64)) != 0) {
- assertFalse(re.test(s), s);
- } else {
- assertTrue(re.test(s), s);
- }
- }
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-1034449.js b/deps/v8/test/mjsunit/regress/regress-1034449.js
index 6c22ecd099..3a87531c6e 100644
--- a/deps/v8/test/mjsunit/regress/regress-1034449.js
+++ b/deps/v8/test/mjsunit/regress/regress-1034449.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
function f(len) {
return new Array(len);
diff --git a/deps/v8/test/mjsunit/regress/regress-1221035.js b/deps/v8/test/mjsunit/regress/regress-1221035.js
new file mode 100644
index 0000000000..4ebe4fcc7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1221035.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let sab = new SharedArrayBuffer(40);
+let i32arr = new Int32Array(sab);
+let worker = new Worker(
+ 'onmessage = function(memory) { while (memory[1] == 0) {} };',
+ {type: 'string'});
+worker.postMessage(i32arr);
+i32arr.copyWithin(Array(0x8000).fill("a"), 0);
+i32arr[1] = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-1227568.js b/deps/v8/test/mjsunit/regress/regress-1227568.js
index 351d8ea5b9..65f0b5dcdf 100644
--- a/deps/v8/test/mjsunit/regress/regress-1227568.js
+++ b/deps/v8/test/mjsunit/regress/regress-1227568.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --no-analyze-environment-liveness
-// Flags:--interrupt-budget=100
+// Flags: --allow-natives-syntax --no-analyze-environment-liveness
+// Flags: --interrupt-budget=100
var val = {};
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-1230930.js b/deps/v8/test/mjsunit/regress/regress-1230930.js
new file mode 100644
index 0000000000..d35d250813
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1230930.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --stress-gc-during-compilation
+
+const __v_0 = class __c_0 extends Array {
+ constructor() {
+ super();
+ this.y = 1;
+ }
+};
+function __f_1() {
+ var __v_2 = new __v_0();
+}
+ %PrepareFunctionForOptimization(__f_1);
+__f_1();
+%OptimizeFunctionOnNextCall(__f_1);
+__f_1();
diff --git a/deps/v8/test/mjsunit/regress/regress-1231901.js b/deps/v8/test/mjsunit/regress/regress-1231901.js
new file mode 100644
index 0000000000..b93b4faf35
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1231901.js
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --interrupt-budget=100
+
+function __f_9() {}
+function __f_10() {}
+
+function f(a, b) {
+ if (b) {
+ new __f_10().__proto__ = a;
+ } else {
+ __f_10.prototype = a;
+ }
+}
+
+function g(a, b, c) {
+ var d = a ? new __f_9() : {};
+ if (b) { g(d); }
+ f(d, c);
+}
+
+g(false, true, false);
+g(false, false, false);
+g(false, false, false, undefined);
+
+g(false, true, true);
+g(false, false, true);
+g(false, false, true, undefined);
+
+g(true, true, false);
+g(true, false, false);
+g(true, false, false, undefined);
+
+g(true, true, true);
+g(true, false, true);
+g(true, false, true, undefined);
diff --git a/deps/v8/test/mjsunit/regress/regress-1232620.js b/deps/v8/test/mjsunit/regress/regress-1232620.js
new file mode 100644
index 0000000000..3553eb7cc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1232620.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Try to catch TSAN issues with access to SharedArrayBuffer.
+
+function onmessage([buf]) {
+ const arr = new Int32Array(buf);
+ for (let val = 1; val < 100; ++val) arr.fill(val);
+}
+const arr = new Int32Array(new SharedArrayBuffer(4));
+const worker = new Worker(`onmessage = ${onmessage}`, {type: 'string'});
+worker.postMessage([arr.buffer]);
+// Wait until the worker starts filling the array.
+while (Atomics.load(arr) == 0) { }
+// Try setting a value on the shared array buffer that races with the fill.
+arr.set(arr);
diff --git a/deps/v8/test/mjsunit/regress/regress-1235071.js b/deps/v8/test/mjsunit/regress/regress-1235071.js
new file mode 100644
index 0000000000..521347dc93
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1235071.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --noanalyze-environment-liveness --interrupt-budget=1000 --allow-natives-syntax
+
+function __f_4() {
+ var __v_3 = function() {};
+ var __v_5 = __v_3.prototype;
+ Number.prototype.__proto__ = __v_3;
+ __v_5, __v_3.prototype;
+}
+%PrepareFunctionForOptimization(__f_4);
+for (let i = 0; i < 100; i++) __f_4();
diff --git a/deps/v8/test/mjsunit/regress/regress-1236303.js b/deps/v8/test/mjsunit/regress/regress-1236303.js
new file mode 100644
index 0000000000..27baea1034
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1236303.js
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var __v_16 = {};
+__f_27();
+function __f_27() {
+ var __v_15 = Symbol();
+ __v_16[__v_15] = "abc";
+ for (var __v_1 = 0; __v_1 < 100000; __v_1++) {
+ }
+}
+__f_27();
diff --git a/deps/v8/test/mjsunit/regress/regress-1236307.js b/deps/v8/test/mjsunit/regress/regress-1236307.js
new file mode 100644
index 0000000000..617782c3ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1236307.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __f_2(__v_6) {
+ try {
+ if (__v_6 > 0) return __f_2(...[__v_6 - 1]);
+ } catch (e) {}
+}
+__f_2(100000);
+__f_2(100000);
diff --git a/deps/v8/test/mjsunit/regress/regress-1236560.js b/deps/v8/test/mjsunit/regress/regress-1236560.js
new file mode 100644
index 0000000000..987f348aad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1236560.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let obj = {};
+let arr = new Uint8Array(3);
+function __f_0() {
+ arr[2] = obj;
+}
+obj.toString = __f_0;
+assertThrows(() => obj.toString(), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js b/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
index 4d2f273020..2c3fded9e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --interrupt-budget=200 --stack-size=200 --budget-for-feedback-vector-allocation=100 --expose-gc --stress-flush-bytecode
+// Flags: --interrupt-budget=200 --stack-size=200
+// Flags: --budget-for-feedback-vector-allocation=100 --expose-gc
+// Flags: --stress-flush-code --flush-bytecode
var i = 0;
function main() {
@@ -30,7 +32,7 @@ function v0() {
const v21 = Object.defineProperty([].__proto__,"e",{set:v10});
}
const v26 = v0();
-// With --stress-flush-bytecode GC flushes the bytecode for v0 and v10
+// With --stress-flush-code GC flushes the bytecode for v0 and v10
gc();
assertThrows(v0, TypeError);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1113085.js b/deps/v8/test/mjsunit/regress/regress-crbug-1113085.js
new file mode 100644
index 0000000000..1748e46b53
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1113085.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --force-slow-path
+
+let obj = [1, 2, 3];
+obj[Symbol.isConcatSpreadable] = false;
+assertEquals([obj], obj.concat());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1227476.js b/deps/v8/test/mjsunit/regress/regress-crbug-1227476.js
new file mode 100644
index 0000000000..38f97178dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1227476.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(
+ () => {
+ let ar = new Int32Array();
+ ar.__defineGetter__(-2, function() {});
+ }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1235182.js b/deps/v8/test/mjsunit/regress/regress-crbug-1235182.js
new file mode 100644
index 0000000000..0a3b1e71e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1235182.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var call_f = new Function('f(' + ('0,').repeat(7023) + ')');
+function f() {[1, 2, 3].sort(call_f);}
+assertThrows(call_f, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js b/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js
new file mode 100644
index 0000000000..1b0eac4903
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1236962.js
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --budget-for-feedback-vector-allocation=0 --interrupt-budget=1000
+
+(function() {
+Empty = function() {};
+
+var FuncImpl = new Function();
+Func = function() {
+ if (FuncImpl === undefined) {
+ try {
+ FuncImpl = new Function();
+ } catch (e) {
+ throw new Error('');
+ }
+ }
+ return FuncImpl();
+};
+})();
+
+var object = {};
+main = function(unused = true) {
+ var func = Func();
+ Empty(func & object.a & object.a & object.a & object.a !== 0, '');
+ Empty(func & object.a !== 0, '');
+};
+
+for (let i = 0; i < 40; i++) {
+ main();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1237153.js b/deps/v8/test/mjsunit/regress/regress-crbug-1237153.js
new file mode 100644
index 0000000000..8caed1b1b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1237153.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Try to catch TSAN issues with access to SharedArrayBuffer.
+
+function onmessage([buf]) {
+ const arr = new Int32Array(buf);
+ for (let val = 1; val < 100; ++val) arr.fill(val);
+}
+const arr = new Int32Array(new SharedArrayBuffer(4));
+const worker = new Worker(`onmessage = ${onmessage}`, { type: 'string' });
+worker.postMessage([arr.buffer]);
+// Wait until the worker starts filling the array.
+while (Atomics.load(arr) == 0) { }
+// Try creating a slice of the shared array buffer that races with the fill.
+const slice = arr.slice(0, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-422858.js b/deps/v8/test/mjsunit/regress/regress-crbug-422858.js
index bede64025e..ba75fc01a4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-422858.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-422858.js
@@ -3,21 +3,21 @@
// found in the LICENSE file.
var date = new Date("2016/01/02 10:00 GMT-8")
-assertEquals(0, date.getUTCMinutes());
+assertEquals(0, date.getMinutes());
assertEquals(18, date.getUTCHours());
date = new Date("2016/01/02 10:00 GMT-12")
-assertEquals(0, date.getUTCMinutes());
+assertEquals(0, date.getMinutes());
assertEquals(22, date.getUTCHours());
date = new Date("2016/01/02 10:00 GMT-123")
-assertEquals(23, date.getUTCMinutes());
+assertEquals(23, date.getMinutes());
assertEquals(11, date.getUTCHours());
date = new Date("2016/01/02 10:00 GMT-0856")
-assertEquals(56, date.getUTCMinutes());
+assertEquals(56, date.getMinutes());
assertEquals(18, date.getUTCHours());
date = new Date("2016/01/02 10:00 GMT-08000")
-assertEquals(NaN, date.getUTCMinutes());
+assertEquals(NaN, date.getMinutes());
assertEquals(NaN, date.getUTCHours());
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-8799.js b/deps/v8/test/mjsunit/regress/regress-v8-8799.js
index 6e3eb07b03..bcc8cdbe17 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-8799.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8799.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --flush-bytecode --stress-flush-bytecode
+// Flags: --expose-gc --flush-bytecode --stress-flush-code
// Ensure tagged template objects are cached even after bytecode flushing.
var f = (x) => eval`a${x}b`;
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9656.js b/deps/v8/test/mjsunit/regress/regress-v8-9656.js
index c28463572e..036d12316a 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-9656.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9656.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-flush-bytecode
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-code
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js b/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
index a575b61324..89ac80eac3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
@@ -9,7 +9,7 @@ let proxy = new Proxy(obj, {});
let builder = new WasmModuleBuilder();
builder.addType(kSig_v_v);
let imports = builder.addImport("m","f", kSig_v_v);
-let exception = builder.addException(kSig_v_v);
+let exception = builder.addTag(kSig_v_v);
builder.addFunction("foo", kSig_v_v)
.addBody([
kExprTry,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1236958.js b/deps/v8/test/mjsunit/regress/wasm/regress-1236958.js
new file mode 100644
index 0000000000..63766f72f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1236958.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+let array = builder.addArray(kWasmI64, true);
+
+builder.addFunction('test', kSig_v_v)
+ .addBody([kExprLoop, kWasmVoid,
+ kExprI64Const, 15,
+ kExprI32Const, 12,
+ kGCPrefix, kExprRttCanon, array,
+ kGCPrefix, kExprArrayNewWithRtt, array,
+ kExprDrop,
+ kExprEnd])
+ .exportFunc();
+
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1239522.js b/deps/v8/test/mjsunit/regress/wasm/regress-1239522.js
new file mode 100644
index 0000000000..dfa8e72cae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1239522.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var asm = function(global) {
+ 'use asm';
+ function f() {}
+ return f;
+};
+function asm2(global, imports) {
+ 'use asm';
+ var asm = imports.asm;
+ function f() {}
+ return {f: f};
+}
+asm2(this, {asm: asm});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1242689.js b/deps/v8/test/mjsunit/regress/wasm/regress-1242689.js
new file mode 100644
index 0000000000..fafca4c430
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1242689.js
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This test case is minified from a clusterfuzz generated test. It exercises a
+// bug in I64x2ShrS where the codegen was overwriting an input Register
+// containing the shift value.
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+
+// Generate function 1 (out of 1).
+builder.addFunction("main", kSig_i_v)
+ .addBodyWithEnd([
+ // signature: i_iii
+ // body:
+ kExprI32Const, 0x00, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0xee, 0xc6, 0x01, // i32.const, 25454 (0x636e)
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprI8x16ExtractLaneS, 0x00, // i8x16.extract_lane_s
+ kSimdPrefix, kExprI64x2ShrS, 0x01, // i64x2.shr_s
+ kSimdPrefix, kExprI8x16ExtractLaneS, 0x00, // i8x16.extract_lane_s
+ kExprI32Const, 0xee, 0xc6, 0x01, // i32.const, 0x636e
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprI8x16ExtractLaneS, 0x00, // i8x16.extract_lane_s
+ kExprI32Const, 0x00, // i32.const
+ kExprSelect, // select
+ kExprEnd, // end @48
+]).exportFunc();
+
+const instance = builder.instantiate();
+print(instance.exports.main());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
index 03a02e6861..92b14bbbd6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
@@ -11,7 +11,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
(function testExternRefNull() {
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_r_v)
- .addBody([kExprRefNull, kWasmExternRef])
+ .addBody([kExprRefNull, kExternRefCode])
.exportFunc();
var wire_bytes = builder.toBuffer();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8094.js b/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
index ea21614e5b..212f75774b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
@@ -8,7 +8,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// Instantiate a throwing module.
var builder = new WasmModuleBuilder();
-builder.addException(kSig_v_v);
+builder.addTag(kSig_v_v);
builder.addFunction("propel", kSig_v_v)
.addBody([kExprThrow, 0])
.exportFunc();
@@ -22,8 +22,8 @@ try {
exception = e;
}
-// Check that the exception is an instance of the correct error function and
+// Check that the exception is an instance of WebAssembly.Exception and
// that no extraneous properties exist. Setting such properties could be
// observable by JavaScript and could break compatibility.
-assertInstanceof(exception, WebAssembly.RuntimeError);
+assertInstanceof(exception, WebAssembly.Exception);
assertArrayEquals(["stack", "message"], Object.getOwnPropertyNames(exception));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8846.js b/deps/v8/test/mjsunit/regress/wasm/regress-8846.js
index 9e9ca4f819..eee36d5ddb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8846.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8846.js
@@ -6,10 +6,10 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
-(function TestAsyncCompileExceptionSection() {
+(function TestAsyncCompileTagSection() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("thrw", kSig_v_v)
.addBody([
kExprThrow, except,
@@ -21,7 +21,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertPromiseResult(WebAssembly.instantiate(module), inst => step3(inst));
}
function step3(instance) {
- assertThrows(() => instance.exports.thrw(), WebAssembly.RuntimeError);
+ assertThrows(() => instance.exports.thrw(), WebAssembly.Exception);
}
step1(builder.toBuffer());
})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8896.js b/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
index 7f73846f92..733b6aa00a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
@@ -10,7 +10,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
(function TestSerializeDeserializeRuntimeCall() {
var builder = new WasmModuleBuilder();
- var except = builder.addException(kSig_v_v);
+ var except = builder.addTag(kSig_v_v);
builder.addFunction("f", kSig_v_v)
.addBody([
kExprThrow, except,
@@ -21,5 +21,5 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
var serialized = %SerializeWasmModule(module);
module = %DeserializeWasmModule(serialized, wire_bytes);
var instance2 = new WebAssembly.Instance(module);
- assertThrows(() => instance2.exports.f(), WebAssembly.RuntimeError);
+ assertThrows(() => instance2.exports.f(), WebAssembly.Exception);
})();
diff --git a/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js b/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js
index add184cd42..3e779af415 100644
--- a/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js
@@ -6,6 +6,14 @@
"use strict";
+function CreateResizableArrayBuffer(byteLength, maxByteLength) {
+ return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
+}
+
+function CreateGrowableSharedArrayBuffer(byteLength, maxByteLength) {
+ return new SharedArrayBuffer(byteLength, {maxByteLength: maxByteLength});
+}
+
function resizeHelper(ab, value) {
const return_value = ab.resize(value);
assertEquals(undefined, return_value);
@@ -19,43 +27,146 @@ function growHelper(ab, value) {
}
(function TestRABBasics() {
- const rab = new ResizableArrayBuffer(10, 20);
- assertTrue(rab instanceof ResizableArrayBuffer);
- assertFalse(rab instanceof GrowableSharedArrayBuffer);
- assertFalse(rab instanceof ArrayBuffer);
+ const rab = CreateResizableArrayBuffer(10, 20);
+ assertTrue(rab instanceof ArrayBuffer);
assertFalse(rab instanceof SharedArrayBuffer);
assertEquals(10, rab.byteLength);
assertEquals(20, rab.maxByteLength);
})();
(function TestRABCtorByteLengthEqualsMax() {
- const rab = new ResizableArrayBuffer(10, 10);
+ const rab = CreateResizableArrayBuffer(10, 10);
assertEquals(10, rab.byteLength);
assertEquals(10, rab.maxByteLength);
})();
(function TestRABCtorByteLengthZero() {
- const rab = new ResizableArrayBuffer(0, 10);
+ const rab = CreateResizableArrayBuffer(0, 10);
assertEquals(0, rab.byteLength);
assertEquals(10, rab.maxByteLength);
})();
(function TestRABCtorByteLengthAndMaxZero() {
- const rab = new ResizableArrayBuffer(0, 0);
+ const rab = CreateResizableArrayBuffer(0, 0);
assertEquals(0, rab.byteLength);
assertEquals(0, rab.maxByteLength);
})();
-(function TestRABCtorNoMaxByteLength() {
- assertThrows(() => { new ResizableArrayBuffer(10); }, RangeError);
- // But this is fine; undefined is converted to 0.
- const rab = new ResizableArrayBuffer(0);
- assertEquals(0, rab.byteLength);
- assertEquals(0, rab.maxByteLength);
+const ctors = [[ArrayBuffer, (b) => b.resizable],
+ [SharedArrayBuffer, (b) => b.growable]];
+
+(function TestOptionsBagNotObject() {
+ for (let [ctor, resizable] of ctors) {
+ const buffer = new ctor(10, 'this is not an options bag');
+ assertFalse(resizable(buffer));
+ }
+})();
+
+(function TestOptionsBagMaxByteLengthGetterThrows() {
+ let evil = {};
+ Object.defineProperty(evil, 'maxByteLength',
+ {get: () => { throw new Error('thrown'); }});
+ for (let [ctor, resizable] of ctors) {
+ let caught = false;
+ try {
+ new ctor(10, evil);
+ } catch(e) {
+ assertEquals('thrown', e.message);
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+})();
+
+(function TestMaxByteLengthNonExisting() {
+ for (let [ctor, resizable] of ctors) {
+ const buffer = new ctor(10, {});
+ assertFalse(resizable(buffer));
+ }
+})();
+
+(function TestMaxByteLengthUndefinedOrNan() {
+ for (let [ctor, resizable] of ctors) {
+ const buffer1 = new ctor(10, {maxByteLength: undefined});
+ assertFalse(resizable(buffer1));
+ const buffer2 = new ctor(0, {maxByteLength: NaN});
+ assertTrue(resizable(buffer2));
+ assertEquals(0, buffer2.byteLength);
+ assertEquals(0, buffer2.maxByteLength);
+ }
+})();
+
+(function TestMaxByteLengthBooleanNullOrString() {
+ for (let [ctor, resizable] of ctors) {
+ const buffer1 = new ctor(0, {maxByteLength: true});
+ assertTrue(resizable(buffer1));
+ assertEquals(0, buffer1.byteLength);
+ assertEquals(1, buffer1.maxByteLength);
+ const buffer2 = new ctor(0, {maxByteLength: false});
+ assertTrue(resizable(buffer2));
+ assertEquals(0, buffer2.byteLength);
+ assertEquals(0, buffer2.maxByteLength);
+ const buffer3 = new ctor(0, {maxByteLength: null});
+ assertTrue(resizable(buffer3));
+ assertEquals(0, buffer3.byteLength);
+ assertEquals(0, buffer3.maxByteLength);
+ const buffer4 = new ctor(0, {maxByteLength: '100'});
+ assertTrue(resizable(buffer4));
+ assertEquals(0, buffer4.byteLength);
+ assertEquals(100, buffer4.maxByteLength);
+ }
+})();
+
+(function TestMaxByteLengthDouble() {
+ for (let [ctor, resizable] of ctors) {
+ const buffer1 = new ctor(0, {maxByteLength: -0.0});
+ assertTrue(resizable(buffer1));
+ assertEquals(0, buffer1.byteLength);
+ assertEquals(0, buffer1.maxByteLength);
+ const buffer2 = new ctor(0, {maxByteLength: -0.1});
+ assertTrue(resizable(buffer2));
+ assertEquals(0, buffer2.byteLength);
+ assertEquals(0, buffer2.maxByteLength);
+ const buffer3 = new ctor(0, {maxByteLength: 1.2});
+ assertTrue(resizable(buffer3));
+ assertEquals(0, buffer3.byteLength);
+ assertEquals(1, buffer3.maxByteLength);
+ assertThrows(() => { new ctor(0, {maxByteLength: -1.5}) });
+ assertThrows(() => { new ctor(0, {maxByteLength: -1}) });
+ }
+})();
+
+(function TestMaxByteLengthThrows() {
+ const evil = {valueOf: () => { throw new Error('thrown');}};
+ for (let [ctor, resizable] of ctors) {
+ let caught = false;
+ try {
+ new ctor(0, {maxByteLength: evil});
+ } catch (e) {
+ assertEquals('thrown', e.message);
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+})();
+
+(function TestByteLengthThrows() {
+ const evil1 = {valueOf: () => { throw new Error('byteLength throws');}};
+ const evil2 = {valueOf: () => { throw new Error('maxByteLength throws');}};
+ for (let [ctor, resizable] of ctors) {
+ let caught = false;
+ try {
+ new ctor(evil1, {maxByteLength: evil2});
+ } catch (e) {
+ assertEquals('byteLength throws', e.message);
+ caught = true;
+ }
+ assertTrue(caught);
+ }
})();
(function TestAllocatingOutrageouslyMuchThrows() {
- assertThrows(() => { new ResizableArrayBuffer(0, 2 ** 100);}, RangeError);
+ assertThrows(() => { CreateResizableArrayBuffer(0, 2 ** 100);}, RangeError);
})();
(function TestRABCtorOperationOrder() {
@@ -64,7 +175,7 @@ function growHelper(ab, value) {
log += 'valueof length, '; return 10; }};
const mock_max_length = {valueOf: function() {
log += 'valueof max_length, '; return 10; }};
- new ResizableArrayBuffer(mock_length, mock_max_length);
+ CreateResizableArrayBuffer(mock_length, mock_max_length);
assertEquals('valueof length, valueof max_length, ', log);
})();
@@ -75,7 +186,7 @@ function growHelper(ab, value) {
log += 'valueof length, '; return 10; }};
const mock_max_length = {valueOf: function() {
log += 'valueof max_length, '; return 10; }};
- new ResizableArrayBuffer(mock_length, mock_max_length);
+ CreateResizableArrayBuffer(mock_length, mock_max_length);
assertEquals('valueof length, valueof max_length, ', log);
})();
@@ -86,70 +197,86 @@ function growHelper(ab, value) {
ArrayBuffer.prototype, name).get;
const sab_getter = Object.getOwnPropertyDescriptor(
SharedArrayBuffer.prototype, name).get;
- const rab_getter = Object.getOwnPropertyDescriptor(
- ResizableArrayBuffer.prototype, name).get;
- const gsab_getter = Object.getOwnPropertyDescriptor(
- GrowableSharedArrayBuffer.prototype, name).get;
const ab = new ArrayBuffer(40);
const sab = new SharedArrayBuffer(40);
- const rab = new ResizableArrayBuffer(40, 40);
- const gsab = new GrowableSharedArrayBuffer(40, 40);
+ const rab = CreateResizableArrayBuffer(40, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(40, 40);
assertEquals(40, ab_getter.call(ab));
+ assertEquals(40, ab_getter.call(rab));
assertEquals(40, sab_getter.call(sab));
- assertEquals(40, rab_getter.call(rab));
- assertEquals(40, gsab_getter.call(gsab));
+ assertEquals(40, sab_getter.call(gsab));
assertThrows(() => { ab_getter.call(sab);});
- assertThrows(() => { ab_getter.call(rab);});
assertThrows(() => { ab_getter.call(gsab);});
assertThrows(() => { sab_getter.call(ab);});
assertThrows(() => { sab_getter.call(rab);});
- assertThrows(() => { sab_getter.call(gsab);});
-
- assertThrows(() => { rab_getter.call(ab);});
- assertThrows(() => { rab_getter.call(sab);});
- assertThrows(() => { rab_getter.call(gsab);});
-
- assertThrows(() => { gsab_getter.call(ab);});
- assertThrows(() => { gsab_getter.call(sab);});
- assertThrows(() => { gsab_getter.call(rab);});
})();
(function TestMaxByteLengthGetterReceiverChecks() {
const name = 'maxByteLength';
- const rab_getter = Object.getOwnPropertyDescriptor(
- ResizableArrayBuffer.prototype, name).get;
- const gsab_getter = Object.getOwnPropertyDescriptor(
- GrowableSharedArrayBuffer.prototype, name).get;
+ const ab_getter = Object.getOwnPropertyDescriptor(
+ ArrayBuffer.prototype, name).get;
+ const sab_getter = Object.getOwnPropertyDescriptor(
+ SharedArrayBuffer.prototype, name).get;
const ab = new ArrayBuffer(40);
const sab = new SharedArrayBuffer(40);
- const rab = new ResizableArrayBuffer(20, 40);
- const gsab = new GrowableSharedArrayBuffer(20, 40);
+ const rab = CreateResizableArrayBuffer(20, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(20, 40);
- assertEquals(40, rab_getter.call(rab));
- assertEquals(40, gsab_getter.call(gsab));
+ assertEquals(40, ab_getter.call(ab));
+ assertEquals(40, ab_getter.call(rab));
+ assertEquals(40, sab_getter.call(sab));
+ assertEquals(40, sab_getter.call(gsab));
- assertThrows(() => { rab_getter.call(ab);});
- assertThrows(() => { rab_getter.call(sab);});
- assertThrows(() => { rab_getter.call(gsab);});
+ assertThrows(() => { ab_getter.call(sab);});
+ assertThrows(() => { ab_getter.call(gsab);});
- assertThrows(() => { gsab_getter.call(ab);});
- assertThrows(() => { gsab_getter.call(sab);});
- assertThrows(() => { gsab_getter.call(rab);});
+ assertThrows(() => { sab_getter.call(ab);});
+ assertThrows(() => { sab_getter.call(rab);});
+})();
+
+(function TestResizableGetterReceiverChecks() {
+ const ab_getter = Object.getOwnPropertyDescriptor(
+ ArrayBuffer.prototype, 'resizable').get;
+ const sab_getter = Object.getOwnPropertyDescriptor(
+ SharedArrayBuffer.prototype, 'growable').get;
+
+ const ab = new ArrayBuffer(40);
+ const sab = new SharedArrayBuffer(40);
+ const rab = CreateResizableArrayBuffer(40, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(40, 40);
+
+ assertEquals(false, ab_getter.call(ab));
+ assertEquals(true, ab_getter.call(rab));
+ assertEquals(false, sab_getter.call(sab));
+ assertEquals(true, sab_getter.call(gsab));
+
+ assertThrows(() => { ab_getter.call(sab);});
+ assertThrows(() => { ab_getter.call(gsab);});
+
+ assertThrows(() => { sab_getter.call(ab);});
+ assertThrows(() => { sab_getter.call(rab);});
+})();
+
+(function TestByteLengthAndMaxByteLengthOfDetached() {
+ const rab = CreateResizableArrayBuffer(10, 20);
+ %ArrayBufferDetach(rab);
+ assertEquals(0, rab.byteLength);
+ assertEquals(0, rab.maxByteLength);
})();
(function TestResizeAndGrowReceiverChecks() {
- const rab_resize = ResizableArrayBuffer.prototype.resize;
- const gsab_grow = GrowableSharedArrayBuffer.prototype.grow;
+ const rab_resize = ArrayBuffer.prototype.resize;
+ const gsab_grow = SharedArrayBuffer.prototype.grow;
const ab = new ArrayBuffer(40);
const sab = new SharedArrayBuffer(40);
- const rab = new ResizableArrayBuffer(10, 40);
- const gsab = new GrowableSharedArrayBuffer(10, 40);
+ const rab = CreateResizableArrayBuffer(10, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 40);
rab_resize.call(rab, 20);
gsab_grow.call(gsab, 20);
@@ -163,39 +290,39 @@ function growHelper(ab, value) {
})();
(function TestRABResizeToMax() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
resizeHelper(rab, 20);
})();
(function TestRABResizeToSameSize() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
resizeHelper(rab, 10);
})();
(function TestRABResizeToSmaller() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
resizeHelper(rab, 5);
})();
(function TestRABResizeToZero() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
resizeHelper(rab, 0);
})();
(function TestRABResizeZeroToZero() {
- const rab = new ResizableArrayBuffer(0, 20);
+ const rab = CreateResizableArrayBuffer(0, 20);
resizeHelper(rab, 0);
})();
(function TestRABGrowBeyondMaxThrows() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
assertEquals(10, rab.byteLength);
assertThrows(() => {rab.grow(21)});
assertEquals(10, rab.byteLength);
})();
(function TestRABResizeMultipleTimes() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
const sizes = [15, 7, 7, 0, 8, 20, 20, 10];
for (let s of sizes) {
resizeHelper(rab, s);
@@ -203,7 +330,7 @@ function growHelper(ab, value) {
})();
(function TestRABResizeParameters() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
rab.resize('15');
assertEquals(15, rab.byteLength);
rab.resize({valueOf: function() { return 16; }});
@@ -213,7 +340,7 @@ function growHelper(ab, value) {
})();
(function TestRABResizeInvalidParameters() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
assertThrows(() => { rab.resize(-1) }, RangeError);
assertThrows(() => { rab.resize({valueOf: function() {
throw new Error('length param'); }})});
@@ -227,13 +354,13 @@ function growHelper(ab, value) {
})();
(function TestRABResizeDetached() {
- const rab = new ResizableArrayBuffer(10, 20);
+ const rab = CreateResizableArrayBuffer(10, 20);
%ArrayBufferDetach(rab);
assertThrows(() => { rab.resize(15) }, TypeError);
})();
(function DetachInsideResizeParameterConversion() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
const evil = {
valueOf: () => { %ArrayBufferDetach(rab); return 20; }
@@ -243,7 +370,7 @@ function growHelper(ab, value) {
})();
(function ResizeInsideResizeParameterConversion() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
const evil = {
valueOf: () => { rab.resize(10); return 20; }
@@ -256,7 +383,7 @@ function growHelper(ab, value) {
(function TestRABNewMemoryAfterResizeInitializedToZero() {
const maybe_page_size = 4096;
- const rab = new ResizableArrayBuffer(maybe_page_size, 2 * maybe_page_size);
+ const rab = CreateResizableArrayBuffer(maybe_page_size, 2 * maybe_page_size);
const i8a = new Int8Array(rab);
rab.resize(2 * maybe_page_size);
for (let i = 0; i < 2 * maybe_page_size; ++i) {
@@ -266,7 +393,7 @@ function growHelper(ab, value) {
(function TestRABMemoryInitializedToZeroAfterShrinkAndGrow() {
const maybe_page_size = 4096;
- const rab = new ResizableArrayBuffer(maybe_page_size, 2 * maybe_page_size);
+ const rab = CreateResizableArrayBuffer(maybe_page_size, 2 * maybe_page_size);
const i8a = new Int8Array(rab);
for (let i = 0; i < maybe_page_size; ++i) {
i8a[i] = 1;
@@ -279,81 +406,71 @@ function growHelper(ab, value) {
})();
(function TestGSABBasics() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
- assertFalse(gsab instanceof ResizableArrayBuffer);
- assertTrue(gsab instanceof GrowableSharedArrayBuffer);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertFalse(gsab instanceof ArrayBuffer);
- assertFalse(gsab instanceof SharedArrayBuffer);
+ assertTrue(gsab instanceof SharedArrayBuffer);
assertEquals(10, gsab.byteLength);
assertEquals(20, gsab.maxByteLength);
})();
(function TestGSABCtorByteLengthEqualsMax() {
- const gsab = new GrowableSharedArrayBuffer(10, 10);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 10);
assertEquals(10, gsab.byteLength);
assertEquals(10, gsab.maxByteLength);
})();
(function TestGSABCtorByteLengthZero() {
- const gsab = new GrowableSharedArrayBuffer(0, 10);
+ const gsab = CreateGrowableSharedArrayBuffer(0, 10);
assertEquals(0, gsab.byteLength);
assertEquals(10, gsab.maxByteLength);
})();
(function TestGSABCtorByteLengthAndMaxZero() {
- const gsab = new GrowableSharedArrayBuffer(0, 0);
- assertEquals(0, gsab.byteLength);
- assertEquals(0, gsab.maxByteLength);
-})();
-
-(function TestGSABCtorNoMaxByteLength() {
- assertThrows(() => { new GrowableSharedArrayBuffer(10); }, RangeError);
- // But this is fine; undefined is converted to 0.
- const gsab = new GrowableSharedArrayBuffer(0);
+ const gsab = CreateGrowableSharedArrayBuffer(0, 0);
assertEquals(0, gsab.byteLength);
assertEquals(0, gsab.maxByteLength);
})();
(function TestAllocatingOutrageouslyMuchThrows() {
- assertThrows(() => { new GrowableSharedArrayBuffer(0, 2 ** 100);},
+ assertThrows(() => { CreateGrowableSharedArrayBuffer(0, 2 ** 100);},
RangeError);
})();
(function TestGSABGrowToMax() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
growHelper(gsab, 20);
})();
(function TestGSABGrowToSameSize() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
growHelper(gsab, 10);
})();
(function TestGSABGrowToSmallerThrows() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
assertThrows(() => {gsab.grow(5)});
assertEquals(10, gsab.byteLength);
})();
(function TestGSABGrowToZeroThrows() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
assertThrows(() => {gsab.grow(0)});
assertEquals(10, gsab.byteLength);
})();
(function TestGSABGrowBeyondMaxThrows() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
assertThrows(() => {gsab.grow(21)});
assertEquals(10, gsab.byteLength);
})();
(function TestGSABGrowMultipleTimes() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
const sizes = [15, 7, 7, 0, 8, 20, 20, 10];
for (let s of sizes) {
@@ -368,7 +485,7 @@ function growHelper(ab, value) {
})();
(function TestGSABGrowParameters() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
gsab.grow('15');
assertEquals(15, gsab.byteLength);
gsab.grow({valueOf: function() { return 16; }});
@@ -378,7 +495,7 @@ function growHelper(ab, value) {
})();
(function TestGSABGrowInvalidParameters() {
- const gsab = new GrowableSharedArrayBuffer(0, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(0, 20);
assertThrows(() => { gsab.grow(-1) }, RangeError);
assertThrows(() => { gsab.grow({valueOf: function() {
throw new Error('length param'); }})});
@@ -393,7 +510,7 @@ function growHelper(ab, value) {
(function TestGSABMemoryInitializedToZeroAfterGrow() {
const maybe_page_size = 4096;
- const gsab = new GrowableSharedArrayBuffer(maybe_page_size,
+ const gsab = CreateGrowableSharedArrayBuffer(maybe_page_size,
2 * maybe_page_size);
const i8a = new Int8Array(gsab);
gsab.grow(2 * maybe_page_size);
@@ -404,7 +521,7 @@ function growHelper(ab, value) {
})();
(function GrowGSABOnADifferentThread() {
- const gsab = new GrowableSharedArrayBuffer(10, 20);
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
assertEquals(10, gsab.byteLength);
function workerCode() {
function assert(thing) {
@@ -414,11 +531,10 @@ function growHelper(ab, value) {
}
onmessage = function(params) {
const gsab = params.gsab;
- assert(!(gsab instanceof ResizableArrayBuffer));
- assert(gsab instanceof GrowableSharedArrayBuffer);
assert(!(gsab instanceof ArrayBuffer));
- assert(!(gsab instanceof SharedArrayBuffer));
+ assert(gsab instanceof SharedArrayBuffer);
assert(10 == gsab.byteLength);
+ assert(20 == gsab.maxByteLength);
gsab.grow(15);
postMessage('ok');
}
diff --git a/deps/v8/test/mjsunit/temporal/calendar-constructor.js b/deps/v8/test/mjsunit/temporal/calendar-constructor.js
new file mode 100644
index 0000000000..fcdcbe4f2c
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-constructor.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar
+// 1. If NewTarget is undefined, then
+// a. Throw a TypeError exception.
+assertThrows(() => Temporal.Calendar("iso8601"), TypeError,
+ "Constructor Temporal.Calendar requires 'new'");
+
+assertThrows(() => new Temporal.Calendar(), RangeError,
+ "Invalid calendar specified: undefined");
+
+// Wrong case
+assertThrows(() => new Temporal.Calendar("ISO8601"), RangeError,
+ "Invalid calendar specified: ISO8601");
+
+assertEquals("iso8601", (new Temporal.Calendar("iso8601")).id)
diff --git a/deps/v8/test/mjsunit/temporal/calendar-date-add.js b/deps/v8/test/mjsunit/temporal/calendar-date-add.js
new file mode 100644
index 0000000000..ed8afaf0f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-date-add.js
@@ -0,0 +1,95 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.dateadd
+let cal = new Temporal.Calendar("iso8601");
+
+let p1y = new Temporal.Duration(1);
+let p4y = new Temporal.Duration(4);
+let p5m = new Temporal.Duration(0, 5);
+let p1y2m = new Temporal.Duration(1,2);
+let p1y4d = new Temporal.Duration(1,0,0,4);
+let p1y2m4d = new Temporal.Duration(1,2,0,4);
+let p10d = new Temporal.Duration(0,0,0,10);
+let p1w = new Temporal.Duration(0,0,1);
+let p6w = new Temporal.Duration(0,0,6);
+let p2w3d = new Temporal.Duration(0,0,2,3);
+let p1y2w = new Temporal.Duration(1,0,2);
+let p2m3w = new Temporal.Duration(0,2,3);
+
+assertEquals("2021-02-28", cal.dateAdd("2020-02-29", p1y).toJSON());
+assertEquals("2024-02-29", cal.dateAdd("2020-02-29", p4y).toJSON());
+assertEquals("2022-07-16", cal.dateAdd("2021-07-16", p1y).toJSON());
+
+assertEquals("2021-12-16", cal.dateAdd("2021-07-16", p5m).toJSON());
+assertEquals("2022-01-16", cal.dateAdd("2021-08-16", p5m).toJSON());
+assertEquals("2022-03-31", cal.dateAdd("2021-10-31", p5m).toJSON());
+assertEquals("2022-02-28", cal.dateAdd("2021-09-30", p5m).toJSON());
+assertEquals("2020-02-29", cal.dateAdd("2019-09-30", p5m).toJSON());
+assertEquals("2020-03-01", cal.dateAdd("2019-10-01", p5m).toJSON());
+
+assertEquals("2022-09-16", cal.dateAdd("2021-07-16", p1y2m).toJSON());
+assertEquals("2023-01-30", cal.dateAdd("2021-11-30", p1y2m).toJSON());
+assertEquals("2023-02-28", cal.dateAdd("2021-12-31", p1y2m).toJSON());
+assertEquals("2024-02-29", cal.dateAdd("2022-12-31", p1y2m).toJSON());
+
+assertEquals("2022-07-20", cal.dateAdd("2021-07-16", p1y4d).toJSON());
+assertEquals("2022-03-03", cal.dateAdd("2021-02-27", p1y4d).toJSON());
+assertEquals("2024-03-02", cal.dateAdd("2023-02-27", p1y4d).toJSON());
+assertEquals("2023-01-03", cal.dateAdd("2021-12-30", p1y4d).toJSON());
+assertEquals("2022-08-03", cal.dateAdd("2021-07-30", p1y4d).toJSON());
+assertEquals("2022-07-04", cal.dateAdd("2021-06-30", p1y4d).toJSON());
+
+assertEquals("2022-09-20", cal.dateAdd("2021-07-16", p1y2m4d).toJSON());
+assertEquals("2022-05-01", cal.dateAdd("2021-02-27", p1y2m4d).toJSON());
+assertEquals("2022-04-30", cal.dateAdd("2021-02-26", p1y2m4d).toJSON());
+assertEquals("2024-04-30", cal.dateAdd("2023-02-26", p1y2m4d).toJSON());
+assertEquals("2023-03-04", cal.dateAdd("2021-12-30", p1y2m4d).toJSON());
+assertEquals("2022-10-04", cal.dateAdd("2021-07-30", p1y2m4d).toJSON());
+assertEquals("2022-09-03", cal.dateAdd("2021-06-30", p1y2m4d).toJSON());
+
+assertEquals("2021-07-26", cal.dateAdd("2021-07-16", p10d).toJSON());
+assertEquals("2021-08-05", cal.dateAdd("2021-07-26", p10d).toJSON());
+assertEquals("2022-01-05", cal.dateAdd("2021-12-26", p10d).toJSON());
+assertEquals("2020-03-07", cal.dateAdd("2020-02-26", p10d).toJSON());
+assertEquals("2021-03-08", cal.dateAdd("2021-02-26", p10d).toJSON());
+assertEquals("2020-02-29", cal.dateAdd("2020-02-19", p10d).toJSON());
+assertEquals("2021-03-01", cal.dateAdd("2021-02-19", p10d).toJSON());
+
+assertEquals("2021-02-26", cal.dateAdd("2021-02-19", p1w).toJSON());
+assertEquals("2021-03-06", cal.dateAdd("2021-02-27", p1w).toJSON());
+assertEquals("2020-03-05", cal.dateAdd("2020-02-27", p1w).toJSON());
+assertEquals("2021-12-31", cal.dateAdd("2021-12-24", p1w).toJSON());
+assertEquals("2022-01-03", cal.dateAdd("2021-12-27", p1w).toJSON());
+assertEquals("2021-02-03", cal.dateAdd("2021-01-27", p1w).toJSON());
+assertEquals("2021-07-04", cal.dateAdd("2021-06-27", p1w).toJSON());
+assertEquals("2021-08-03", cal.dateAdd("2021-07-27", p1w).toJSON());
+
+assertEquals("2021-04-02", cal.dateAdd("2021-02-19", p6w).toJSON());
+assertEquals("2021-04-10", cal.dateAdd("2021-02-27", p6w).toJSON());
+assertEquals("2020-04-09", cal.dateAdd("2020-02-27", p6w).toJSON());
+assertEquals("2022-02-04", cal.dateAdd("2021-12-24", p6w).toJSON());
+assertEquals("2022-02-07", cal.dateAdd("2021-12-27", p6w).toJSON());
+assertEquals("2021-03-10", cal.dateAdd("2021-01-27", p6w).toJSON());
+assertEquals("2021-08-08", cal.dateAdd("2021-06-27", p6w).toJSON());
+assertEquals("2021-09-07", cal.dateAdd("2021-07-27", p6w).toJSON());
+
+assertEquals("2020-03-17", cal.dateAdd("2020-02-29", p2w3d).toJSON());
+assertEquals("2020-03-16", cal.dateAdd("2020-02-28", p2w3d).toJSON());
+assertEquals("2021-03-17", cal.dateAdd("2021-02-28", p2w3d).toJSON());
+assertEquals("2021-01-14", cal.dateAdd("2020-12-28", p2w3d).toJSON());
+
+assertEquals("2021-03-14", cal.dateAdd("2020-02-29", p1y2w).toJSON());
+assertEquals("2021-03-14", cal.dateAdd("2020-02-28", p1y2w).toJSON());
+assertEquals("2022-03-14", cal.dateAdd("2021-02-28", p1y2w).toJSON());
+assertEquals("2022-01-11", cal.dateAdd("2020-12-28", p1y2w).toJSON());
+
+assertEquals("2020-05-20", cal.dateAdd("2020-02-29", p2m3w).toJSON());
+assertEquals("2020-05-19", cal.dateAdd("2020-02-28", p2m3w).toJSON());
+assertEquals("2021-05-19", cal.dateAdd("2021-02-28", p2m3w).toJSON());
+assertEquals("2021-03-21", cal.dateAdd("2020-12-28", p2m3w).toJSON());
+assertEquals("2020-03-20", cal.dateAdd("2019-12-28", p2m3w).toJSON());
+assertEquals("2020-01-18", cal.dateAdd("2019-10-28", p2m3w).toJSON());
+assertEquals("2020-01-21", cal.dateAdd("2019-10-31", p2m3w).toJSON());
diff --git a/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js b/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js
new file mode 100644
index 0000000000..f272932b43
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-date-from-fields.js
@@ -0,0 +1,220 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.datefromfields
+let cal = new Temporal.Calendar("iso8601")
+
+// Check that invalid first arguments throw
+assertThrows(() => cal.dateFromFields(),
+ TypeError,
+ "Temporal.Calendar.prototype.dateFromFields called on non-object");
+[undefined, true, false, 123, 456n, Symbol(), "string",
+ 123.456, NaN, null].forEach(
+ function(fields) {
+ assertThrows(() => cal.dateFromFields(fields), TypeError,
+ "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ assertThrows(() => cal.dateFromFields(fields, undefined), TypeError,
+ "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ assertThrows(() => cal.dateFromFields(fields, {overflow: "constrain"}),
+ TypeError,
+ "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ assertThrows(() => cal.dateFromFields(fields, {overflow: "reject"}),
+ TypeError,
+ "Temporal.Calendar.prototype.dateFromFields called on non-object");
+ });
+
+assertThrows(() => cal.dateFromFields({month: 1, day: 17}),
+ TypeError, "invalid_argument");
+assertThrows(() => cal.dateFromFields({year: 2021, day: 17}),
+ TypeError, "invalid_argument");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 12}),
+ TypeError, "invalid_argument");
+
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "m1", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M1", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "m01", day: 17}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.dateFromFields({year: 2021, month: 12, monthCode: "M11",
+ day: 17}), RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M00", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M19", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M99", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields({year: 2021, monthCode: "M13", day: 17}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.dateFromFields({year: 2021, month: -1, day: 17}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: -Infinity, day: 17}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 7, day: -17}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 7, day: -Infinity}),
+ RangeError, "Invalid time value");
+
+assertThrows(() => cal.dateFromFields({year: 2021, month: 12, day: 0},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 12, day: 32},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 1, day: 32},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 2, day: 29},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 6, day: 31},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 9, day: 31},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 0, day: 5},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields({year: 2021, month: 13, day: 5},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M12", day: 0}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M12", day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M01", day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M02", day: 29}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M06", day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M09", day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M00", day: 5}, {overflow: "reject"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, monthCode: "M13", day: 5}, {overflow: "reject"}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 12, day: 0}), RangeError, "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 0, day: 3}), RangeError, "Invalid time value");
+
+// Check that an invalid second argument throws
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 7, day: 13}, {overflow: "invalid"}),
+ RangeError,
+ "Value invalid out of range for Temporal.Calendar.prototype.dateFromFields"
+ + " options property overflow");
+
+assertEquals("2021-07-15",
+ cal.dateFromFields({year: 2021, month: 7, day: 15}).toJSON());
+assertEquals("2021-07-03",
+ cal.dateFromFields({year: 2021, month: 7, day: 3}).toJSON());
+assertEquals("2021-12-31",
+ cal.dateFromFields({year: 2021, month: 12, day: 31}).toJSON());
+assertEquals("2021-07-15",
+ cal.dateFromFields({year: 2021, monthCode: "M07", day: 15}).toJSON());
+assertEquals("2021-07-03",
+ cal.dateFromFields({year: 2021, monthCode: "M07", day: 3}).toJSON());
+assertEquals("2021-12-31",
+ cal.dateFromFields({year: 2021, monthCode: "M12", day: 31}).toJSON());
+
+assertEquals("2021-01-31",
+ cal.dateFromFields({year: 2021, month: 1, day: 133}).toJSON());
+assertEquals("2021-02-28",
+ cal.dateFromFields({year: 2021, month: 2, day: 133}).toJSON());
+assertEquals("2021-03-31",
+ cal.dateFromFields({year: 2021, month: 3, day: 9033}).toJSON());
+assertEquals("2021-04-30",
+ cal.dateFromFields({year: 2021, month: 4, day: 50}).toJSON());
+assertEquals("2021-05-31",
+ cal.dateFromFields({year: 2021, month: 5, day: 77}).toJSON());
+assertEquals("2021-06-30",
+ cal.dateFromFields({year: 2021, month: 6, day: 33}).toJSON());
+assertEquals("2021-07-31",
+ cal.dateFromFields({year: 2021, month: 7, day: 33}).toJSON());
+assertEquals("2021-08-31",
+ cal.dateFromFields({year: 2021, month: 8, day: 300}).toJSON());
+assertEquals("2021-09-30",
+ cal.dateFromFields({year: 2021, month: 9, day: 400}).toJSON());
+assertEquals("2021-10-31",
+ cal.dateFromFields({year: 2021, month: 10, day: 400}).toJSON());
+assertEquals("2021-11-30",
+ cal.dateFromFields({year: 2021, month: 11, day: 400}).toJSON());
+assertEquals("2021-12-31",
+ cal.dateFromFields({year: 2021, month: 12, day: 500}).toJSON());
+assertEquals("2021-12-31",
+ cal.dateFromFields({year: 2021, month: 13, day: 500}).toJSON());
+assertEquals("2021-12-31",
+ cal.dateFromFields({year: 2021, month: 999999, day: 500}).toJSON());
+assertEquals("2021-01-31",
+ cal.dateFromFields({year: 2021, monthCode: "M01", day: 133}).toJSON());
+assertEquals("2021-02-28",
+ cal.dateFromFields({year: 2021, monthCode: "M02", day: 133}).toJSON());
+assertEquals("2021-03-31",
+ cal.dateFromFields({year: 2021, monthCode: "M03", day: 9033}).toJSON());
+assertEquals("2021-04-30",
+ cal.dateFromFields({year: 2021, monthCode: "M04", day: 50}).toJSON());
+assertEquals("2021-05-31",
+ cal.dateFromFields({year: 2021, monthCode: "M05", day: 77}).toJSON());
+assertEquals("2021-06-30",
+ cal.dateFromFields({year: 2021, monthCode: "M06", day: 33}).toJSON());
+assertEquals("2021-07-31",
+ cal.dateFromFields({year: 2021, monthCode: "M07", day: 33}).toJSON());
+assertEquals("2021-08-31",
+ cal.dateFromFields({year: 2021, monthCode: "M08", day: 300}).toJSON());
+assertEquals("2021-09-30",
+ cal.dateFromFields({year: 2021, monthCode: "M09", day: 400}).toJSON());
+assertEquals("2021-10-31",
+ cal.dateFromFields({year: 2021, monthCode: "M10", day: 400}).toJSON());
+assertEquals("2021-11-30",
+ cal.dateFromFields({year: 2021, monthCode: "M11", day: 400}).toJSON());
+assertEquals("2021-12-31",
+ cal.dateFromFields({year: 2021, monthCode: "M12", day: 500}).toJSON());
+
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 1, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 2, day: 29}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 3, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 4, day: 31}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 5, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 6, day: 31}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 7, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 8, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 9, day: 31}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 10, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 11, day: 31}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 12, day: 32}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.dateFromFields(
+ {year: 2021, month: 13, day: 5}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
diff --git a/deps/v8/test/mjsunit/temporal/calendar-date-until.js b/deps/v8/test/mjsunit/temporal/calendar-date-until.js
new file mode 100644
index 0000000000..a403646118
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-date-until.js
@@ -0,0 +1,226 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.dateuntil
+let cal = new Temporal.Calendar("iso8601");
+
+// Test that invalid largestUnit values throw
+[ "hour", "minute", "second", "millisecond", "microsecond", "nanosecond" ]
+.forEach(function(largestUnit) {
+ assertThrows(() => cal.dateUntil("2021-07-16", "2021-07-17",
+ {largestUnit}), RangeError,
+ "Invalid unit argument for Temporal.Calendar.prototype.dateUntil() "+
+ "'largestUnit'");
+});
+
+assertEquals("PT0S", cal.dateUntil("2021-07-16", "2021-07-16").toJSON());
+assertEquals("P1D", cal.dateUntil("2021-07-16", "2021-07-17").toJSON());
+assertEquals("P32D", cal.dateUntil("2021-07-16", "2021-08-17").toJSON());
+assertEquals("P62D", cal.dateUntil("2021-07-16", "2021-09-16").toJSON());
+assertEquals("P365D", cal.dateUntil("2021-07-16", "2022-07-16").toJSON());
+assertEquals("P3652D", cal.dateUntil("2021-07-16", "2031-07-16").toJSON());
+
+assertEquals("-P1D", cal.dateUntil("2021-07-17", "2021-07-16").toJSON());
+assertEquals("-P32D", cal.dateUntil("2021-08-17", "2021-07-16").toJSON());
+assertEquals("-P62D", cal.dateUntil("2021-09-16", "2021-07-16").toJSON());
+assertEquals("-P365D", cal.dateUntil("2022-07-16", "2021-07-16").toJSON());
+assertEquals("-P3652D", cal.dateUntil("2031-07-16", "2021-07-16").toJSON());
+
+["day", "days"].forEach(function(largestUnit) {
+ let opt = {largestUnit};
+ assertEquals("PT0S", cal.dateUntil("2021-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("P1D", cal.dateUntil("2021-07-16", "2021-07-17", opt).toJSON());
+ assertEquals("P32D", cal.dateUntil("2021-07-16", "2021-08-17", opt).toJSON());
+ assertEquals("P62D", cal.dateUntil("2021-07-16", "2021-09-16", opt).toJSON());
+ assertEquals("P365D",
+ cal.dateUntil("2021-07-16", "2022-07-16", opt).toJSON());
+ assertEquals("P3652D"
+ ,cal.dateUntil("2021-07-16", "2031-07-16", opt).toJSON());
+
+ assertEquals("-P1D",
+ cal.dateUntil("2021-07-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P32D",
+ cal.dateUntil("2021-08-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P62D",
+ cal.dateUntil("2021-09-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P365D",
+ cal.dateUntil("2022-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P3652D",
+ cal.dateUntil("2031-07-16", "2021-07-16", opt).toJSON());
+});
+
+["week", "weeks"].forEach(function(largestUnit) {
+ let opt = {largestUnit};
+ assertEquals("PT0S", cal.dateUntil("2021-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("P1D", cal.dateUntil("2021-07-16", "2021-07-17", opt).toJSON());
+ assertEquals("P1W", cal.dateUntil("2021-07-16", "2021-07-23", opt).toJSON());
+ assertEquals("P4W4D",
+ cal.dateUntil("2021-07-16", "2021-08-17", opt).toJSON());
+ assertEquals("P4W", cal.dateUntil("2021-07-16", "2021-08-13", opt).toJSON());
+ assertEquals("P8W6D",
+ cal.dateUntil("2021-07-16", "2021-09-16", opt).toJSON());
+ assertEquals("P52W1D",
+ cal.dateUntil("2021-07-16", "2022-07-16", opt).toJSON());
+ assertEquals("P521W5D"
+ ,cal.dateUntil("2021-07-16", "2031-07-16", opt).toJSON());
+
+ assertEquals("-P1D",
+ cal.dateUntil("2021-07-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P4W4D",
+ cal.dateUntil("2021-08-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P4W",
+ cal.dateUntil("2021-08-13", "2021-07-16", opt).toJSON());
+ assertEquals("-P8W6D",
+ cal.dateUntil("2021-09-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P52W1D",
+ cal.dateUntil("2022-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P521W5D",
+ cal.dateUntil("2031-07-16", "2021-07-16", opt).toJSON());
+});
+
+["month", "months"].forEach(function(largestUnit) {
+ let opt = {largestUnit};
+ assertEquals("PT0S", cal.dateUntil("2021-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("P1D", cal.dateUntil("2021-07-16", "2021-07-17", opt).toJSON());
+ assertEquals("P7D", cal.dateUntil("2021-07-16", "2021-07-23", opt).toJSON());
+ assertEquals("P1M", cal.dateUntil("2021-07-16", "2021-08-16", opt).toJSON());
+ assertEquals("P1M", cal.dateUntil("2020-12-16", "2021-01-16", opt).toJSON());
+ assertEquals("P1M", cal.dateUntil("2021-01-05", "2021-02-05", opt).toJSON());
+ assertEquals("P2M", cal.dateUntil("2021-01-07", "2021-03-07", opt).toJSON());
+ assertEquals("P1M1D",
+ cal.dateUntil("2021-07-16", "2021-08-17", opt).toJSON());
+ assertEquals("P28D", cal.dateUntil("2021-07-16", "2021-08-13", opt).toJSON());
+ assertEquals("P2M", cal.dateUntil("2021-07-16", "2021-09-16", opt).toJSON());
+ assertEquals("P12M",
+ cal.dateUntil("2021-07-16", "2022-07-16", opt).toJSON());
+ assertEquals("P120M"
+ ,cal.dateUntil("2021-07-16", "2031-07-16", opt).toJSON());
+
+ assertEquals("-P1D",
+ cal.dateUntil("2021-07-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P1M1D",
+ cal.dateUntil("2021-08-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P28D",
+ cal.dateUntil("2021-08-13", "2021-07-16", opt).toJSON());
+ assertEquals("-P1M",
+ cal.dateUntil("2021-08-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P1M3D",
+ cal.dateUntil("2021-08-16", "2021-07-13", opt).toJSON());
+ assertEquals("-P2M",
+ cal.dateUntil("2021-09-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P2M5D",
+ cal.dateUntil("2021-09-21", "2021-07-16", opt).toJSON());
+ assertEquals("-P12M",
+ cal.dateUntil("2022-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P12M1D",
+ cal.dateUntil("2022-07-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P120M",
+ cal.dateUntil("2031-07-16", "2021-07-16", opt).toJSON());
+});
+
+["year", "years"].forEach(function(largestUnit) {
+ let opt = {largestUnit};
+ assertEquals("PT0S", cal.dateUntil("2021-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("P1D", cal.dateUntil("2021-07-16", "2021-07-17", opt).toJSON());
+ assertEquals("P7D", cal.dateUntil("2021-07-16", "2021-07-23", opt).toJSON());
+ assertEquals("P1M", cal.dateUntil("2021-07-16", "2021-08-16", opt).toJSON());
+ assertEquals("P1M", cal.dateUntil("2020-12-16", "2021-01-16", opt).toJSON());
+ assertEquals("P1M", cal.dateUntil("2021-01-05", "2021-02-05", opt).toJSON());
+ assertEquals("P2M", cal.dateUntil("2021-01-07", "2021-03-07", opt).toJSON());
+ assertEquals("P1M1D", cal.dateUntil("2021-07-16", "2021-08-17", opt).toJSON());
+ assertEquals("P28D", cal.dateUntil("2021-07-16", "2021-08-13", opt).toJSON());
+ assertEquals("P2M", cal.dateUntil("2021-07-16", "2021-09-16", opt).toJSON());
+ assertEquals("P1Y",
+ cal.dateUntil("2021-07-16", "2022-07-16", opt).toJSON());
+ assertEquals("P1Y3D",
+ cal.dateUntil("2021-07-16", "2022-07-19", opt).toJSON());
+ assertEquals("P1Y2M3D",
+ cal.dateUntil("2021-07-16", "2022-09-19", opt).toJSON());
+ assertEquals("P10Y",
+ cal.dateUntil("2021-07-16", "2031-07-16", opt).toJSON());
+ assertEquals("P10Y5M",
+ cal.dateUntil("2021-07-16", "2031-12-16", opt).toJSON());
+ assertEquals("P23Y7M",
+ cal.dateUntil("1997-12-16", "2021-07-16", opt).toJSON());
+ assertEquals("P24Y",
+ cal.dateUntil("1997-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("P23Y11M29D",
+ cal.dateUntil("1997-07-16", "2021-07-15", opt).toJSON());
+ assertEquals("P23Y11M30D",
+ cal.dateUntil("1997-06-16", "2021-06-15", opt).toJSON());
+ assertEquals("P60Y1M",
+ cal.dateUntil("1960-02-16", "2020-03-16", opt).toJSON());
+ assertEquals("P61Y27D",
+ cal.dateUntil("1960-02-16", "2021-03-15", opt).toJSON());
+ assertEquals("P60Y28D",
+ cal.dateUntil("1960-02-16", "2020-03-15", opt).toJSON());
+
+ assertEquals("P3M16D",
+ cal.dateUntil("2021-03-30", "2021-07-16", opt).toJSON());
+ assertEquals("P1Y3M16D",
+ cal.dateUntil("2020-03-30", "2021-07-16", opt).toJSON());
+ assertEquals("P61Y3M16D",
+ cal.dateUntil("1960-03-30", "2021-07-16", opt).toJSON());
+ assertEquals("P1Y6M16D",
+ cal.dateUntil("2019-12-30", "2021-07-16", opt).toJSON());
+ assertEquals("P6M16D",
+ cal.dateUntil("2020-12-30", "2021-07-16", opt).toJSON());
+ assertEquals("P23Y6M16D",
+ cal.dateUntil("1997-12-30", "2021-07-16", opt).toJSON());
+ assertEquals("P2019Y6M21D",
+ cal.dateUntil("0001-12-25", "2021-07-16", opt).toJSON());
+ assertEquals("P1Y2M5D",
+ cal.dateUntil("2019-12-30", "2021-03-05", opt).toJSON());
+
+ assertEquals("-P1D",
+ cal.dateUntil("2021-07-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P1M1D",
+ cal.dateUntil("2021-08-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P28D",
+ cal.dateUntil("2021-08-13", "2021-07-16", opt).toJSON());
+ assertEquals("-P1M",
+ cal.dateUntil("2021-08-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P1M3D",
+ cal.dateUntil("2021-08-16", "2021-07-13", opt).toJSON());
+ assertEquals("-P2M",
+ cal.dateUntil("2021-09-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P2M5D",
+ cal.dateUntil("2021-09-21", "2021-07-16", opt).toJSON());
+ assertEquals("-P1Y",
+ cal.dateUntil("2022-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P1Y1D",
+ cal.dateUntil("2022-07-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P1Y3M1D",
+ cal.dateUntil("2022-10-17", "2021-07-16", opt).toJSON());
+ assertEquals("-P10Y",
+ cal.dateUntil("2031-07-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P10Y11M",
+ cal.dateUntil("2032-07-16", "2021-08-16", opt).toJSON());
+
+ assertEquals("-P10Y5M",
+ cal.dateUntil("2031-12-16", "2021-07-16", opt).toJSON());
+ assertEquals("-P13Y7M",
+ cal.dateUntil("2011-07-16", "1997-12-16", opt).toJSON());
+ assertEquals("-P24Y",
+ cal.dateUntil("2021-07-16", "1997-07-16", opt).toJSON());
+ assertEquals("-P23Y11M30D",
+ cal.dateUntil("2021-07-15", "1997-07-16", opt).toJSON());
+ assertEquals("-P23Y11M29D",
+ cal.dateUntil("2021-06-15", "1997-06-16", opt).toJSON());
+ assertEquals("-P60Y1M",
+ cal.dateUntil("2020-03-16", "1960-02-16", opt).toJSON());
+ assertEquals("-P61Y28D",
+ cal.dateUntil("2021-03-15", "1960-02-16", opt).toJSON());
+ assertEquals("-P60Y28D",
+ cal.dateUntil("2020-03-15", "1960-02-16", opt).toJSON());
+
+ assertEquals("-P61Y3M17D",
+ cal.dateUntil("2021-07-16", "1960-03-30", opt).toJSON());
+
+ assertEquals("-P2019Y6M22D",
+ cal.dateUntil("2021-07-16", "0001-12-25", opt).toJSON());
+ assertEquals("-P23Y6M17D",
+ cal.dateUntil("2021-07-16", "1997-12-30", opt).toJSON());
+});
diff --git a/deps/v8/test/mjsunit/temporal/calendar-day-of-week.js b/deps/v8/test/mjsunit/temporal/calendar-day-of-week.js
new file mode 100644
index 0000000000..b9e6c3ed28
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-day-of-week.js
@@ -0,0 +1,80 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.dayofweek
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(4, cal.dayOfWeek(new Temporal.PlainDate(1970, 1, 1)));
+assertEquals(6, cal.dayOfWeek(new Temporal.PlainDate(2000, 1, 1)));
+
+assertEquals(5, cal.dayOfWeek(new Temporal.PlainDate(2021, 1, 15)));
+// leap year
+assertEquals(6, cal.dayOfWeek(new Temporal.PlainDate(2020, 2, 15)));
+assertEquals(2, cal.dayOfWeek(new Temporal.PlainDate(2000, 2, 15)));
+// non-leap year
+assertEquals(1, cal.dayOfWeek(new Temporal.PlainDate(2021, 2, 15)));
+assertEquals(1, cal.dayOfWeek(new Temporal.PlainDate(2021, 3, 15)));
+assertEquals(4, cal.dayOfWeek(new Temporal.PlainDate(2021, 4, 15)));
+assertEquals(6, cal.dayOfWeek(new Temporal.PlainDate(2021, 5, 15)));
+assertEquals(2, cal.dayOfWeek(new Temporal.PlainDate(2021, 6, 15)));
+assertEquals(4, cal.dayOfWeek(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(7, cal.dayOfWeek(new Temporal.PlainDate(2021, 8, 15)));
+assertEquals(3, cal.dayOfWeek(new Temporal.PlainDate(2021, 9, 15)));
+assertEquals(5, cal.dayOfWeek(new Temporal.PlainDate(2021, 10, 15)));
+assertEquals(1, cal.dayOfWeek(new Temporal.PlainDate(2021, 11, 15)));
+assertEquals(3, cal.dayOfWeek(new Temporal.PlainDate(2021, 12, 15)));
+
+assertEquals(4,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 1, 23, 5, 30, 13)));
+// leap year
+assertEquals(5,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1996, 2, 23, 5, 30, 13)));
+assertEquals(3,
+ cal.dayOfWeek(new Temporal.PlainDateTime(2000, 2, 23, 5, 30, 13)));
+// non leap year
+assertEquals(7,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 2, 23, 5, 30, 13)));
+assertEquals(7,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 3, 23, 5, 30, 13)));
+assertEquals(3,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 4, 23, 5, 30, 13)));
+assertEquals(5,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 5, 23, 5, 30, 13)));
+assertEquals(1,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 6, 23, 5, 30, 13)));
+assertEquals(3,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 7, 23, 5, 30, 13)));
+assertEquals(6,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(2,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 9, 23, 5, 30, 13)));
+assertEquals(4,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 10, 23, 5, 30, 13)));
+assertEquals(7,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 11, 23, 5, 30, 13)));
+assertEquals(2,
+ cal.dayOfWeek(new Temporal.PlainDateTime(1997, 12, 23, 5, 30, 13)));
+
+assertEquals(5, cal.dayOfWeek("2019-01-18"));
+// leap year
+assertEquals(2, cal.dayOfWeek("2020-02-18"));
+// non leap
+assertEquals(1, cal.dayOfWeek("2019-02-18"));
+assertEquals(1, cal.dayOfWeek("2019-03-18"));
+assertEquals(4, cal.dayOfWeek("2019-04-18"));
+assertEquals(6, cal.dayOfWeek("2019-05-18"));
+assertEquals(2, cal.dayOfWeek("2019-06-18"));
+assertEquals(4, cal.dayOfWeek("2019-07-18"));
+assertEquals(7, cal.dayOfWeek("2019-08-18"));
+assertEquals(3, cal.dayOfWeek("2019-09-18"));
+assertEquals(5, cal.dayOfWeek("2019-10-18"));
+assertEquals(1, cal.dayOfWeek("2019-11-18"));
+assertEquals(3, cal.dayOfWeek("2019-12-18"));
+
+// TODO test the following later
+//assertEquals(7, cal.dayOfWeek(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(31, cal.dayOfWeek(new Temporal.ZonedDateTime(86400n * 366n * 50n,
+// "UTC")))
+//assertEquals(30, cal.dayOfWeek({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-day-of-year.js b/deps/v8/test/mjsunit/temporal/calendar-day-of-year.js
new file mode 100644
index 0000000000..a89f1438be
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-day-of-year.js
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.dayofyear
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(1, cal.dayOfYear(new Temporal.PlainDate(1970, 1, 1)));
+assertEquals(1, cal.dayOfYear(new Temporal.PlainDate(2000, 1, 1)));
+
+assertEquals(15, cal.dayOfYear(new Temporal.PlainDate(2021, 1, 15)));
+assertEquals(46, cal.dayOfYear(new Temporal.PlainDate(2020, 2, 15)));
+assertEquals(46, cal.dayOfYear(new Temporal.PlainDate(2000, 2, 15)));
+assertEquals(75, cal.dayOfYear(new Temporal.PlainDate(2020, 3, 15)));
+assertEquals(75, cal.dayOfYear(new Temporal.PlainDate(2000, 3, 15)));
+assertEquals(74, cal.dayOfYear(new Temporal.PlainDate(2001, 3, 15)));
+assertEquals(366, cal.dayOfYear(new Temporal.PlainDate(2000, 12, 31)));
+assertEquals(365, cal.dayOfYear(new Temporal.PlainDate(2001, 12, 31)));
+
+assertEquals(23,
+ cal.dayOfYear(new Temporal.PlainDateTime(1997, 1, 23, 5, 30, 13)));
+assertEquals(54,
+ cal.dayOfYear(new Temporal.PlainDateTime(1997, 2, 23, 5, 30, 13)));
+assertEquals(83,
+ cal.dayOfYear(new Temporal.PlainDateTime(1996, 3, 23, 5, 30, 13)));
+assertEquals(82,
+ cal.dayOfYear(new Temporal.PlainDateTime(1997, 3, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.dayOfYear(new Temporal.PlainDateTime(1997, 12, 31, 5, 30, 13)));
+assertEquals(366,
+ cal.dayOfYear(new Temporal.PlainDateTime(1996, 12, 31, 5, 30, 13)));
+
+assertEquals(18, cal.dayOfYear("2019-01-18"));
+assertEquals(49, cal.dayOfYear("2020-02-18"));
+assertEquals(365, cal.dayOfYear("2019-12-31"));
+assertEquals(366, cal.dayOfYear("2000-12-31"));
+
+// TODO test the following later
+//assertEquals(7, cal.dayOfYear(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(31, cal.dayOfYear(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals(30, cal.dayOfYear({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-day.js b/deps/v8/test/mjsunit/temporal/calendar-day.js
new file mode 100644
index 0000000000..27ad5a3369
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-day.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.day
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(15, cal.day(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(23, cal.day(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(6, cal.day(new Temporal.PlainMonthDay(2, 6)));
+assertEquals(18, cal.day("2019-03-18"));
+
+// TODO test the following later
+//assertEquals(??, cal.day(new Temporal.ZonedDateTime(86400n * 366n * 50n,
+// "UTC")))
+//assertEquals(11, cal.day({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-days-in-month.js b/deps/v8/test/mjsunit/temporal/calendar-days-in-month.js
new file mode 100644
index 0000000000..379e1fbaa1
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-days-in-month.js
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.daysinmonth
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 1, 15)));
+// leap year
+assertEquals(29, cal.daysInMonth(new Temporal.PlainDate(2020, 2, 15)));
+assertEquals(29, cal.daysInMonth(new Temporal.PlainDate(2000, 2, 15)));
+// non-leap year
+assertEquals(28, cal.daysInMonth(new Temporal.PlainDate(2021, 2, 15)));
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 3, 15)));
+assertEquals(30, cal.daysInMonth(new Temporal.PlainDate(2021, 4, 15)));
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 5, 15)));
+assertEquals(30, cal.daysInMonth(new Temporal.PlainDate(2021, 6, 15)));
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 8, 15)));
+assertEquals(30, cal.daysInMonth(new Temporal.PlainDate(2021, 9, 15)));
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 10, 15)));
+assertEquals(30, cal.daysInMonth(new Temporal.PlainDate(2021, 11, 15)));
+assertEquals(31, cal.daysInMonth(new Temporal.PlainDate(2021, 12, 15)));
+
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 1, 23, 5, 30, 13)));
+// leap year
+assertEquals(29,
+ cal.daysInMonth(new Temporal.PlainDateTime(1996, 2, 23, 5, 30, 13)));
+assertEquals(29,
+ cal.daysInMonth(new Temporal.PlainDateTime(2000, 2, 23, 5, 30, 13)));
+// non leap year
+assertEquals(28,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 2, 23, 5, 30, 13)));
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 3, 23, 5, 30, 13)));
+assertEquals(30,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 4, 23, 5, 30, 13)));
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 5, 23, 5, 30, 13)));
+assertEquals(30,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 6, 23, 5, 30, 13)));
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 7, 23, 5, 30, 13)));
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(30,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 9, 23, 5, 30, 13)));
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 10, 23, 5, 30, 13)));
+assertEquals(30,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 11, 23, 5, 30, 13)));
+assertEquals(31,
+ cal.daysInMonth(new Temporal.PlainDateTime(1997, 12, 23, 5, 30, 13)));
+
+assertEquals(31, cal.daysInMonth("2019-01-18"));
+// leap year
+assertEquals(29, cal.daysInMonth("2020-02-18"));
+// non leap
+assertEquals(28, cal.daysInMonth("2019-02-18"));
+assertEquals(31, cal.daysInMonth("2019-03-18"));
+assertEquals(30, cal.daysInMonth("2019-04-18"));
+assertEquals(31, cal.daysInMonth("2019-05-18"));
+assertEquals(30, cal.daysInMonth("2019-06-18"));
+assertEquals(31, cal.daysInMonth("2019-07-18"));
+assertEquals(31, cal.daysInMonth("2019-08-18"));
+assertEquals(30, cal.daysInMonth("2019-09-18"));
+assertEquals(31, cal.daysInMonth("2019-10-18"));
+assertEquals(30, cal.daysInMonth("2019-11-18"));
+assertEquals(31, cal.daysInMonth("2019-12-18"));
+
+// TODO test the following later
+//assertEquals(7, cal.daysInMonth(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(31, cal.daysInMonth(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals(30, cal.daysInMonth({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-days-in-week.js b/deps/v8/test/mjsunit/temporal/calendar-days-in-week.js
new file mode 100644
index 0000000000..76f9baccc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-days-in-week.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.daysinweek
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(7, cal.daysInWeek(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(7,
+ cal.daysInWeek(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(7, cal.daysInWeek("2019-03-18"));
+
+// TODO test the following later
+//assertEquals(7, cal.daysInWeek(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(??, cal.day(new Temporal.ZonedDateTime(86400n * 366n * 50n,
+// "UTC")))
+//assertEquals(11, cal.day({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-days-in-year.js b/deps/v8/test/mjsunit/temporal/calendar-days-in-year.js
new file mode 100644
index 0000000000..7d34268796
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-days-in-year.js
@@ -0,0 +1,57 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.daysinyear
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(1995, 7, 15)));
+assertEquals(366, cal.daysInYear(new Temporal.PlainDate(1996, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(1997, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(1998, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(1999, 7, 15)));
+assertEquals(366, cal.daysInYear(new Temporal.PlainDate(2000, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(2001, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(2002, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(2003, 7, 15)));
+assertEquals(366, cal.daysInYear(new Temporal.PlainDate(2004, 7, 15)));
+assertEquals(365, cal.daysInYear(new Temporal.PlainDate(2005, 7, 15)));
+
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(1995, 8, 23, 5, 30, 13)));
+assertEquals(366,
+ cal.daysInYear(new Temporal.PlainDateTime(1996, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(1998, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(1999, 8, 23, 5, 30, 13)));
+assertEquals(366,
+ cal.daysInYear(new Temporal.PlainDateTime(2000, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(2001, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(2002, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(2003, 8, 23, 5, 30, 13)));
+assertEquals(366,
+ cal.daysInYear(new Temporal.PlainDateTime(2004, 8, 23, 5, 30, 13)));
+assertEquals(365,
+ cal.daysInYear(new Temporal.PlainDateTime(2005, 8, 23, 5, 30, 13)));
+
+assertEquals(365, cal.daysInYear("2019-03-18"));
+assertEquals(366, cal.daysInYear("2020-03-18"));
+assertEquals(365, cal.daysInYear("2021-03-18"));
+assertEquals(365, cal.daysInYear("2022-03-18"));
+assertEquals(365, cal.daysInYear("2023-03-18"));
+assertEquals(366, cal.daysInYear("2024-03-18"));
+assertEquals(365, cal.daysInYear("2025-03-18"));
+assertEquals(365, cal.daysInYear("2026-03-18"));
+
+// TODO test the following later
+//assertEquals(365, cal.daysInYear(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(365, cal.daysInYear(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals(365, cal.daysInYear({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-fields.js b/deps/v8/test/mjsunit/temporal/calendar-fields.js
new file mode 100644
index 0000000000..cf10d9537e
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-fields.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.fields
+let cal = new Temporal.Calendar("iso8601")
+
+assertEquals("iso8601", cal.id)
+
+const fields = {
+ *[Symbol.iterator]() {
+ let i = 0;
+ while (i++ < 1000) {
+ yield "year";
+ }
+ }
+}
+
+let expected = Array.from(fields);
+// For now, we only pass the fields in as an array.
+let input = expected;
+assertArrayEquals(expected, cal.fields(input));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-from.js b/deps/v8/test/mjsunit/temporal/calendar-from.js
new file mode 100644
index 0000000000..ab63c84ac2
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-from.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.from
+// 1. If NewTarget is undefined, then
+// a. Throw a TypeError exception.
+//assertThrows(() => Temporal.Calendar.from("invalid"), TypeError,
+// "Constructor Temporal.Calendar requires 'new'");
+
+assertEquals("iso8601",
+ (Temporal.Calendar.from("iso8601")).id);
+assertEquals("iso8601",
+ (Temporal.Calendar.from(new Temporal.PlainDate(2021, 7, 3))).id);
+assertEquals("iso8601",
+ (Temporal.Calendar.from(new Temporal.PlainDateTime(2021, 7, 3, 4, 5))).id);
+assertEquals("iso8601",
+ (Temporal.Calendar.from(new Temporal.PlainTime())).id);
+assertEquals("iso8601",
+ (Temporal.Calendar.from(new Temporal.PlainYearMonth(2011, 4))).id);
+assertEquals("iso8601",
+ (Temporal.Calendar.from(new Temporal.PlainMonthDay(2, 6))).id);
diff --git a/deps/v8/test/mjsunit/temporal/calendar-in-leap-year.js b/deps/v8/test/mjsunit/temporal/calendar-in-leap-year.js
new file mode 100644
index 0000000000..cd61f1dae9
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-in-leap-year.js
@@ -0,0 +1,57 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.inleapyear
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(1995, 7, 15)));
+assertEquals(true, cal.inLeapYear(new Temporal.PlainDate(1996, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(1997, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(1998, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(1999, 7, 15)));
+assertEquals(true, cal.inLeapYear(new Temporal.PlainDate(2000, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(2001, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(2002, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(2003, 7, 15)));
+assertEquals(true, cal.inLeapYear(new Temporal.PlainDate(2004, 7, 15)));
+assertEquals(false, cal.inLeapYear(new Temporal.PlainDate(2005, 7, 15)));
+
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(1995, 8, 23, 5, 30, 13)));
+assertEquals(true,
+ cal.inLeapYear(new Temporal.PlainDateTime(1996, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(1998, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(1999, 8, 23, 5, 30, 13)));
+assertEquals(true,
+ cal.inLeapYear(new Temporal.PlainDateTime(2000, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(2001, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(2002, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(2003, 8, 23, 5, 30, 13)));
+assertEquals(true,
+ cal.inLeapYear(new Temporal.PlainDateTime(2004, 8, 23, 5, 30, 13)));
+assertEquals(false,
+ cal.inLeapYear(new Temporal.PlainDateTime(2005, 8, 23, 5, 30, 13)));
+
+assertEquals(false, cal.inLeapYear("2019-03-18"));
+assertEquals(true, cal.inLeapYear("2020-03-18"));
+assertEquals(false, cal.inLeapYear("2021-03-18"));
+assertEquals(false, cal.inLeapYear("2022-03-18"));
+assertEquals(false, cal.inLeapYear("2023-03-18"));
+assertEquals(true, cal.inLeapYear("2024-03-18"));
+assertEquals(false, cal.inLeapYear("2025-03-18"));
+assertEquals(false, cal.inLeapYear("2026-03-18"));
+
+// TODO Test the following later
+//assertEquals(false, cal.inLeapYear(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(false, cal.inLeapYear(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals(false, cal.inLeapYear({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js b/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js
new file mode 100644
index 0000000000..82a846772f
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-merge-fields.js
@@ -0,0 +1,63 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.mergefields
+let cal = new Temporal.Calendar("iso8601")
+
+// Test throwing
+assertThrows(() => cal.mergeFields(), TypeError,
+ "Cannot convert undefined or null to object");
+assertThrows(() => cal.mergeFields(undefined, {}), TypeError,
+ "Cannot convert undefined or null to object");
+assertThrows(() => cal.mergeFields(null, {}), TypeError,
+ "Cannot convert undefined or null to object");
+assertThrows(() => cal.mergeFields({}, undefined), TypeError,
+ "Cannot convert undefined or null to object");
+assertThrows(() => cal.mergeFields({}, null), TypeError,
+ "Cannot convert undefined or null to object");
+
+// Tests for String, Number, true, false, NaN, BigInt, and Symbol types are
+// pending on https://github.com/tc39/proposal-temporal/issues/1647
+
+// Assert that only string keys are merged
+assertArrayEquals({}, cal.mergeFields({1: 2}, {3: 4}));
+assertArrayEquals({}, cal.mergeFields({true: 2}, {false: 4}));
+assertArrayEquals({}, cal.mergeFields({1n: 2}, {2n: 4}));
+assertArrayEquals({}, cal.mergeFields({Infinity: 2}, {Infinity: 4}));
+assertArrayEquals({}, cal.mergeFields({undefined: 2}, {NaN: 4}));
+assertArrayEquals({}, cal.mergeFields({["foo"]: 2}, {["bar"]: 4}));
+assertArrayEquals({a:1, b:2, c:4}, cal.mergeFields({a: 1, b: 2}, {b:3, c:4}));
+assertArrayEquals({a:1, b:2, c:4, month:5},
+ cal.mergeFields({a: 1, b: 2}, {b:3, c:4, month:5}));
+assertArrayEquals({a:1, b:2, c:4, month:5, monthCode:'M06'},
+ cal.mergeFields({a: 1, b: 2}, {b:3, c:4, month:5, monthCode:'M06'}));
+assertArrayEquals({a:1, b:2, c:4, monthCode:'M06'}, cal.mergeFields({a: 1, b: 2},
+ {b:3, c:4, monthCode:'M06'}));
+
+assertArrayEquals({a:1, b:2, c:4, month:5},
+ cal.mergeFields({a: 1, b: 2, month:7}, {b:3, c:4, month:5}));
+assertArrayEquals({a:1, b:2, c:4, month:5},
+ cal.mergeFields({a: 1, b: 2, month:7, monthCode:'M08'},
+ {b:3, c:4, month:5}));
+assertArrayEquals({a:1, b:2, c:4, monthCode:'M06'},
+ cal.mergeFields({a: 1, b: 2, month:7}, {b:3, c:4, monthCode:'M06'}));
+assertArrayEquals({a:1, b:2, c:4, monthCode:'M06'},
+ cal.mergeFields({a: 1, b: 2, month:7, monthCode:'M08'},
+ {b:3, c:4, monthCode:'M06'}));
+assertArrayEquals({a:1, b:2, c:4, month:5, monthCode:'M06'},
+ cal.mergeFields({a: 1, b: 2, month:7},
+ {b:3, c:4, month:5, monthCode:'M06'}));
+assertArrayEquals({a:1, b:2, c:4, month:5, monthCode:'M06'},
+ cal.mergeFields({a: 1, b: 2, month:7, monthCode:'M08'},
+ {b:3, c:4, month:5, monthCode:'M06'}));
+
+assertArrayEquals({a:1, b:2, c:4, month:7},
+ cal.mergeFields({a: 1, b: 2, month:7}, {b:3, c:4}));
+assertArrayEquals({a:1, b:2, c:4, month:7, monthCode:'M08'},
+ cal.mergeFields({a: 1, b: 2, month:7, monthCode:'M08'}, {b:3, c:4}));
+assertArrayEquals({a:1, b:2, c:4, month:7, monthCode:'M08'},
+ cal.mergeFields({a: 1, b: 2, month:7, monthCode:'M08'}, {b:3, c:4}));
+assertArrayEquals({a:1, b:2, c:4, monthCode:'M08'},
+ cal.mergeFields({a: 1, b: 2, monthCode:'M08'}, {b:3, c:4}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-month-code.js b/deps/v8/test/mjsunit/temporal/calendar-month-code.js
new file mode 100644
index 0000000000..edb6422b01
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-month-code.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.monthcode
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals("M07", cal.monthCode(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals("M08",
+ cal.monthCode(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals("M06", cal.monthCode(new Temporal.PlainYearMonth(1999, 6)));
+assertEquals("M02", cal.monthCode(new Temporal.PlainMonthDay(2, 6)));
+assertEquals("M03", cal.monthCode("2019-03-15"));
+
+// TODO Test the following later
+//assertEquals("M01", cal.monthCode(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals("M09", cal.monthCode({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js b/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js
new file mode 100644
index 0000000000..1ff45c6117
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-month-day-from-fields.js
@@ -0,0 +1,238 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.monthdayfromfields
+let cal = new Temporal.Calendar("iso8601")
+
+// Check that invalid first arguments throw
+let nonObjMsg =
+ "Temporal.Calendar.prototype.monthDayFromFields called on non-object";
+assertThrows(() => cal.monthDayFromFields(), TypeError,
+ nonObjMsg);
+[undefined, true, false, 123, 456n, Symbol(), "string"].forEach(
+ function(fields) {
+ assertThrows(() => cal.monthDayFromFields(fields), TypeError,
+ nonObjMsg);
+ assertThrows(() => cal.monthDayFromFields(fields, undefined), TypeError,
+ nonObjMsg);
+ assertThrows(() => cal.monthDayFromFields(fields,
+ {overflow: "constrain"}), TypeError, nonObjMsg);
+ assertThrows(() => cal.monthDayFromFields(fields, {overflow: "reject"}),
+ TypeError, nonObjMsg);
+ });
+
+assertThrows(() => cal.monthDayFromFields({month: 1, day: 17}),
+ TypeError, "invalid_argument");
+assertThrows(() => cal.monthDayFromFields({year: 2021, day: 17}),
+ TypeError, "invalid_argument");
+assertThrows(() => cal.monthDayFromFields({year: 2021, month: 12}),
+ TypeError, "invalid_argument");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "m1", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M1", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "m01", day: 17}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 12, monthCode: "M11", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M00", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M19", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M99", day: 17}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M13", day: 17}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: -1, day: 17}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: -Infinity, day: 17}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 7, day: -17}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 7, day: -Infinity}),
+ RangeError, "Invalid time value");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 12, day: 0}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 12, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 1, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 2, day: 29}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 6, day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 9, day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 0, day: 5}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 13, day: 5}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M12", day: 0}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M12", day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M01", day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M06", day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M09", day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M00", day: 5}, {overflow: "reject"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, monthCode: "M13", day: 5}, {overflow: "reject"}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 12, day: 0}), RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 0, day: 3}), RangeError, "Invalid time value");
+
+// Check that an invalid second argument throws
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 7, day: 13}, {overflow: "invalid"}),
+ RangeError,
+ "Value invalid out of range for Temporal.Calendar.prototype." +
+ "monthDayFromFields options property overflow");
+
+assertEquals("07-15", cal.monthDayFromFields(
+ {year: 2021, month: 7, day: 15}).toJSON());
+assertEquals("07-03", cal.monthDayFromFields(
+ {year: 2021, month: 7, day: 3}).toJSON());
+assertEquals("12-31", cal.monthDayFromFields(
+ {year: 2021, month: 12, day: 31}).toJSON());
+assertEquals("07-15", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M07", day: 15}).toJSON());
+assertEquals("07-03", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M07", day: 3}).toJSON());
+assertEquals("12-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M12", day: 31}).toJSON());
+assertEquals("02-29", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M02", day: 29}).toJSON());
+
+assertEquals("01-31", cal.monthDayFromFields(
+ {year: 2021, month: 1, day: 133}).toJSON());
+assertEquals("02-28", cal.monthDayFromFields(
+ {year: 2021, month: 2, day: 133}).toJSON());
+assertEquals("03-31", cal.monthDayFromFields(
+ {year: 2021, month: 3, day: 9033}).toJSON());
+assertEquals("04-30", cal.monthDayFromFields(
+ {year: 2021, month: 4, day: 50}).toJSON());
+assertEquals("05-31", cal.monthDayFromFields(
+ {year: 2021, month: 5, day: 77}).toJSON());
+assertEquals("06-30", cal.monthDayFromFields(
+ {year: 2021, month: 6, day: 33}).toJSON());
+assertEquals("07-31", cal.monthDayFromFields(
+ {year: 2021, month: 7, day: 33}).toJSON());
+assertEquals("08-31", cal.monthDayFromFields(
+ {year: 2021, month: 8, day: 300}).toJSON());
+assertEquals("09-30", cal.monthDayFromFields(
+ {year: 2021, month: 9, day: 400}).toJSON());
+assertEquals("10-31", cal.monthDayFromFields(
+ {year: 2021, month: 10, day: 400}).toJSON());
+assertEquals("11-30", cal.monthDayFromFields(
+ {year: 2021, month: 11, day: 400}).toJSON());
+assertEquals("12-31", cal.monthDayFromFields(
+ {year: 2021, month: 12, day: 500}).toJSON());
+assertEquals("12-31", cal.monthDayFromFields(
+ {year: 2021, month: 13, day: 500}).toJSON());
+assertEquals("12-31", cal.monthDayFromFields(
+ {year: 2021, month: 999999, day: 500}).toJSON());
+assertEquals("01-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M01", day: 133}).toJSON());
+assertEquals("02-29", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M02", day: 133}).toJSON());
+assertEquals("03-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M03", day: 9033}).toJSON());
+assertEquals("04-30", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M04", day: 50}).toJSON());
+assertEquals("05-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M05", day: 77}).toJSON());
+assertEquals("06-30", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M06", day: 33}).toJSON());
+assertEquals("07-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M07", day: 33}).toJSON());
+assertEquals("08-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M08", day: 300}).toJSON());
+assertEquals("09-30", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M09", day: 400}).toJSON());
+assertEquals("10-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M10", day: 400}).toJSON());
+assertEquals("11-30", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M11", day: 400}).toJSON());
+assertEquals("12-31", cal.monthDayFromFields(
+ {year: 2021, monthCode: "M12", day: 500}).toJSON());
+
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 1, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 2, day: 29}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 3, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 4, day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 5, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 6, day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 7, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 8, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 9, day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 10, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 11, day: 31}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 12, day: 32}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.monthDayFromFields(
+ {year: 2021, month: 13, day: 5}, {overflow: "reject"}),
+ RangeError, "Invalid time value");
diff --git a/deps/v8/test/mjsunit/temporal/calendar-month.js b/deps/v8/test/mjsunit/temporal/calendar-month.js
new file mode 100644
index 0000000000..02b0fe644d
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-month.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.month
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(7, cal.month(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(8, cal.month(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(6, cal.month(new Temporal.PlainYearMonth(1999, 6)));
+assertEquals(3, cal.month("2019-03-15"));
+assertThrows(() => cal.month(new Temporal.PlainMonthDay(3, 16)), TypeError,
+ "invalid_argument");
+
+// TODO Test the following later.
+//assertEquals(1, cal.month(new Temporal.ZonedDateTime(86400n * 366n * 50n,
+// "UTC")))
+//assertEquals(9, cal.month({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-months-in-year.js b/deps/v8/test/mjsunit/temporal/calendar-months-in-year.js
new file mode 100644
index 0000000000..f25beb2063
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-months-in-year.js
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.monthsinyear
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(12, cal.monthsInYear(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(12, cal.monthsInYear(new Temporal.PlainDate(1234, 7, 15)));
+assertEquals(12,
+ cal.monthsInYear(new Temporal.PlainDateTime(1997, 8, 23, 5, 30, 13)));
+assertEquals(12,
+ cal.monthsInYear(new Temporal.PlainDateTime(1234, 8, 23, 5, 30, 13)));
+assertEquals(12, cal.monthsInYear("2019-03-18"));
+assertEquals(12, cal.monthsInYear("1234-03-18"));
+
+// TODO Test the following later.
+//assertEquals(12, cal.monthsInYear(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(12, cal.monthsInYear(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals(12, cal.monthsInYear({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-week-of-year.js b/deps/v8/test/mjsunit/temporal/calendar-week-of-year.js
new file mode 100644
index 0000000000..cfa2646e85
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-week-of-year.js
@@ -0,0 +1,68 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.weekofyear
+
+let cal = new Temporal.Calendar("iso8601");
+
+// From https://en.wikipedia.org/wiki/ISO_week_date#Relation_with_the_Gregorian_calendar
+assertEquals(53, cal.weekOfYear(new Temporal.PlainDate(1977, 01, 01)));
+assertEquals(53, cal.weekOfYear(new Temporal.PlainDate(1977, 01, 02)));
+
+assertEquals(52, cal.weekOfYear(new Temporal.PlainDate(1977, 12, 31)));
+assertEquals(52, cal.weekOfYear(new Temporal.PlainDate(1978, 01, 01)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1978, 01, 02)));
+
+assertEquals(52, cal.weekOfYear(new Temporal.PlainDate(1978, 12, 31)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1979, 01, 01)));
+
+assertEquals(52, cal.weekOfYear(new Temporal.PlainDate(1979, 12, 30)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1979, 12, 31)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1980, 01, 01)));
+
+assertEquals(52, cal.weekOfYear(new Temporal.PlainDate(1980, 12, 28)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1980, 12, 29)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1980, 12, 30)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1980, 12, 31)));
+assertEquals(1, cal.weekOfYear(new Temporal.PlainDate(1981, 01, 01)));
+
+assertEquals(53, cal.weekOfYear(new Temporal.PlainDate(1981, 12, 31)));
+assertEquals(53, cal.weekOfYear(new Temporal.PlainDate(1982, 01, 01)));
+assertEquals(53, cal.weekOfYear(new Temporal.PlainDate(1982, 01, 02)));
+assertEquals(53, cal.weekOfYear(new Temporal.PlainDate(1982, 01, 03)));
+
+
+assertEquals(53, cal.weekOfYear("1977-01-01"));
+assertEquals(53, cal.weekOfYear("1977-01-02"));
+
+assertEquals(52, cal.weekOfYear("1977-12-31"));
+assertEquals(52, cal.weekOfYear("1978-01-01"));
+assertEquals(1, cal.weekOfYear("1978-01-02"));
+
+assertEquals(52, cal.weekOfYear("1978-12-31"));
+assertEquals(1, cal.weekOfYear("1979-01-01"));
+
+assertEquals(52, cal.weekOfYear("1979-12-30"));
+assertEquals(1, cal.weekOfYear("1979-12-31"));
+assertEquals(1, cal.weekOfYear("1980-01-01"));
+
+assertEquals(52, cal.weekOfYear("1980-12-28"));
+assertEquals(1, cal.weekOfYear("1980-12-29"));
+assertEquals(1, cal.weekOfYear("1980-12-30"));
+assertEquals(1, cal.weekOfYear("1980-12-31"));
+assertEquals(1, cal.weekOfYear("1981-01-01"));
+
+assertEquals(53, cal.weekOfYear("1981-12-31"));
+assertEquals(53, cal.weekOfYear("1982-01-01"));
+assertEquals(53, cal.weekOfYear("1982-01-02"));
+assertEquals(53, cal.weekOfYear("1982-01-03"));
+
+// TODO test the following later
+//assertEquals(4, cal.weekOfYear(new Temporal.PlainDateTime(1997, 1, 23, 5,
+// 30, 13)));
+//assertEquals(7, cal.weekOfYear(new Temporal.PlainMonthDay(2, 6)));
+//assertEquals(31, cal.weekOfYear(new Temporal.ZonedDateTime(
+// 86400n * 366n * 50n, "UTC")))
+//assertEquals(30, cal.weekOfYear({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js b/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js
new file mode 100644
index 0000000000..8124546339
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-year-month-from-fields.js
@@ -0,0 +1,144 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.yearmonthfromfields
+let cal = new Temporal.Calendar("iso8601");
+
+let nonObjMsg =
+ "Temporal.Calendar.prototype.yearMonthFromFields called on non-object";
+// Check throw for the first arg
+assertThrows(() => cal.yearMonthFromFields(),
+ TypeError, nonObjMsg);
+[undefined, true, false, 123, 456n, Symbol(), "string"].forEach(
+ function(fields) {
+ assertThrows(() => cal.yearMonthFromFields(fields), TypeError, nonObjMsg);
+ assertThrows(() => cal.yearMonthFromFields(fields, undefined),
+ TypeError, nonObjMsg);
+ assertThrows(() => cal.yearMonthFromFields(fields,
+ {overflow: "constrain"}), TypeError, nonObjMsg);
+ assertThrows(() => cal.yearMonthFromFields(fields,
+ {overflow: "reject"}), TypeError, nonObjMsg);
+ });
+
+assertThrows(() => cal.yearMonthFromFields({month: 1}),
+ TypeError, "invalid_argument");
+assertThrows(() => cal.yearMonthFromFields({year: 2021}),
+ TypeError, "invalid_argument");
+
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "m1"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M1"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "m01"}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.yearMonthFromFields({year: 2021, month: 12,
+ monthCode: "M11"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M00"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M19"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M99"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, monthCode: "M13"}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.yearMonthFromFields({year: 2021, month: -1}),
+ RangeError, "Invalid time value");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, month: -Infinity}),
+ RangeError, "Invalid time value");
+
+assertThrows(() => cal.yearMonthFromFields({year: 2021, month: 0, day: 5},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+assertThrows(() => cal.yearMonthFromFields({year: 2021, month: 13, day: 5},
+ {overflow: "reject"}), RangeError, "Invalid time value");
+
+assertThrows(() => cal.yearMonthFromFields(
+ {year: 2021, monthCode: "M00"}, {overflow: "reject"}),
+ RangeError, "monthCode value is out of range.");
+assertThrows(() => cal.yearMonthFromFields(
+ {year: 2021, monthCode: "M13"}, {overflow: "reject"}),
+ RangeError, "monthCode value is out of range.");
+
+assertThrows(() => cal.yearMonthFromFields(
+ {year: 2021, month: 0}), RangeError, "Invalid time value");
+
+// Check throw for the second arg
+assertThrows(() => cal.yearMonthFromFields(
+ {year: 2021, month: 7}, {overflow: "invalid"}),
+ RangeError,
+ "Value invalid out of range for " +
+ "Temporal.Calendar.prototype.yearMonthFromFields options property " +
+ "overflow");
+
+assertEquals("2021-07",
+ cal.yearMonthFromFields({year: 2021, month: 7}).toJSON());
+assertEquals("2021-12",
+ cal.yearMonthFromFields({year: 2021, month: 12}).toJSON());
+assertEquals("2021-07",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M07"}).toJSON());
+assertEquals("2021-12",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M12"}).toJSON());
+
+assertEquals("2021-01",
+ cal.yearMonthFromFields({year: 2021, month: 1}).toJSON());
+assertEquals("2021-02",
+ cal.yearMonthFromFields({year: 2021, month: 2}).toJSON());
+assertEquals("2021-03",
+ cal.yearMonthFromFields({year: 2021, month: 3}).toJSON());
+assertEquals("2021-04",
+ cal.yearMonthFromFields({year: 2021, month: 4}).toJSON());
+assertEquals("2021-05",
+ cal.yearMonthFromFields({year: 2021, month: 5}).toJSON());
+assertEquals("2021-06",
+ cal.yearMonthFromFields({year: 2021, month: 6}).toJSON());
+assertEquals("2021-07",
+ cal.yearMonthFromFields({year: 2021, month: 7}).toJSON());
+assertEquals("2021-08",
+ cal.yearMonthFromFields({year: 2021, month: 8}).toJSON());
+assertEquals("2021-09",
+ cal.yearMonthFromFields({year: 2021, month: 9}).toJSON());
+assertEquals("2021-10",
+ cal.yearMonthFromFields({year: 2021, month: 10}).toJSON());
+assertEquals("2021-11",
+ cal.yearMonthFromFields({year: 2021, month: 11}).toJSON());
+assertEquals("2021-12",
+ cal.yearMonthFromFields({year: 2021, month: 12}).toJSON());
+assertEquals("2021-12",
+ cal.yearMonthFromFields({year: 2021, month: 13}).toJSON());
+assertEquals("2021-12",
+ cal.yearMonthFromFields({year: 2021, month: 999999}).toJSON());
+assertEquals("2021-01",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M01"}).toJSON());
+assertEquals("2021-02",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M02"}).toJSON());
+assertEquals("2021-03",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M03"}).toJSON());
+assertEquals("2021-04",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M04"}).toJSON());
+assertEquals("2021-05",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M05"}).toJSON());
+assertEquals("2021-06",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M06"}).toJSON());
+assertEquals("2021-07",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M07"}).toJSON());
+assertEquals("2021-08",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M08"}).toJSON());
+assertEquals("2021-09",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M09"}).toJSON());
+assertEquals("2021-10",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M10"}).toJSON());
+assertEquals("2021-11",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M11"}).toJSON());
+assertEquals("2021-12",
+ cal.yearMonthFromFields({year: 2021, monthCode: "M12"}).toJSON());
+
+assertThrows(() => cal.yearMonthFromFields(
+ {year: 2021, month: 13}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
+assertThrows(() => cal.yearMonthFromFields(
+ {year: 2021, month: 9995}, {overflow: "reject"}), RangeError,
+ "Invalid time value");
diff --git a/deps/v8/test/mjsunit/temporal/calendar-year.js b/deps/v8/test/mjsunit/temporal/calendar-year.js
new file mode 100644
index 0000000000..9336b05fab
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/calendar-year.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+
+// https://tc39.es/proposal-temporal/#sec-temporal.calendar.prototype.year
+let cal = new Temporal.Calendar("iso8601");
+
+assertEquals(2021, cal.year(new Temporal.PlainDate(2021, 7, 15)));
+assertEquals(1997, cal.year(new Temporal.PlainDateTime(
+ 1997, 8, 23, 5, 30, 13)));
+assertEquals(1999, cal.year(new Temporal.PlainYearMonth(1999, 6)));
+assertEquals(2019, cal.year("2019-03-15"));
+
+// TODO Test the following later.
+//assertEquals(2020, cal.year(new Temporal.ZonedDateTime(86400n * 366n * 50n, "UTC")))
+//assertEquals(2001, cal.year({year: 2001, month: 9, day: 11}));
diff --git a/deps/v8/test/mjsunit/tools/processor.mjs b/deps/v8/test/mjsunit/tools/processor.mjs
index 4190108638..93090cf27e 100644
--- a/deps/v8/test/mjsunit/tools/processor.mjs
+++ b/deps/v8/test/mjsunit/tools/processor.mjs
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --logfile='+' --log --log-maps --log-ic --log-code
-// Flags: --log-function-events --no-stress-opt
+// Flags: --log-function-events --no-stress-opt --no-predictable
import { Processor } from "../../../tools/system-analyzer/processor.mjs";
diff --git a/deps/v8/test/mjsunit/typedarray-constructor-mixed-bigint.js b/deps/v8/test/mjsunit/typedarray-constructor-mixed-bigint.js
new file mode 100644
index 0000000000..20386fb515
--- /dev/null
+++ b/deps/v8/test/mjsunit/typedarray-constructor-mixed-bigint.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let BigIntCtors = [BigInt64Array, BigUint64Array];
+let NonBigIntCtors = [Int8Array,
+ Uint8Array,
+ Uint8ClampedArray,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array];
+
+function assertThrowsCannotMixBigInt(cb) {
+ assertThrows(cb, TypeError, /Cannot mix BigInt/);
+}
+
+for (let bigIntTA of BigIntCtors) {
+ for (let nonBigIntTA of NonBigIntCtors) {
+ assertThrowsCannotMixBigInt(() => { new bigIntTA(new nonBigIntTA(0)); });
+ assertThrowsCannotMixBigInt(() => { new bigIntTA(new nonBigIntTA(1)); });
+
+ assertThrowsCannotMixBigInt(() => { new nonBigIntTA(new bigIntTA(0)); });
+ assertThrowsCannotMixBigInt(() => { new nonBigIntTA(new bigIntTA(1)); });
+ }
+}
diff --git a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
index 8cd6248ea3..fe18f4649c 100644
--- a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
@@ -6,24 +6,14 @@
"use strict";
-class MyUint8Array extends Uint8Array {};
-
-const ctors = [
- Uint8Array,
- Int8Array,
- Uint16Array,
- Int16Array,
- Int32Array,
- Float32Array,
- Float64Array,
- Uint8ClampedArray,
- BigUint64Array,
- BigInt64Array,
- MyUint8Array
-];
+d8.file.execute('test/mjsunit/typedarray-helpers.js');
+
+function CreateGrowableSharedArrayBuffer(byteLength, maxByteLength) {
+ return new SharedArrayBuffer(byteLength, {maxByteLength: maxByteLength});
+}
(function TypedArrayPrototype() {
- const gsab = new GrowableSharedArrayBuffer(40, 80);
+ const gsab = CreateGrowableSharedArrayBuffer(40, 80);
const sab = new SharedArrayBuffer(80);
for (let ctor of ctors) {
@@ -34,7 +24,7 @@ const ctors = [
})();
(function TypedArrayLengthAndByteLength() {
- const gsab = new GrowableSharedArrayBuffer(40, 80);
+ const gsab = CreateGrowableSharedArrayBuffer(40, 80);
for (let ctor of ctors) {
const ta = new ctor(gsab, 0, 3);
@@ -77,7 +67,7 @@ const ctors = [
})();
(function ConstructInvalid() {
- const gsab = new GrowableSharedArrayBuffer(40, 80);
+ const gsab = CreateGrowableSharedArrayBuffer(40, 80);
for (let ctor of ctors) {
// Length too big.
@@ -107,7 +97,7 @@ const ctors = [
})();
(function TypedArrayLengthWhenGrown1() {
- const gsab = new GrowableSharedArrayBuffer(16, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(16, 40);
// Create TAs which cover the bytes 0-7.
let tas_and_lengths = [];
@@ -138,7 +128,7 @@ const ctors = [
// The previous test with offsets.
(function TypedArrayLengthWhenGrown2() {
- const gsab = new GrowableSharedArrayBuffer(20, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(20, 40);
// Create TAs which cover the bytes 8-15.
let tas_and_lengths = [];
@@ -168,7 +158,7 @@ const ctors = [
})();
(function LengthTracking1() {
- const gsab = new GrowableSharedArrayBuffer(16, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(16, 40);
let tas = [];
for (let ctor of ctors) {
@@ -204,7 +194,7 @@ const ctors = [
// The previous test with offsets.
(function LengthTracking2() {
- const gsab = new GrowableSharedArrayBuffer(16, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(16, 40);
const offset = 8;
let tas = [];
@@ -245,7 +235,7 @@ const ctors = [
}
%EnsureFeedbackVectorForFunction(ReadElement2);
- const gsab = new GrowableSharedArrayBuffer(16, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(16, 40);
const i8a = new Int8Array(gsab, 0, 4);
for (let i = 0; i < 3; ++i) {
@@ -294,7 +284,7 @@ const ctors = [
%EnsureFeedbackVectorForFunction(HasElement);
%EnsureFeedbackVectorForFunction(WriteElement);
- const gsab = new GrowableSharedArrayBuffer(16, 40);
+ const gsab = CreateGrowableSharedArrayBuffer(16, 40);
const i8a = new Int8Array(gsab); // length-tracking
assertEquals(16, i8a.length);
@@ -341,8 +331,46 @@ const ctors = [
}
})();
+(function HasWithOffsetsWithFeedback() {
+ function GetElements(ta) {
+ let result = '';
+ for (let i = 0; i < 8; ++i) {
+ result += (i in ta) + ',';
+ // ^ feedback will be here
+ }
+ return result;
+ }
+ %EnsureFeedbackVectorForFunction(GetElements);
+
+ const gsab = CreateGrowableSharedArrayBuffer(4, 8);
+ const fixedLength = new Int8Array(gsab, 0, 4);
+ const fixedLengthWithOffset = new Int8Array(gsab, 1, 3);
+ const lengthTracking = new Int8Array(gsab, 0);
+ const lengthTrackingWithOffset = new Int8Array(gsab, 1);
+
+ assertEquals('true,true,true,true,false,false,false,false,',
+ GetElements(fixedLength));
+ assertEquals('true,true,true,false,false,false,false,false,',
+ GetElements(fixedLengthWithOffset));
+ assertEquals('true,true,true,true,false,false,false,false,',
+ GetElements(lengthTracking));
+ assertEquals('true,true,true,false,false,false,false,false,',
+ GetElements(lengthTrackingWithOffset));
+
+ gsab.grow(8);
+
+ assertEquals('true,true,true,true,false,false,false,false,',
+ GetElements(fixedLength));
+ assertEquals('true,true,true,false,false,false,false,false,',
+ GetElements(fixedLengthWithOffset));
+ assertEquals('true,true,true,true,true,true,true,true,',
+ GetElements(lengthTracking));
+ assertEquals('true,true,true,true,true,true,true,false,',
+ GetElements(lengthTrackingWithOffset));
+})();
+
(function EnumerateElements() {
- let gsab = new GrowableSharedArrayBuffer(100, 200);
+ let gsab = CreateGrowableSharedArrayBuffer(100, 200);
for (let ctor of ctors) {
const ta = new ctor(gsab, 0, 3);
let keys = '';
@@ -360,28 +388,23 @@ const ctors = [
function TestIteration(ta, expected) {
let values = [];
for (const value of ta) {
- values.push(value);
+ values.push(Number(value));
}
assertEquals(expected, values);
}
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
-
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
// We can use the same GSAB for all the TAs below, since we won't modify it
// after writing the initial values.
- const gsab = new GrowableSharedArrayBuffer(buffer_byte_length,
+ const gsab = CreateGrowableSharedArrayBuffer(buffer_byte_length,
2 * buffer_byte_length);
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
// Write some data into the array.
let ta_write = new ctor(gsab);
for (let i = 0; i < no_elements; ++i) {
- ta_write[i] = i % 128;
+ WriteToTypedArray(ta_write, i, i % 128);
}
// Create various different styles of TypedArrays with the GSAB as the
@@ -423,12 +446,12 @@ const ctors = [
// Helpers for iteration tests.
function CreateGsab(buffer_byte_length, ctor) {
- const gsab = new GrowableSharedArrayBuffer(buffer_byte_length,
- 2 * buffer_byte_length);
+ const gsab = CreateGrowableSharedArrayBuffer(buffer_byte_length,
+ 2 * buffer_byte_length);
// Write some data into the array.
let ta_write = new ctor(gsab);
for (let i = 0; i < buffer_byte_length / ctor.BYTES_PER_ELEMENT; ++i) {
- ta_write[i] = i % 128;
+ WriteToTypedArray(ta_write, i, i % 128);
}
return gsab;
}
@@ -438,7 +461,7 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
let values = [];
let grown = false;
for (const value of ta) {
- values.push(value);
+ values.push(Number(value));
if (!grown && values.length == grow_after) {
gsab.grow(new_byte_length);
grown = true;
@@ -453,10 +476,6 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
const offset = 2;
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
@@ -511,10 +530,6 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
// We need to recreate the gsab between all TA tests, since we grow it.
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
@@ -552,3 +567,123 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
}
}
}());
+
+(function Destructuring() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ let ta_write = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(ta_write, i, i);
+ }
+
+ {
+ let [a, b, c, d, e] = fixedLength;
+ assertEquals([0, 1, 2, 3], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+
+ {
+ let [a, b, c] = fixedLengthWithOffset;
+ assertEquals([2, 3], ToNumbers([a, b]));
+ assertEquals(undefined, c);
+ }
+
+ {
+ let [a, b, c, d, e] = lengthTracking;
+ assertEquals([0, 1, 2, 3], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+
+ {
+ let [a, b, c] = lengthTrackingWithOffset;
+ assertEquals([2, 3], ToNumbers([a, b]));
+ assertEquals(undefined, c);
+ }
+
+ // Grow. The new memory is zeroed.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+
+ {
+ let [a, b, c, d, e] = fixedLength;
+ assertEquals([0, 1, 2, 3], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+
+ {
+ let [a, b, c] = fixedLengthWithOffset;
+ assertEquals([2, 3], ToNumbers([a, b]));
+ assertEquals(undefined, c);
+ }
+
+ {
+ let [a, b, c, d, e, f, g] = lengthTracking;
+ assertEquals([0, 1, 2, 3, 0, 0], ToNumbers([a, b, c, d, e, f]));
+ assertEquals(undefined, g);
+ }
+
+ {
+ let [a, b, c, d, e] = lengthTrackingWithOffset;
+ assertEquals([2, 3, 0, 0], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+ }
+}());
+
+(function TestFill() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ assertEquals([0, 0, 0, 0], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(fixedLength, 1);
+ assertEquals([1, 1, 1, 1], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(fixedLengthWithOffset, 2);
+ assertEquals([1, 1, 2, 2], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(lengthTracking, 3);
+ assertEquals([3, 3, 3, 3], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 4);
+ assertEquals([3, 3, 4, 4], ReadDataFromBuffer(gsab, ctor));
+
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+
+ FillHelper(fixedLength, 13);
+ assertEquals([13, 13, 13, 13, 0, 0], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(fixedLengthWithOffset, 14);
+ assertEquals([13, 13, 14, 14, 0, 0], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(lengthTracking, 15);
+ assertEquals([15, 15, 15, 15, 15, 15], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 16);
+ assertEquals([15, 15, 16, 16, 16, 16], ReadDataFromBuffer(gsab, ctor));
+
+ // Filling with non-undefined start & end.
+ FillHelper(fixedLength, 17, 1, 3);
+ assertEquals([15, 17, 17, 16, 16, 16], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(fixedLengthWithOffset, 18, 1, 2);
+ assertEquals([15, 17, 17, 18, 16, 16], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(lengthTracking, 19, 1, 3);
+ assertEquals([15, 19, 19, 18, 16, 16], ReadDataFromBuffer(gsab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 20, 1, 2);
+ assertEquals([15, 19, 19, 20, 16, 16], ReadDataFromBuffer(gsab, ctor));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/typedarray-helpers.js b/deps/v8/test/mjsunit/typedarray-helpers.js
new file mode 100644
index 0000000000..e4996456c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/typedarray-helpers.js
@@ -0,0 +1,55 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class MyUint8Array extends Uint8Array {};
+class MyBigInt64Array extends BigInt64Array {};
+
+const ctors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Int32Array,
+ Float32Array,
+ Float64Array,
+ Uint8ClampedArray,
+ BigUint64Array,
+ BigInt64Array,
+ MyUint8Array,
+ MyBigInt64Array,
+];
+
+function ReadDataFromBuffer(ab, ctor) {
+ let result = [];
+ const ta = new ctor(ab, 0, ab.byteLength / ctor.BYTES_PER_ELEMENT);
+ for (let item of ta) {
+ result.push(Number(item));
+ }
+ return result;
+}
+
+function WriteToTypedArray(array, index, value) {
+ if (array instanceof BigInt64Array ||
+ array instanceof BigUint64Array) {
+ array[index] = BigInt(value);
+ } else {
+ array[index] = value;
+ }
+}
+
+function ToNumbers(array) {
+ let result = [];
+ for (let item of array) {
+ result.push(Number(item));
+ }
+ return result;
+}
+
+function FillHelper(ta, n, start, end) {
+ if (ta instanceof BigInt64Array || ta instanceof BigUint64Array) {
+ ta.fill(BigInt(n), start, end);
+ } else {
+ ta.fill(n, start, end);
+ }
+}
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
index 95e2a99ecd..69ad91e693 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
@@ -22,8 +22,12 @@ const ctors = [
MyUint8Array
];
+function CreateResizableArrayBuffer(byteLength, maxByteLength) {
+ return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
+}
+
(function ConstructorThrowsIfBufferDetached() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
%ArrayBufferDetach(rab);
for (let ctor of ctors) {
@@ -34,7 +38,7 @@ const ctors = [
})();
(function TypedArrayLengthAndByteLength() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
let tas = [];
for (let ctor of ctors) {
@@ -53,7 +57,7 @@ const ctors = [
})();
(function AccessDetachedTypedArray() {
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
@@ -90,7 +94,7 @@ const ctors = [
}
%EnsureFeedbackVectorForFunction(ReadElement2);
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
assertEquals(0, ReadElement2(i8a));
@@ -114,7 +118,7 @@ const ctors = [
}
%EnsureFeedbackVectorForFunction(WriteElement2);
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
assertEquals(0, i8a[2]);
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
index 382ccbb136..9934683b23 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
@@ -6,24 +6,14 @@
"use strict";
-class MyUint8Array extends Uint8Array {};
-
-const ctors = [
- Uint8Array,
- Int8Array,
- Uint16Array,
- Int16Array,
- Int32Array,
- Float32Array,
- Float64Array,
- Uint8ClampedArray,
- BigUint64Array,
- BigInt64Array,
- MyUint8Array
-];
+d8.file.execute('test/mjsunit/typedarray-helpers.js');
+
+function CreateResizableArrayBuffer(byteLength, maxByteLength) {
+ return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
+}
(function TypedArrayPrototype() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
const ab = new ArrayBuffer(80);
for (let ctor of ctors) {
@@ -34,7 +24,7 @@ const ctors = [
})();
(function TypedArrayLengthAndByteLength() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
for (let ctor of ctors) {
const ta = new ctor(rab, 0, 3);
@@ -77,7 +67,7 @@ const ctors = [
})();
(function ConstructInvalid() {
- const rab = new ResizableArrayBuffer(40, 80);
+ const rab = CreateResizableArrayBuffer(40, 80);
for (let ctor of ctors) {
// Length too big.
@@ -107,7 +97,7 @@ const ctors = [
})();
(function TypedArrayLengthWhenResizedOutOfBounds1() {
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
// Create TAs which cover the bytes 0-7.
let tas_and_lengths = [];
@@ -146,7 +136,7 @@ const ctors = [
// The previous test with offsets.
(function TypedArrayLengthWhenResizedOutOfBounds2() {
- const rab = new ResizableArrayBuffer(20, 40);
+ const rab = CreateResizableArrayBuffer(20, 40);
// Create TAs which cover the bytes 8-15.
let tas_and_lengths = [];
@@ -158,6 +148,7 @@ const ctors = [
for (let [ta, length] of tas_and_lengths) {
assertEquals(length, ta.length);
assertEquals(length * ta.BYTES_PER_ELEMENT, ta.byteLength);
+ assertEquals(8, ta.byteOffset);
}
rab.resize(10);
@@ -165,6 +156,7 @@ const ctors = [
for (let [ta, length] of tas_and_lengths) {
assertEquals(0, ta.length);
assertEquals(0, ta.byteLength);
+ assertEquals(0, ta.byteOffset);
}
// Resize the rab so that it just barely covers the needed 8 bytes.
@@ -173,6 +165,7 @@ const ctors = [
for (let [ta, length] of tas_and_lengths) {
assertEquals(length, ta.length);
assertEquals(length * ta.BYTES_PER_ELEMENT, ta.byteLength);
+ assertEquals(8, ta.byteOffset);
}
rab.resize(40);
@@ -180,11 +173,12 @@ const ctors = [
for (let [ta, length] of tas_and_lengths) {
assertEquals(length, ta.length);
assertEquals(length * ta.BYTES_PER_ELEMENT, ta.byteLength);
+ assertEquals(8, ta.byteOffset);
}
})();
(function LengthTracking1() {
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
let tas = [];
for (let ctor of ctors) {
@@ -246,7 +240,7 @@ const ctors = [
// The previous test with offsets.
(function LengthTracking2() {
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const offset = 8;
let tas = [];
@@ -257,12 +251,14 @@ const ctors = [
for (let ta of tas) {
assertEquals((16 - offset) / ta.BYTES_PER_ELEMENT, ta.length);
assertEquals(16 - offset, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
}
rab.resize(40);
for (let ta of tas) {
assertEquals((40 - offset) / ta.BYTES_PER_ELEMENT, ta.length);
assertEquals(40 - offset, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
}
// Resize to a number which is not a multiple of all byte_lengths.
@@ -271,6 +267,7 @@ const ctors = [
const expected_length = Math.floor((20 - offset)/ ta.BYTES_PER_ELEMENT);
assertEquals(expected_length, ta.length);
assertEquals(expected_length * ta.BYTES_PER_ELEMENT, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
}
// Resize so that all TypedArrays go out of bounds (because of the offset).
@@ -279,6 +276,7 @@ const ctors = [
for (let ta of tas) {
assertEquals(0, ta.length);
assertEquals(0, ta.byteLength);
+ assertEquals(0, ta.byteOffset);
}
rab.resize(0);
@@ -286,6 +284,7 @@ const ctors = [
for (let ta of tas) {
assertEquals(0, ta.length);
assertEquals(0, ta.byteLength);
+ assertEquals(0, ta.byteOffset);
}
rab.resize(8);
@@ -293,6 +292,7 @@ const ctors = [
for (let ta of tas) {
assertEquals(0, ta.length);
assertEquals(0, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
}
// Resize so that the TypedArrays which have element size > 1 go out of bounds
@@ -303,9 +303,11 @@ const ctors = [
if (ta.BYTES_PER_ELEMENT == 1) {
assertEquals(1, ta.length);
assertEquals(1, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
} else {
assertEquals(0, ta.length);
assertEquals(0, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
}
}
@@ -314,6 +316,7 @@ const ctors = [
for (let ta of tas) {
assertEquals((40 - offset) / ta.BYTES_PER_ELEMENT, ta.length);
assertEquals(40 - offset, ta.byteLength);
+ assertEquals(offset, ta.byteOffset);
}
})();
@@ -322,7 +325,7 @@ const ctors = [
if (ctor.BYTES_PER_ELEMENT != 1) {
continue;
}
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const array = new ctor(rab, 0, 4);
// Initial values
@@ -381,7 +384,7 @@ const ctors = [
if (ctor.BYTES_PER_ELEMENT != 1) {
continue;
}
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const array = new ctor(rab, 0, 4);
// Within-bounds read
@@ -419,7 +422,7 @@ const ctors = [
}
%EnsureFeedbackVectorForFunction(ReadElement2);
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
for (let i = 0; i < 3; ++i) {
@@ -469,7 +472,7 @@ const ctors = [
}
%EnsureFeedbackVectorForFunction(HasElement2);
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
@@ -499,13 +502,73 @@ const ctors = [
}
})();
+(function HasWithOffsetsWithFeedback() {
+ function GetElements(ta) {
+ let result = '';
+ for (let i = 0; i < 8; ++i) {
+ result += (i in ta) + ',';
+ // ^ feedback will be here
+ }
+ return result;
+ }
+ %EnsureFeedbackVectorForFunction(GetElements);
+
+ const rab = CreateResizableArrayBuffer(4, 8);
+ const fixedLength = new Int8Array(rab, 0, 4);
+ const fixedLengthWithOffset = new Int8Array(rab, 1, 3);
+ const lengthTracking = new Int8Array(rab, 0);
+ const lengthTrackingWithOffset = new Int8Array(rab, 1);
+
+ assertEquals('true,true,true,true,false,false,false,false,',
+ GetElements(fixedLength));
+ assertEquals('true,true,true,false,false,false,false,false,',
+ GetElements(fixedLengthWithOffset));
+ assertEquals('true,true,true,true,false,false,false,false,',
+ GetElements(lengthTracking));
+ assertEquals('true,true,true,false,false,false,false,false,',
+ GetElements(lengthTrackingWithOffset));
+
+ rab.resize(2);
+
+ assertEquals('false,false,false,false,false,false,false,false,',
+ GetElements(fixedLength));
+ assertEquals('false,false,false,false,false,false,false,false,',
+ GetElements(fixedLengthWithOffset));
+ assertEquals('true,true,false,false,false,false,false,false,',
+ GetElements(lengthTracking));
+ assertEquals('true,false,false,false,false,false,false,false,',
+ GetElements(lengthTrackingWithOffset));
+
+ // Resize beyond the offset of the length tracking arrays.
+ rab.resize(1);
+ assertEquals('false,false,false,false,false,false,false,false,',
+ GetElements(fixedLength));
+ assertEquals('false,false,false,false,false,false,false,false,',
+ GetElements(fixedLengthWithOffset));
+ assertEquals('true,false,false,false,false,false,false,false,',
+ GetElements(lengthTracking));
+ assertEquals('false,false,false,false,false,false,false,false,',
+ GetElements(lengthTrackingWithOffset));
+
+ rab.resize(8);
+
+ assertEquals('true,true,true,true,false,false,false,false,',
+ GetElements(fixedLength));
+ assertEquals('true,true,true,false,false,false,false,false,',
+ GetElements(fixedLengthWithOffset));
+ assertEquals('true,true,true,true,true,true,true,true,',
+ GetElements(lengthTracking));
+ assertEquals('true,true,true,true,true,true,true,false,',
+ GetElements(lengthTrackingWithOffset));
+})();
+
(function StoreToOutOfBoundsTypedArrayWithFeedback() {
function WriteElement2(ta, i) {
ta[2] = i;
}
%EnsureFeedbackVectorForFunction(WriteElement2);
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
assertEquals(0, i8a[2]);
@@ -554,7 +617,7 @@ const ctors = [
return 2 in ta;
}
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
i8a.__proto__ = {2: 'wrong value'};
i8a[2] = 10;
@@ -576,7 +639,7 @@ const ctors = [
%EnsureFeedbackVectorForFunction(ReadElement2);
%EnsureFeedbackVectorForFunction(HasElement2);
- const rab = new ResizableArrayBuffer(16, 40);
+ const rab = CreateResizableArrayBuffer(16, 40);
const i8a = new Int8Array(rab, 0, 4);
i8a.__proto__ = {2: 'wrong value'};
i8a[2] = 10;
@@ -593,7 +656,7 @@ const ctors = [
})();
(function EnumerateElements() {
- let rab = new ResizableArrayBuffer(100, 200);
+ let rab = CreateResizableArrayBuffer(100, 200);
for (let ctor of ctors) {
const ta = new ctor(rab, 0, 3);
let keys = '';
@@ -611,28 +674,23 @@ const ctors = [
function TestIteration(ta, expected) {
let values = [];
for (const value of ta) {
- values.push(value);
+ values.push(Number(value));
}
assertEquals(expected, values);
}
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
-
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
// We can use the same RAB for all the TAs below, since we won't modify it
// after writing the initial values.
- const rab = new ResizableArrayBuffer(buffer_byte_length,
+ const rab = CreateResizableArrayBuffer(buffer_byte_length,
2 * buffer_byte_length);
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
// Write some data into the array.
let ta_write = new ctor(rab);
for (let i = 0; i < no_elements; ++i) {
- ta_write[i] = i % 128;
+ WriteToTypedArray(ta_write, i, i % 128);
}
// Create various different styles of TypedArrays with the RAB as the
@@ -674,12 +732,12 @@ const ctors = [
// Helpers for iteration tests.
function CreateRab(buffer_byte_length, ctor) {
- const rab = new ResizableArrayBuffer(buffer_byte_length,
- 2 * buffer_byte_length);
+ const rab = CreateResizableArrayBuffer(buffer_byte_length,
+ 2 * buffer_byte_length);
// Write some data into the array.
let ta_write = new ctor(rab);
for (let i = 0; i < buffer_byte_length / ctor.BYTES_PER_ELEMENT; ++i) {
- ta_write[i] = i % 128;
+ WriteToTypedArray(ta_write, i, i % 128);
}
return rab;
}
@@ -689,7 +747,7 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
let values = [];
let resized = false;
for (const value of ta) {
- values.push(value);
+ values.push(Number(value));
if (!resized && values.length == resize_after) {
rab.resize(new_byte_length);
resized = true;
@@ -704,10 +762,6 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const offset = 2;
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
@@ -762,10 +816,6 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
// We need to recreate the RAB between all TA tests, since we grow it.
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
@@ -809,10 +859,6 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const offset = 2;
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
@@ -869,10 +915,6 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
const offset = 2;
for (let ctor of ctors) {
- if (ctor == BigInt64Array || ctor == BigUint64Array) {
- // This test doesn't work for BigInts.
- continue;
- }
const buffer_byte_length = no_elements * ctor.BYTES_PER_ELEMENT;
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
@@ -903,3 +945,214 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
});
}
}());
+
+(function Destructuring() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ let ta_write = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(ta_write, i, i);
+ }
+
+ {
+ let [a, b, c, d, e] = fixedLength;
+ assertEquals([0, 1, 2, 3], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+
+ {
+ let [a, b, c] = fixedLengthWithOffset;
+ assertEquals([2, 3], ToNumbers([a, b]));
+ assertEquals(undefined, c);
+ }
+
+ {
+ let [a, b, c, d, e] = lengthTracking;
+ assertEquals([0, 1, 2, 3], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+
+ {
+ let [a, b, c] = lengthTrackingWithOffset;
+ assertEquals([2, 3], ToNumbers([a, b]));
+ assertEquals(undefined, c);
+ }
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { let [a, b, c] = fixedLength; }, TypeError);
+ assertThrows(() => { let [a, b, c] = fixedLengthWithOffset; }, TypeError);
+
+ {
+ let [a, b, c, d] = lengthTracking;
+ assertEquals([0, 1, 2], ToNumbers([a, b, c]));
+ assertEquals(undefined, d);
+ }
+
+ {
+ let [a, b] = lengthTrackingWithOffset;
+ assertEquals([2], ToNumbers([a]));
+ assertEquals(undefined, b);
+ }
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { let [a, b, c] = fixedLength; }, TypeError);
+ assertThrows(() => { let [a, b, c] = fixedLengthWithOffset; }, TypeError);
+ assertThrows(() => { let [a, b, c] = lengthTrackingWithOffset; },
+ TypeError);
+
+ {
+ let [a, b] = lengthTracking;
+ assertEquals([0], ToNumbers([a]));
+ assertEquals(undefined, b);
+ }
+
+ // Shrink to 0.
+ rab.resize(0);
+
+ assertThrows(() => { let [a, b, c] = fixedLength; }, TypeError);
+ assertThrows(() => { let [a, b, c] = fixedLengthWithOffset; }, TypeError);
+ assertThrows(() => { let [a, b, c] = lengthTrackingWithOffset; },
+ TypeError);
+
+ {
+ let [a] = lengthTracking;
+ assertEquals(undefined, a);
+ }
+
+ // Grow so that all TAs are back in-bounds. The new memory is zeroed.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+
+ {
+ let [a, b, c, d, e] = fixedLength;
+ assertEquals([0, 0, 0, 0], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+
+ {
+ let [a, b, c] = fixedLengthWithOffset;
+ assertEquals([0, 0], ToNumbers([a, b]));
+ assertEquals(undefined, c);
+ }
+
+ {
+ let [a, b, c, d, e, f, g] = lengthTracking;
+ assertEquals([0, 0, 0, 0, 0, 0], ToNumbers([a, b, c, d, e, f]));
+ assertEquals(undefined, g);
+ }
+
+ {
+ let [a, b, c, d, e] = lengthTrackingWithOffset;
+ assertEquals([0, 0, 0, 0], ToNumbers([a, b, c, d]));
+ assertEquals(undefined, e);
+ }
+ }
+}());
+
+(function TestFill() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ assertEquals([0, 0, 0, 0], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(fixedLength, 1);
+ assertEquals([1, 1, 1, 1], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(fixedLengthWithOffset, 2);
+ assertEquals([1, 1, 2, 2], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTracking, 3);
+ assertEquals([3, 3, 3, 3], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 4);
+ assertEquals([3, 3, 4, 4], ReadDataFromBuffer(rab, ctor));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => FillHelper(fixedLength, 5), TypeError);
+ assertEquals([3, 3, 4], ReadDataFromBuffer(rab, ctor));
+
+ assertThrows(() => FillHelper(fixedLengthWithOffset, 6), TypeError);
+ assertEquals([3, 3, 4], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTracking, 7);
+ assertEquals([7, 7, 7], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 8);
+ assertEquals([7, 7, 8], ReadDataFromBuffer(rab, ctor));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => FillHelper(fixedLength, 9), TypeError);
+ assertEquals([7], ReadDataFromBuffer(rab, ctor));
+
+ assertThrows(() => FillHelper(fixedLengthWithOffset, 10), TypeError);
+ assertEquals([7], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTracking, 11);
+ assertEquals([11], ReadDataFromBuffer(rab, ctor));
+
+ assertThrows(() => FillHelper(lengthTrackingWithOffset, 12), TypeError);
+ assertEquals([11], ReadDataFromBuffer(rab, ctor));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+
+ FillHelper(fixedLength, 13);
+ assertEquals([13, 13, 13, 13, 0, 0], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(fixedLengthWithOffset, 14);
+ assertEquals([13, 13, 14, 14, 0, 0], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTracking, 15);
+ assertEquals([15, 15, 15, 15, 15, 15], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 16);
+ assertEquals([15, 15, 16, 16, 16, 16], ReadDataFromBuffer(rab, ctor));
+
+ // Filling with non-undefined start & end.
+ FillHelper(fixedLength, 17, 1, 3);
+ assertEquals([15, 17, 17, 16, 16, 16], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(fixedLengthWithOffset, 18, 1, 2);
+ assertEquals([15, 17, 17, 18, 16, 16], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTracking, 19, 1, 3);
+ assertEquals([15, 19, 19, 18, 16, 16], ReadDataFromBuffer(rab, ctor));
+
+ FillHelper(lengthTrackingWithOffset, 20, 1, 2);
+ assertEquals([15, 19, 19, 20, 16, 16], ReadDataFromBuffer(rab, ctor));
+ }
+})();
+
+(function FillParameterConversionResizes() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => { rab.resize(2); return 0;}};
+ assertThrows(() => { FillHelper(fixedLength, evil, 1, 2); }, TypeError);
+ rab.resize(4 * ctor.BYTES_PER_ELEMENT);
+ assertThrows(() => { FillHelper(fixedLength, 3, evil, 2); }, TypeError);
+ rab.resize(4 * ctor.BYTES_PER_ELEMENT);
+ assertThrows(() => { FillHelper(fixedLength, 3, 1, evil); }, TypeError);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
index 5969950433..eeab4983f5 100644
--- a/deps/v8/test/mjsunit/wasm/anyfunc.js
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -152,7 +152,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_a_a);
builder.addFunction('main', sig_index)
- .addBody([kExprRefNull, kWasmAnyFunc, kExprLocalSet, 0, kExprLocalGet, 0])
+ .addBody([kExprRefNull, kAnyFuncCode, kExprLocalSet, 0, kExprLocalGet, 0])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -164,7 +164,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_a_v);
builder.addFunction('main', sig_index)
- .addBody([kExprRefNull, kWasmAnyFunc])
+ .addBody([kExprRefNull, kAnyFuncCode])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -176,7 +176,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_a_v);
builder.addFunction('main', sig_index)
- .addBody([kExprRefNull, kWasmAnyFunc, kExprReturn])
+ .addBody([kExprRefNull, kAnyFuncCode, kExprReturn])
.exportFunc();
const main = builder.instantiate().exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-api.js b/deps/v8/test/mjsunit/wasm/exceptions-api.js
index 0f30c6e59f..29d3de0602 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-api.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-api.js
@@ -2,58 +2,239 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-eh
+// Flags: --experimental-wasm-eh --experimental-wasm-reftypes
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestImport() {
print(arguments.callee.name);
- assertThrows(() => new WebAssembly.Exception(), TypeError,
- /Argument 0 must be an exception type/);
- assertThrows(() => new WebAssembly.Exception({}), TypeError,
- /Argument 0 must be an exception type with 'parameters'/);
- assertThrows(() => new WebAssembly.Exception({parameters: ['foo']}), TypeError,
+ assertThrows(() => new WebAssembly.Tag(), TypeError,
+ /Argument 0 must be a tag type/);
+ assertThrows(() => new WebAssembly.Tag({}), TypeError,
+ /Argument 0 must be a tag type with 'parameters'/);
+ assertThrows(() => new WebAssembly.Tag({parameters: ['foo']}), TypeError,
/Argument 0 parameter type at index #0 must be a value type/);
- assertThrows(() => new WebAssembly.Exception({parameters: {}}), TypeError,
+ assertThrows(() => new WebAssembly.Tag({parameters: {}}), TypeError,
/Argument 0 contains parameters without 'length'/);
- let js_except_i32 = new WebAssembly.Exception({parameters: ['i32']});
- let js_except_v = new WebAssembly.Exception({parameters: []});
+ let js_except_i32 = new WebAssembly.Tag({parameters: ['i32']});
+ let js_except_v = new WebAssembly.Tag({parameters: []});
let builder = new WasmModuleBuilder();
- builder.addImportedException("m", "ex", kSig_v_i);
+ builder.addImportedTag("m", "ex", kSig_v_i);
assertDoesNotThrow(() => builder.instantiate({ m: { ex: js_except_i32 }}));
assertThrows(
() => builder.instantiate({ m: { ex: js_except_v }}), WebAssembly.LinkError,
- /imported exception does not match the expected type/);
+ /imported tag does not match the expected type/);
assertThrows(
() => builder.instantiate({ m: { ex: js_except_v }}), WebAssembly.LinkError,
- /imported exception does not match the expected type/);
+ /imported tag does not match the expected type/);
+ assertTrue(js_except_i32.toString() == "[object WebAssembly.Tag]");
})();
(function TestExport() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex", kExternalException, except);
+ let except = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalTag, except);
let instance = builder.instantiate();
assertTrue(Object.prototype.hasOwnProperty.call(instance.exports, 'ex'));
assertEquals("object", typeof instance.exports.ex);
- assertInstanceof(instance.exports.ex, WebAssembly.Exception);
- assertSame(instance.exports.ex.constructor, WebAssembly.Exception);
+ assertInstanceof(instance.exports.ex, WebAssembly.Tag);
+ assertSame(instance.exports.ex.constructor, WebAssembly.Tag);
})();
(function TestImportExport() {
print(arguments.callee.name);
- let js_ex_i32 = new WebAssembly.Exception({parameters: ['i32']});
+ let js_ex_i32 = new WebAssembly.Tag({parameters: ['i32']});
let builder = new WasmModuleBuilder();
- let index = builder.addImportedException("m", "ex", kSig_v_i);
- builder.addExportOfKind("ex", kExternalException, index);
+ let index = builder.addImportedTag("m", "ex", kSig_v_i);
+ builder.addExportOfKind("ex", kExternalTag, index);
let instance = builder.instantiate({ m: { ex: js_ex_i32 }});
let res = instance.exports.ex;
assertEquals(res, js_ex_i32);
})();
+
+
+(function TestExceptionConstructor() {
+ print(arguments.callee.name);
+ // Check errors.
+ let js_tag = new WebAssembly.Tag({parameters: []});
+ assertThrows(() => new WebAssembly.Exception(0), TypeError,
+ /Argument 0 must be a WebAssembly tag/);
+ assertThrows(() => new WebAssembly.Exception({}), TypeError,
+ /Argument 0 must be a WebAssembly tag/);
+ assertThrows(() => WebAssembly.Exception(js_tag), TypeError,
+ /WebAssembly.Exception must be invoked with 'new'/);
+ let js_exception = new WebAssembly.Exception(js_tag, []);
+
+ // Check prototype.
+ assertSame(WebAssembly.Exception.prototype, js_exception.__proto__);
+ assertTrue(js_exception instanceof WebAssembly.Exception);
+
+ // Check prototype of a thrown exception.
+ let builder = new WasmModuleBuilder();
+ let wasm_tag = builder.addTag(kSig_v_v);
+ builder.addFunction("throw", kSig_v_v)
+ .addBody([kExprThrow, wasm_tag]).exportFunc();
+ let instance = builder.instantiate();
+ try {
+ instance.exports.throw();
+ } catch (e) {
+ assertTrue(e instanceof WebAssembly.Exception);
+ }
+})();
+
+(function TestExceptionConstructorWithPayload() {
+ print(arguments.callee.name);
+ let tag = new WebAssembly.Tag(
+ {parameters: ['i32', 'f32', 'i64', 'f64', 'externref']});
+ assertThrows(() => new WebAssembly.Exception(
+ tag, [1n, 2, 3n, 4, {}]), TypeError);
+ assertDoesNotThrow(() => new WebAssembly.Exception(tag, [3, 4, 5n, 6, {}]));
+})();
+
+(function TestCatchJSException() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let js_tag = new WebAssembly.Tag({parameters: []});
+ let js_func_index = builder.addImport('m', 'js_func', kSig_v_v);
+ let js_tag_index = builder.addImportedTag("m", "js_tag", kSig_v_v);
+ let tag_index = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("wasm_tag", kExternalTag, tag_index);
+ builder.addFunction("catch", kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprCallFunction, js_func_index,
+ kExprI32Const, 0,
+ kExprCatch, js_tag_index,
+ kExprI32Const, 1,
+ kExprCatch, tag_index,
+ kExprI32Const, 2,
+ kExprEnd
+ ]).exportFunc();
+ let tag;
+ function js_func() {
+ throw new WebAssembly.Exception(tag, []);
+ }
+ let instance = builder.instantiate({m: {js_func, js_tag}});
+ tag = js_tag;
+ assertEquals(1, instance.exports.catch());
+ tag = instance.exports.wasm_tag;
+ assertEquals(2, instance.exports.catch());
+})();
+
+function TestCatchJS(types_str, types, values) {
+ // Create a JS exception, catch it in wasm and check the unpacked value(s).
+ let builder = new WasmModuleBuilder();
+ let js_tag = new WebAssembly.Tag({parameters: types_str});
+ let js_func_index = builder.addImport('m', 'js_func', kSig_v_v);
+ let sig1 = makeSig(types, []);
+ let sig2 = makeSig([], types);
+ let js_tag_index = builder.addImportedTag("m", "js_tag", sig1);
+ let tag_index = builder.addTag(sig1);
+ let return_type = builder.addType(sig2);
+ builder.addExportOfKind("wasm_tag", kExternalTag, tag_index);
+ builder.addFunction("catch", sig2)
+ .addBody([
+ kExprTry, return_type,
+ kExprCallFunction, js_func_index,
+ kExprUnreachable,
+ kExprCatch, js_tag_index,
+ kExprCatch, tag_index,
+ kExprEnd
+ ]).exportFunc();
+ let exception;
+ function js_func() {
+ throw exception;
+ }
+ let expected = values.length == 1 ? values[0] : values;
+ let instance = builder.instantiate({m: {js_func, js_tag}});
+ exception = new WebAssembly.Exception(js_tag, values);
+ assertEquals(expected, instance.exports.catch());
+ exception = new WebAssembly.Exception(instance.exports.wasm_tag, values);
+ assertEquals(expected, instance.exports.catch());
+}
+
+(function TestCatchJSExceptionWithPayload() {
+ print(arguments.callee.name);
+ TestCatchJS(['i32'], [kWasmI32], [1]);
+ TestCatchJS(['i64'], [kWasmI64], [2n]);
+ TestCatchJS(['f32'], [kWasmF32], [3]);
+ TestCatchJS(['f64'], [kWasmF64], [4]);
+ TestCatchJS(['externref'], [kWasmExternRef], [{value: 5}]);
+ TestCatchJS(['i32', 'i64', 'f32', 'f64', 'externref'],
+ [kWasmI32, kWasmI64, kWasmF32, kWasmF64, kWasmExternRef],
+ [6, 7n, 8, 9, {value: 10}]);
+})();
+
+function TestGetArgHelper(types_str, types, values) {
+ let tag = new WebAssembly.Tag({parameters: types_str});
+ let exception = new WebAssembly.Exception(tag, values);
+ for (i = 0; i < types.length; ++i) {
+ assertEquals(exception.getArg(tag, i), values[i]);
+ }
+
+ let builder = new WasmModuleBuilder();
+ let sig = makeSig(types, []);
+ let tag_index = builder.addImportedTag("m", "t", sig);
+ let body = [];
+ for (i = 0; i < types.length; ++i) {
+ body.push(kExprLocalGet, i);
+ }
+ body.push(kExprThrow, tag_index);
+ builder.addFunction("throw", sig)
+ .addBody(body).exportFunc();
+ let instance = builder.instantiate({'m': {'t': tag}});
+ try {
+ instance.exports.throw(...values);
+ } catch (e) {
+ for (i = 0; i < types.length; ++i) {
+ assertEquals(e.getArg(tag, i), values[i]);
+ }
+ }
+}
+
+(function TestGetArg() {
+ // Check errors.
+ let tag = new WebAssembly.Tag({parameters: ['i32']});
+ let exception = new WebAssembly.Exception(tag, [0]);
+ assertThrows(() => exception.getArg(0, 0), TypeError,
+ /Argument 0 must be a WebAssembly.Tag/);
+ assertThrows(() => exception.getArg({}, 0), TypeError,
+ /Argument 0 must be a WebAssembly.Tag/);
+ assertThrows(() => exception.getArg(tag, undefined), TypeError,
+ /Index must be convertible to a valid number/);
+ assertThrows(() => exception.getArg(tag, 0xFFFFFFFF), RangeError,
+ /Index out of range/);
+ let wrong_tag = new WebAssembly.Tag({parameters: ['i32']});
+ assertThrows(() => exception.getArg(wrong_tag, 0), TypeError,
+ /First argument does not match the exception tag/);
+
+ // Check decoding.
+ TestGetArgHelper(['i32'], [kWasmI32], [1]);
+ TestGetArgHelper(['i64'], [kWasmI64], [2n]);
+ TestGetArgHelper(['f32'], [kWasmF32], [3]);
+ TestGetArgHelper(['f64'], [kWasmF64], [4]);
+ TestGetArgHelper(['externref'], [kWasmExternRef], [{val: 5}]);
+ TestGetArgHelper(['i32', 'i64', 'f32', 'f64', 'externref'], [kWasmI32, kWasmI64, kWasmF32, kWasmF64, kWasmExternRef], [5, 6n, 7, 8, {val: 9}]);
+})();
+
+(function TestExceptionIs() {
+ print(arguments.callee.name);
+ let tag1 = new WebAssembly.Tag({parameters: []});
+ let tag2 = new WebAssembly.Tag({parameters: []});
+ assertThrows(() => new WebAssembly.Exception({}, []), TypeError,
+ /Argument 0 must be a WebAssembly tag/);
+
+ let exception = new WebAssembly.Exception(tag1, []);
+ assertTrue(exception.is(tag1));
+ assertFalse(exception.is(tag2));
+
+ assertThrows(() => exception.is.apply({}, tag1), TypeError,
+ /Expected a WebAssembly.Exception object/);
+})();
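A minimal sketch of the renamed JS API that the updated exceptions-api.js assertions above exercise (Tag construction, Exception packing, getArg and is), assuming the same --experimental-wasm-eh flag; the parameter list here is chosen purely for illustration:
// Illustrative sketch of the Tag/Exception API asserted above.
let tag = new WebAssembly.Tag({parameters: ['i32', 'f64']});  // declare the payload signature
let exn = new WebAssembly.Exception(tag, [42, 1.5]);          // pack a payload under that tag
exn.is(tag);         // true: exn was created with this exact tag
exn.getArg(tag, 0);  // 42: payload slot 0, readable only with the matching tag
exn.getArg(tag, 1);  // 1.5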
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-export.js b/deps/v8/test/mjsunit/wasm/exceptions-export.js
index 823688bdee..1bd2cc0602 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-export.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-export.js
@@ -9,11 +9,11 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
(function TestExportMultiple() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_i);
- builder.addExportOfKind("ex1a", kExternalException, except1);
- builder.addExportOfKind("ex1b", kExternalException, except1);
- builder.addExportOfKind("ex2", kExternalException, except2);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_i);
+ builder.addExportOfKind("ex1a", kExternalTag, except1);
+ builder.addExportOfKind("ex1b", kExternalTag, except1);
+ builder.addExportOfKind("ex2", kExternalTag, except2);
let instance = builder.instantiate();
assertTrue(Object.prototype.hasOwnProperty.call(instance.exports, 'ex1a'));
@@ -26,32 +26,32 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
(function TestExportOutOfBounds() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex_oob", kExternalException, except + 1);
+ let except = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex_oob", kExternalTag, except + 1);
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
- 'WebAssembly.Module(): exception index 1 out of bounds (1 entry) @+30');
+ 'WebAssembly.Module(): tag index 1 out of bounds (1 entry) @+30');
})();
(function TestExportSameNameTwice() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex", kExternalException, except);
- builder.addExportOfKind("ex", kExternalException, except);
+ let except = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalTag, except);
+ builder.addExportOfKind("ex", kExternalTag, except);
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
'WebAssembly.Module(): Duplicate export name \'ex\' ' +
- 'for exception 0 and exception 0 @+28');
+ 'for tag 0 and tag 0 @+28');
})();
(function TestExportModuleGetExports() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex", kExternalException, except);
+ let except = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalTag, except);
let module = new WebAssembly.Module(builder.toBuffer());
let exports = WebAssembly.Module.exports(module);
- assertArrayEquals([{ name: "ex", kind: "exception" }], exports);
+ assertArrayEquals([{ name: "ex", kind: "tag" }], exports);
})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-externref.js b/deps/v8/test/mjsunit/wasm/exceptions-externref.js
index 6bc5ffd71b..c0505599b9 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-externref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-externref.js
@@ -11,10 +11,10 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowRefNull() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_r);
+ let except = builder.addTag(kSig_v_r);
builder.addFunction("throw_null", kSig_v_v)
.addBody([
- kExprRefNull, kWasmExternRef,
+ kExprRefNull, kExternRefCode,
kExprThrow, except,
]).exportFunc();
let instance = builder.instantiate();
@@ -26,14 +26,14 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatchRefNull() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_r);
+ let except = builder.addTag(kSig_v_r);
builder.addFunction("throw_catch_null", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
kExprIf, kWasmI32,
- kExprRefNull, kWasmExternRef,
+ kExprRefNull, kExternRefCode,
kExprThrow, except,
kExprElse,
kExprI32Const, 42,
@@ -57,7 +57,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowRefParam() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_r);
+ let except = builder.addTag(kSig_v_r);
builder.addFunction("throw_param", kSig_v_r)
.addBody([
kExprLocalGet, 0,
@@ -76,10 +76,10 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatchRefParam() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_r);
+ let except = builder.addTag(kSig_v_r);
builder.addFunction("throw_catch_param", kSig_r_r)
.addBody([
- kExprTry, kWasmExternRef,
+ kExprTry, kExternRefCode,
kExprLocalGet, 0,
kExprThrow, except,
kExprCatch, except,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-import.js b/deps/v8/test/mjsunit/wasm/exceptions-import.js
index 500a9168de..8297c90ce7 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-import.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-import.js
@@ -9,32 +9,32 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// Helper function to return a new exported exception with the {kSig_v_v} type
// signature from an anonymous module. The underlying module is thrown away.
// This allows tests to reason solely about importing exceptions.
-function NewExportedException() {
+function NewExportedTag() {
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex", kExternalException, except);
+ let tag = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("t", kExternalTag, tag);
let instance = builder.instantiate();
- return instance.exports.ex;
+ return instance.exports.t;
}
(function TestImportSimple() {
print(arguments.callee.name);
- let exported = NewExportedException();
+ let exported = NewExportedTag();
let builder = new WasmModuleBuilder();
- let except = builder.addImportedException("m", "ex", kSig_v_v);
+ let except = builder.addImportedTag("m", "ex", kSig_v_v);
assertDoesNotThrow(() => builder.instantiate({ m: { ex: exported }}));
})();
(function TestImportMultiple() {
print(arguments.callee.name);
- let exported = NewExportedException();
+ let exported = NewExportedTag();
let builder = new WasmModuleBuilder();
- let except1 = builder.addImportedException("m", "ex1", kSig_v_v);
- let except2 = builder.addImportedException("m", "ex2", kSig_v_v);
- let except3 = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex2", kExternalException, except2);
- builder.addExportOfKind("ex3", kExternalException, except3);
+ let except1 = builder.addImportedTag("m", "ex1", kSig_v_v);
+ let except2 = builder.addImportedTag("m", "ex2", kSig_v_v);
+ let except3 = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex2", kExternalTag, except2);
+ builder.addExportOfKind("ex3", kExternalTag, except3);
let instance = builder.instantiate({ m: { ex1: exported, ex2: exported }});
assertTrue(except1 < except3 && except2 < except3);
@@ -46,50 +46,50 @@ function NewExportedException() {
(function TestImportMissing() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addImportedException("m", "ex", kSig_v_v);
+ let except = builder.addImportedTag("m", "ex", kSig_v_v);
assertThrows(
() => builder.instantiate({}), TypeError,
/module is not an object or function/);
assertThrows(
() => builder.instantiate({ m: {}}), WebAssembly.LinkError,
- /exception import requires a WebAssembly.Exception/);
+ /tag import requires a WebAssembly.Tag/);
})();
(function TestImportValueMismatch() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addImportedException("m", "ex", kSig_v_v);
+ let except = builder.addImportedTag("m", "ex", kSig_v_v);
assertThrows(
() => builder.instantiate({ m: { ex: 23 }}), WebAssembly.LinkError,
- /exception import requires a WebAssembly.Exception/);
+ /tag import requires a WebAssembly.Tag/);
assertThrows(
() => builder.instantiate({ m: { ex: {} }}), WebAssembly.LinkError,
- /exception import requires a WebAssembly.Exception/);
- var monkey = Object.create(NewExportedException());
+ /tag import requires a WebAssembly.Tag/);
+ var monkey = Object.create(NewExportedTag());
assertThrows(
() => builder.instantiate({ m: { ex: monkey }}), WebAssembly.LinkError,
- /exception import requires a WebAssembly.Exception/);
+ /tag import requires a WebAssembly.Tag/);
})();
(function TestImportSignatureMismatch() {
print(arguments.callee.name);
- let exported = NewExportedException();
+ let exported = NewExportedTag();
let builder = new WasmModuleBuilder();
- let except = builder.addImportedException("m", "ex", kSig_v_i);
+ let except = builder.addImportedTag("m", "ex", kSig_v_i);
assertThrows(
() => builder.instantiate({ m: { ex: exported }}), WebAssembly.LinkError,
- /imported exception does not match the expected type/);
+ /imported tag does not match the expected type/);
})();
(function TestImportModuleGetImports() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addImportedException("m", "ex", kSig_v_v);
+ let except = builder.addImportedTag("m", "ex", kSig_v_v);
let module = new WebAssembly.Module(builder.toBuffer());
let imports = WebAssembly.Module.imports(module);
- assertArrayEquals([{ module: "m", name: "ex", kind: "exception" }], imports);
+ assertArrayEquals([{ module: "m", name: "ex", kind: "tag" }], imports);
})();
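The user-visible effect of the import-side rename is in reflection: tag imports now report kind "tag" instead of "exception". A small sketch, assuming the WasmModuleBuilder helpers from the test harness behave as shown in the tests above:
// Sketch: reflection output asserted in TestImportModuleGetImports above.
let builder = new WasmModuleBuilder();
builder.addImportedTag("m", "ex", kSig_v_v);
let module = new WebAssembly.Module(builder.toBuffer());
WebAssembly.Module.imports(module);  // [{ module: "m", name: "ex", kind: "tag" }]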
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
index 7920ac73f8..13421a4cf1 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
@@ -11,7 +11,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestRethrowInCatch() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
kExprTry, kWasmVoid,
@@ -44,7 +44,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestRethrowInCatchAll() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
kExprTry, kWasmVoid,
@@ -78,8 +78,8 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestRethrowNested() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
builder.addFunction("rethrow_nested", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
@@ -116,7 +116,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestRethrowRecatch() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("rethrow_recatch", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-shared.js b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
index 4c2f8a7fde..3bf2883000 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-shared.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
@@ -10,8 +10,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// signature from an anonymous module. The underlying module is thrown away.
function NewExportedException() {
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex", kExternalException, except);
+ let except = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalTag, except);
let instance = builder.instantiate();
return instance.exports.ex;
}
@@ -23,7 +23,7 @@ function NewExportedException() {
let builder = new WasmModuleBuilder();
let sig_index = builder.addType(kSig_v_v);
let fun = builder.addImport("m", "f", sig_index);
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("throw", kSig_v_v)
.addBody([
kExprThrow, except
@@ -38,7 +38,7 @@ function NewExportedException() {
let ex_obj = new Error("my exception");
let instance = builder.instantiate({ m: { f: function() { throw ex_obj }}});
- assertThrows(() => instance.exports.throw(), WebAssembly.RuntimeError);
+ assertThrows(() => instance.exports.throw(), WebAssembly.Exception);
assertThrowsEquals(() => instance.exports.catch(), ex_obj);
try {
instance.exports.throw();
@@ -55,7 +55,7 @@ function NewExportedException() {
let builder = new WasmModuleBuilder();
let sig_index = builder.addType(kSig_v_v);
let fun = builder.addImport("m", "f", sig_index);
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("throw", kSig_v_v)
.addBody([
kExprThrow, except
@@ -71,7 +71,7 @@ function NewExportedException() {
let instance1 = builder.instantiate({ m: { f: assertUnreachable }});
let instance2 = builder.instantiate({ m: { f: function() { throw ex_obj }}});
- assertThrows(() => instance1.exports.throw(), WebAssembly.RuntimeError);
+ assertThrows(() => instance1.exports.throw(), WebAssembly.Exception);
assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
try {
instance1.exports.throw();
@@ -88,9 +88,9 @@ function NewExportedException() {
let builder = new WasmModuleBuilder();
let sig_index = builder.addType(kSig_v_v);
let fun = builder.addImport("m", "f", sig_index);
- let except1 = builder.addImportedException("m", "ex1", kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
- builder.addExportOfKind("ex2", kExternalException, except2);
+ let except1 = builder.addImportedTag("m", "ex1", kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex2", kExternalTag, except2);
builder.addFunction("throw", kSig_v_v)
.addBody([
kExprThrow, except2
@@ -108,7 +108,7 @@ function NewExportedException() {
let instance2 = builder.instantiate({ m: { f: function() { throw ex_obj },
ex1: instance1.exports.ex2 }});
- assertThrows(() => instance1.exports.throw(), WebAssembly.RuntimeError);
+ assertThrows(() => instance1.exports.throw(), WebAssembly.Exception);
assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
try {
instance1.exports.throw();
@@ -123,9 +123,9 @@ function NewExportedException() {
(function TestMultiModuleShared() {
print(arguments.callee.name);
let builder1 = new WasmModuleBuilder();
- let except1 = builder1.addException(kSig_v_v);
- let except2 = builder1.addException(kSig_v_v);
- builder1.addExportOfKind("ex", kExternalException, except2);
+ let except1 = builder1.addTag(kSig_v_v);
+ let except2 = builder1.addTag(kSig_v_v);
+ builder1.addExportOfKind("ex", kExternalTag, except2);
builder1.addFunction("throw", kSig_v_v)
.addBody([
kExprThrow, except2
@@ -133,7 +133,7 @@ function NewExportedException() {
let builder2 = new WasmModuleBuilder();
let sig_index = builder2.addType(kSig_v_v);
let fun = builder2.addImport("m", "f", sig_index);
- let except = builder2.addImportedException("m", "ex", kSig_v_v);
+ let except = builder2.addImportedTag("m", "ex", kSig_v_v);
builder2.addFunction("catch", kSig_v_v)
.addBody([
kExprTry, kWasmVoid,
@@ -146,7 +146,7 @@ function NewExportedException() {
let instance2 = builder2.instantiate({ m: { f: function() { throw ex_obj },
ex: instance1.exports.ex }});
- assertThrows(() => instance1.exports.throw(), WebAssembly.RuntimeError);
+ assertThrows(() => instance1.exports.throw(), WebAssembly.Exception);
assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
try {
instance1.exports.throw();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
index ace3322480..ed6b287ddd 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -11,7 +11,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var kSig_v_s = makeSig([kWasmS128], []);
- var except = builder.addException(kSig_v_s);
+ var except = builder.addTag(kSig_v_s);
builder.addFunction("throw_simd", kSig_v_v)
.addLocals(kWasmS128, 1)
.addBody([
@@ -29,7 +29,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var kSig_v_s = makeSig([kWasmS128], []);
- var except = builder.addException(kSig_v_s);
+ var except = builder.addTag(kSig_v_s);
builder.addFunction("throw_catch_simd", kSig_i_v)
.addLocals(kWasmS128, 1)
.addBody([
@@ -52,7 +52,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var kSig_v_s = makeSig([kWasmS128], []);
- var except = builder.addException(kSig_v_s);
+ var except = builder.addTag(kSig_v_s);
const in_idx = 0x10; // Input index in memory.
const out_idx = 0x20; // Output index in memory.
builder.addImportedMemory("env", "memory");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-type-reflection.js b/deps/v8/test/mjsunit/wasm/exceptions-type-reflection.js
new file mode 100644
index 0000000000..a17d30ed10
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-type-reflection.js
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh --experimental-wasm-type-reflection
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+let testcases = [
+{types: {parameters:[]}, sig: kSig_v_v},
+{types: {parameters:["i32"]}, sig: kSig_v_i},
+{types: {parameters:["i64"]}, sig: kSig_v_l},
+{types: {parameters:["f64", "f64", "i32"]}, sig: kSig_v_ddi},
+{types: {parameters:["f32"]}, sig: kSig_v_f},
+];
+
+(function TestExport() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ testcases.forEach(function(expected, i) {
+ let except = builder.addTag(expected.sig);
+ builder.addExportOfKind("ex" + i, kExternalTag, except);
+ });
+
+ let instance = builder.instantiate();
+ testcases.forEach(function(expected, i) {
+ assertEquals(instance.exports["ex" + i].type(), expected.types);
+ });
+})();
+
+(function TestImportExport() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let imports = {m: {}};
+
+ testcases.forEach(function(expected, i) {
+ let t = new WebAssembly.Tag(expected.types);
+ let index = builder.addImportedTag("m", "ex" + i, expected.sig);
+ builder.addExportOfKind("ex" + i, kExternalTag, index);
+ imports.m["ex" + i] = t;
+ });
+
+ let instance = builder.instantiate(imports);
+ testcases.forEach(function(expected, i) {
+ assertEquals(instance.exports["ex" + i].type(), expected.types);
+ })
+})();
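For reference, a minimal sketch of the type() reflection the new test asserts, assuming --experimental-wasm-type-reflection exposes the method on a directly constructed tag the same way it does on the exported tags tested above:
// Sketch: Tag type reflection as asserted in exceptions-type-reflection.js.
let t = new WebAssembly.Tag({parameters: ['f64', 'f64', 'i32']});
t.type();  // {parameters: ['f64', 'f64', 'i32']} (assumed to match exported-tag behaviour)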
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-utils.js b/deps/v8/test/mjsunit/wasm/exceptions-utils.js
index 344ca64da1..218f87f33e 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-utils.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-utils.js
@@ -15,8 +15,8 @@ function assertWasmThrows(instance, runtime_id, values, code) {
eval(code);
}
} catch (e) {
- assertInstanceof(e, WebAssembly.RuntimeError);
- var e_runtime_id = %GetWasmExceptionId(e, instance);
+ assertInstanceof(e, WebAssembly.Exception);
+ var e_runtime_id = %GetWasmExceptionTagId(e, instance);
assertTrue(Number.isInteger(e_runtime_id));
assertEquals(e_runtime_id, runtime_id);
var e_values = %GetWasmExceptionValues(e);
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 578fa09e25..db4d6ed9bb 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -11,7 +11,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowSimple() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("throw_if_param_not_zero", kSig_i_i)
.addBody([
kExprLocalGet, 0,
@@ -33,7 +33,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestCatchEmptyBlocks() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("catch_empty_try", kSig_v_v)
.addBody([
kExprTry, kWasmVoid,
@@ -49,7 +49,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestCatchSimple() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction("simple_throw_catch_to_0_1", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
@@ -173,7 +173,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestExnWithWasmProtoNotCaught() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
let imp = builder.addImport('imp', 'ort', kSig_v_v);
let throw_fn = builder.addFunction('throw', kSig_v_v)
.addBody([kExprThrow, except])
@@ -212,7 +212,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
}
assertTrue(!!caught, 'should have trapped');
assertEquals(caught, wrapped_exn);
- assertInstanceof(caught.__proto__, WebAssembly.RuntimeError);
+ assertInstanceof(caught.__proto__, WebAssembly.Exception);
})();
(function TestStackOverflowNotCaught() {
@@ -241,9 +241,9 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestCatchComplex1() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
- let except3 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
+ let except3 = builder.addTag(kSig_v_v);
builder.addFunction("catch_complex", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
@@ -282,9 +282,9 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestCatchComplex2() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
- let except3 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
+ let except3 = builder.addTag(kSig_v_v);
builder.addFunction("catch_complex", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
@@ -320,7 +320,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowMultipleValues() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_ii);
+ let except = builder.addTag(kSig_v_ii);
builder.addFunction("throw_1_2", kSig_v_v)
.addBody([
kExprI32Const, 1,
@@ -336,7 +336,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatchParamI() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_i);
+ let except = builder.addTag(kSig_v_i);
builder.addFunction("throw_catch_param", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
@@ -358,7 +358,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowParamI() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_i);
+ let except = builder.addTag(kSig_v_i);
builder.addFunction("throw_param", kSig_v_i)
.addBody([
kExprLocalGet, 0,
@@ -374,7 +374,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatchParamF() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_f);
+ let except = builder.addTag(kSig_v_f);
builder.addFunction("throw_catch_param", kSig_f_f)
.addBody([
kExprTry, kWasmF32,
@@ -395,7 +395,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowParamF() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_f);
+ let except = builder.addTag(kSig_v_f);
builder.addFunction("throw_param", kSig_v_f)
.addBody([
kExprLocalGet, 0,
@@ -411,7 +411,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatchParamL() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_l);
+ let except = builder.addTag(kSig_v_l);
builder.addFunction("throw_catch_param", kSig_i_i)
.addLocals(kWasmI64, 1)
.addBody([
@@ -443,7 +443,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowParamL() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_l);
+ let except = builder.addTag(kSig_v_l);
builder.addFunction("throw_param", kSig_v_ii)
.addBody([
kExprLocalGet, 0,
@@ -465,7 +465,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatchParamD() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_d);
+ let except = builder.addTag(kSig_v_d);
builder.addFunction("throw_catch_param", kSig_d_d)
.addBody([
kExprTry, kWasmF64,
@@ -486,7 +486,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowParamD() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_d);
+ let except = builder.addTag(kSig_v_d);
builder.addFunction("throw_param", kSig_v_f)
.addBody([
kExprLocalGet, 0,
@@ -503,7 +503,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowParamComputed() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_i);
+ let except = builder.addTag(kSig_v_i);
builder.addFunction("throw_expr_with_params", kSig_v_ddi)
.addBody([
// p2 * (p0 + min(p0, p1))|0 - 20
@@ -530,7 +530,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestCatchCrossFunctions() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_i);
+ let except = builder.addTag(kSig_v_i);
// Helper function for throwing from JS. It is imported by the Wasm module
// as throw_i.
@@ -816,7 +816,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestDelegateNoThrow() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
builder.addFunction('test', kSig_i_v)
.addBody([
kExprTry, kWasmI32,
@@ -835,7 +835,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestDelegateThrow() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
let throw_if = builder.addFunction('throw', kSig_v_i)
.addBody([
kExprLocalGet, 0,
@@ -863,8 +863,8 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestDelegateThrowNoCatch() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
let throw_fn = builder.addFunction('throw', kSig_v_v)
.addBody([kExprThrow, except1])
.exportFunc();
@@ -883,15 +883,15 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd,
]).exportFunc();
instance = builder.instantiate();
- assertTraps(WebAssembly.RuntimeError, instance.exports.test);
+ assertThrows(instance.exports.test, WebAssembly.Exception);
})();
// Check that the exception is merged properly when both scopes can throw.
(function TestDelegateMerge() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
// throw_fn: 0 -> returns
// 1 -> throw except1
// 2 -> throw except2
@@ -925,9 +925,9 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
]).exportFunc();
instance = builder.instantiate();
assertEquals(2, instance.exports.test(1, 0));
- assertTraps(WebAssembly.RuntimeError, () => instance.exports.test(2, 0));
+ assertThrows(() => instance.exports.test(2, 0), WebAssembly.Exception);
assertEquals(2, instance.exports.test(0, 1));
- assertTraps(WebAssembly.RuntimeError, () => instance.exports.test(0, 2));
+ assertThrows(() => instance.exports.test(0, 2), WebAssembly.Exception);
assertEquals(1, instance.exports.test(0, 0));
})();
@@ -935,7 +935,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestDelegate1() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
let throw_fn = builder.addFunction('throw', kSig_v_v)
.addBody([kExprThrow, except])
.exportFunc();
@@ -961,8 +961,8 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestDelegateUnreachable() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
builder.addFunction('test', kSig_i_v)
.addBody([
kExprTry, kWasmI32,
@@ -983,7 +983,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestDelegateToCaller() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction('test', kSig_v_v)
.addBody([
kExprTry, kWasmVoid,
@@ -994,13 +994,13 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd
]).exportFunc();
instance = builder.instantiate();
- assertTraps(WebAssembly.RuntimeError, () => instance.exports.test());
+ assertThrows(() => instance.exports.test(), WebAssembly.Exception);
})();
(function TestThrowBeforeUnreachable() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction('throw_before_unreachable', kSig_i_v)
.addBody([
kExprTry, kWasmI32,
@@ -1018,7 +1018,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestUnreachableInCatchAll() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction('throw_before_unreachable', kSig_i_v)
.addBody([
kExprTry, kWasmI32,
@@ -1035,7 +1035,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowWithLocal() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction('throw_with_local', kSig_i_v)
.addLocals(kWasmI32, 4)
.addBody([
@@ -1058,7 +1058,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestCatchlessTry() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
builder.addFunction('catchless_try', kSig_v_i)
.addBody([
kExprTry, kWasmVoid,
diff --git a/deps/v8/test/mjsunit/wasm/externref.js b/deps/v8/test/mjsunit/wasm/externref.js
index 7c75a2b8c0..43192a7ef7 100644
--- a/deps/v8/test/mjsunit/wasm/externref.js
+++ b/deps/v8/test/mjsunit/wasm/externref.js
@@ -165,7 +165,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_r_v)
- .addBody([kExprRefNull, kWasmExternRef])
+ .addBody([kExprRefNull, kExternRefCode])
.exportFunc();
const instance = builder.instantiate();
@@ -196,7 +196,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_v)
- .addBody([kExprRefNull, kWasmExternRef, kExprRefIsNull])
+ .addBody([kExprRefNull, kExternRefCode, kExprRefIsNull])
.exportFunc();
const instance = builder.instantiate();
@@ -225,7 +225,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_r_v);
builder.addFunction('main', sig_index)
- .addBody([kExprRefNull, kWasmExternRef])
+ .addBody([kExprRefNull, kExternRefCode])
.exportFunc();
const main = builder.instantiate().exports.main;
@@ -237,7 +237,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_r_v);
builder.addFunction('main', sig_index)
- .addBody([kExprRefNull, kWasmExternRef, kExprReturn])
+ .addBody([kExprRefNull, kExternRefCode, kExprReturn])
.exportFunc();
const main = builder.instantiate().exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/gc-nominal.js b/deps/v8/test/mjsunit/wasm/gc-nominal.js
new file mode 100644
index 0000000000..0483b4a78f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-nominal.js
@@ -0,0 +1,31 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc-experiments
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+let struct1 = builder.addStruct([makeField(kWasmI32, true)]);
+let struct2 = builder.addStructExtending(
+ [makeField(kWasmI32, true), makeField(kWasmI32, true)], struct1);
+
+let array1 = builder.addArray(kWasmI32, true);
+let array2 = builder.addArrayExtending(kWasmI32, true, array1);
+
+builder.addFunction("main", kSig_v_v)
+ .addLocals(wasmOptRefType(struct1), 1)
+ .addLocals(wasmOptRefType(array1), 1)
+ .addBody([
+ kGCPrefix, kExprRttCanon, struct2,
+ kGCPrefix, kExprStructNewDefault, struct2,
+ kExprLocalSet, 0,
+ kExprI32Const, 10, // length
+ kGCPrefix, kExprRttCanon, array2,
+ kGCPrefix, kExprArrayNewDefault, array2,
+ kExprLocalSet, 1
+ ]);
+
+// This test is only interested in type checking.
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/load-elimination.js b/deps/v8/test/mjsunit/wasm/load-elimination.js
new file mode 100644
index 0000000000..8ca04ed040
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/load-elimination.js
@@ -0,0 +1,319 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+// These tests are meant to examine whether Turbofan CsaLoadElimination works
+// correctly for wasm. The TurboFan graphs can be examined with --trace-turbo.
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Fresh objects, known offsets
+(function LoadEliminationtFreshKnownTest() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true),
+ makeField(kWasmI32, true)]);
+
+ builder.addFunction("main", makeSig([kWasmI32], [kWasmI32]))
+ .addLocals(wasmOptRefType(struct), 1)
+ .addBody([
+ kExprI32Const, 10, // local1 = struct(10, 100);
+ kExprI32Const, 100,
+ kGCPrefix, kExprRttCanon, struct,
+ kGCPrefix, kExprStructNewWithRtt, struct,
+ kExprLocalSet, 1,
+ kExprLocalGet, 0, // Split control based on an unknown value
+ kExprIf, kWasmI32,
+ kExprLocalGet, 1, // local1.field1 = 42
+ kExprI32Const, 42,
+ kGCPrefix, kExprStructSet, struct, 1,
+ kExprLocalGet, 1, // local1.field1
+ kGCPrefix, kExprStructGet, struct, 1,
+ kExprElse,
+ kExprLocalGet, 1, // local1.field1 = 11
+ kExprI32Const, 11,
+ kGCPrefix, kExprStructSet, struct, 1,
+ kExprLocalGet, 1, // local1.field1 = 22
+ kExprI32Const, 22,
+ kGCPrefix, kExprStructSet, struct, 1,
+ kExprLocalGet, 1, // local1.field1 + local1.field1
+ kGCPrefix, kExprStructGet, struct, 1,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructGet, struct, 1,
+ kExprI32Add,
+ kExprEnd,
+ kExprLocalGet, 1, // return if-result * (local1.field1 + local1.field0)
+ kGCPrefix, kExprStructGet, struct, 0,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructGet, struct, 1,
+ kExprI32Add,
+ kExprI32Mul
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ assertEquals(instance.exports.main(1), 42 * (42 + 10));
+ assertEquals(instance.exports.main(0), (22 + 22) * (22 + 10));
+})();
+
+(function LoadEliminationtConstantKnownTest() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ let replaced_value = 55
+ let param_1_value = 42
+ let init_value_1 = 5
+ let init_value_2 = 17
+
+ let tester = builder.addFunction("tester", makeSig(
+ [wasmRefType(struct), wasmRefType(struct)], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructGet, struct, 0,
+
+ kExprLocalGet, 0,
+ kExprI32Const, replaced_value,
+ kGCPrefix, kExprStructSet, struct, 0,
+
+ // We should eliminate this load and replace it with replaced_value
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructGet, struct, 0,
+
+ kExprLocalGet, 1,
+ kExprI32Const, param_1_value,
+ kGCPrefix, kExprStructSet, struct, 0,
+
+ // Although we could eliminate this load before, we cannot anymore,
+ // because the parameters may alias.
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructGet, struct, 0,
+
+ kExprI32Add, kExprI32Add
+ ]);
+
+ function buildStruct(value) {
+ return [kExprI32Const, value, kGCPrefix, kExprRttCanon, struct,
+ kGCPrefix, kExprStructNewWithRtt, struct];
+ }
+
+ builder.addFunction("main_non_aliasing", kSig_i_v)
+ .addBody([
+ ...buildStruct(init_value_1), ...buildStruct(init_value_2),
+ kExprCallFunction, tester.index])
+ .exportFunc();
+
+ builder.addFunction("main_aliasing", kSig_i_v)
+ .addLocals(wasmOptRefType(struct), 1)
+ .addBody([
+ ...buildStruct(init_value_1), kExprLocalSet, 0,
+ kExprLocalGet, 0, kExprRefAsNonNull,
+ kExprLocalGet, 0, kExprRefAsNonNull,
+ kExprCallFunction, tester.index])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ assertEquals(init_value_1 + replaced_value + replaced_value,
+ instance.exports.main_non_aliasing());
+ assertEquals(init_value_1 + replaced_value + param_1_value,
+ instance.exports.main_aliasing());
+})();
+
+(function LoadEliminationtArbitraryKnownTest() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ let initial_value = 19;
+ let replacing_value_1 = 55;
+ let replacing_value_2 = 37;
+
+ let id = builder.addFunction("id", makeSig([wasmOptRefType(struct)],
+ [wasmOptRefType(struct)]))
+ .addBody([kExprLocalGet, 0])
+
+ builder.addFunction("main", kSig_i_v)
+ .addLocals(wasmOptRefType(struct), 2)
+ .addBody([
+ // We store a fresh struct in local0
+ kExprI32Const, initial_value,
+ kGCPrefix, kExprRttCanon, struct,
+ kGCPrefix, kExprStructNewWithRtt, struct,
+ kExprLocalSet, 0,
+
+ // We pass it through a function and store it to local1. local1 may now
+ // alias with anything.
+ kExprLocalGet, 0, kExprCallFunction, id.index, kExprLocalSet, 1,
+
+ kExprLocalGet, 0,
+ kExprI32Const, replacing_value_1,
+ kGCPrefix, kExprStructSet, struct, 0,
+
+ // We should eliminate this load.
+ kExprLocalGet, 0, kGCPrefix, kExprStructGet, struct, 0,
+
+ kExprLocalGet, 1,
+ kExprI32Const, replacing_value_2,
+ kGCPrefix, kExprStructSet, struct, 0,
+
+ // We should not eliminate this load.
+ kExprLocalGet, 0, kGCPrefix, kExprStructGet, struct, 0,
+
+ kExprI32Add])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ assertEquals(replacing_value_1 + replacing_value_2, instance.exports.main());
+})();
+
+(function LoadEliminationtFreshUnknownTest() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI64, true);
+
+ // parameter: unknown array index
+ builder.addFunction("main", makeSig([kWasmI32], [kWasmI32]))
+ .addLocals(wasmOptRefType(array), 1)
+ .addBody([
+ kExprI32Const, 5,
+ kGCPrefix, kExprRttCanon, array,
+ kGCPrefix, kExprArrayNewDefault, array,
+ kExprLocalSet, 1,
+
+ kExprLocalGet, 1, // a[i] = i for i = {0..4}
+ kExprI32Const, 0,
+ kExprI64Const, 0,
+ kGCPrefix, kExprArraySet, array,
+
+ kExprLocalGet, 1,
+ kExprI32Const, 1,
+ kExprI64Const, 1,
+ kGCPrefix, kExprArraySet, array,
+
+ kExprLocalGet, 1,
+ kExprI32Const, 2,
+ kExprI64Const, 2,
+ kGCPrefix, kExprArraySet, array,
+
+ kExprLocalGet, 1,
+ kExprI32Const, 3,
+ kExprI64Const, 3,
+ kGCPrefix, kExprArraySet, array,
+
+ kExprLocalGet, 1,
+ kExprI32Const, 4,
+ kExprI64Const, 4,
+ kGCPrefix, kExprArraySet, array,
+
+ // Get a constant index a[4] before setting unknown indices
+ kExprLocalGet, 1,
+ kExprI32Const, 4,
+ kGCPrefix, kExprArrayGet, array,
+
+ kExprLocalGet, 1, // Set a[local0] = 33
+ kExprLocalGet, 0,
+ kExprI64Const, 33,
+ kGCPrefix, kExprArraySet, array,
+
+ kExprLocalGet, 1, // Get a[local0]
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayGet, array,
+
+ kExprLocalGet, 1, // Known index load cannot be eliminated anymore
+ kExprI32Const, 3,
+ kGCPrefix, kExprArrayGet, array,
+
+ // A load from different unknown index a[local0 + 1] cannot be eliminated
+ kExprLocalGet, 1,
+ kExprLocalGet, 0,
+ kExprI32Const, 1,
+ kExprI32Add,
+ kGCPrefix, kExprArrayGet, array,
+
+ kExprI64Add, // return a[4] * (a[local0] - (a[3] + a[local0 + 1]))
+ kExprI64Sub,
+ kExprI64Mul,
+ kExprI32ConvertI64 // To not have to worry about BigInts in JS world
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ assertEquals(4 * (33 - (3 + 1)), instance.exports.main(0));
+ assertEquals(4 * (33 - (3 + 2)), instance.exports.main(1));
+ assertEquals(4 * (33 - (3 + 3)), instance.exports.main(2));
+ assertEquals(4 * (33 - (33 + 4)), instance.exports.main(3));
+})();
+
+(function LoadEliminationtAllBetsAreOffTest() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let array = builder.addArray(kWasmI32, true);
+
+ let value_0 = 19;
+ let value_1 = 55;
+ let value_2 = 2;
+
+ let id = builder.addFunction("id", makeSig([wasmOptRefType(array)],
+ [wasmOptRefType(array)]))
+ .addBody([kExprLocalGet, 0])
+
+ // parameters: array, index
+ let tester = builder.addFunction("tester",
+ makeSig([wasmRefType(array), kWasmI32], [kWasmI32]))
+ .addLocals(wasmOptRefType(struct), 1)
+ .addLocals(wasmOptRefType(array), 1)
+ .addBody([
+ // We store a fresh struct in local1
+ kExprI32Const, 0,
+ kGCPrefix, kExprRttCanon, struct,
+ kGCPrefix, kExprStructNewWithRtt, struct,
+ kExprLocalSet, 2,
+
+ // We pass the array parameter through a function and store it to local2.
+ kExprLocalGet, 0, kExprCallFunction, id.index, kExprLocalSet, 3,
+
+ // Set the parameter array, the fresh struct, then the arbitrary array to
+ // an unknown offset.
+ kExprLocalGet, 0,
+ kExprI32Const, 5,
+ kExprI32Const, value_0,
+ kGCPrefix, kExprArraySet, array,
+
+ kExprLocalGet, 2,
+ kExprI32Const, value_1,
+ kGCPrefix, kExprStructSet, struct, 0,
+
+ kExprLocalGet, 3,
+ kExprLocalGet, 1,
+ kExprI32Const, value_2,
+ kGCPrefix, kExprArraySet, array,
+
+ // Neither load can be eliminated.
+ kExprLocalGet, 0,
+ kExprI32Const, 5,
+ kGCPrefix, kExprArrayGet, array,
+
+ kExprLocalGet, 2,
+ kGCPrefix, kExprStructGet, struct, 0,
+
+ kExprI32Add]);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprI32Const, 10, kGCPrefix, kExprRttCanon, array,
+ kGCPrefix, kExprArrayNewDefault, array,
+ kExprI32Const, 7,
+ kExprCallFunction, tester.index,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ assertEquals(value_0 + value_1, instance.exports.main());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/loop-unrolling.js b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
index a20701f381..881a4567cc 100644
--- a/deps/v8/test/mjsunit/wasm/loop-unrolling.js
+++ b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
@@ -93,8 +93,8 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestRethrowNested() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
- let except2 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
+ let except2 = builder.addTag(kSig_v_v);
builder.addFunction("rethrow_nested", kSig_i_i)
.addBody([
kExprLoop, kWasmI32,
@@ -139,7 +139,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrow() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
builder.addFunction("throw", kSig_i_i)
.addBody([
kExprLoop, kWasmVoid,
@@ -167,7 +167,7 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
(function TestThrowCatch() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except1 = builder.addException(kSig_v_v);
+ let except1 = builder.addTag(kSig_v_v);
builder.addFunction("throw_catch", kSig_i_i)
.addBody([
kExprLoop, kWasmI32,
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index f0328274e8..5547c9c62f 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -23,7 +23,7 @@ function verifyStack(frames, expected) {
assertContains(exp[4], frames[i].getFileName(), "["+i+"].getFileName()");
var toString;
if (exp[0]) {
- toString = "<anonymous>:wasm-function[" + exp[6] + "]:" + exp[5];
+ toString = exp[4] + ":wasm-function[" + exp[6] + "]:" + exp[5];
if (exp[1] !== null) toString = exp[1] + " (" + toString + ")";
} else {
toString = exp[4] + ":" + exp[2] + ":";
@@ -68,10 +68,10 @@ var module = builder.instantiate({mod: {func: STACK}});
(function testSimpleStack() {
var expected_string = 'Error\n' +
    // The line numbers below will change as this test gains / loses lines.
- ' at STACK (stack.js:38:11)\n' + // --
- ' at main (<anonymous>:wasm-function[1]:0x72)\n' + // --
- ' at testSimpleStack (stack.js:76:18)\n' + // --
- ' at stack.js:78:3'; // --
+ ' at STACK (stack.js:38:11)\n' + // --
+ ' at main (wasm://wasm/862e1cf6:wasm-function[1]:0x72)\n' + // --
+ ' at testSimpleStack (stack.js:76:18)\n' + // --
+ ' at stack.js:78:3'; // --
module.exports.main();
assertEquals(expected_string, stripPath(stack));
diff --git a/deps/v8/test/mjsunit/wasm/test-partial-serialization.js b/deps/v8/test/mjsunit/wasm/test-partial-serialization.js
new file mode 100644
index 0000000000..150c5c8e69
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/test-partial-serialization.js
@@ -0,0 +1,56 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --liftoff --no-wasm-tier-up --expose-gc
+// Compile functions 0 and 2 with Turbofan, the rest with Liftoff:
+// Flags: --wasm-tier-mask-for-testing=5
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const num_functions = 5;
+
+function create_builder() {
+ const builder = new WasmModuleBuilder();
+ for (let i = 0; i < num_functions; ++i) {
+ builder.addFunction('f' + i, kSig_i_v)
+ .addBody(wasmI32Const(i))
+ .exportFunc();
+ }
+ return builder;
+}
+
+function check(instance) {
+ for (let i = 0; i < num_functions; ++i) {
+ const expect_liftoff = i != 0 && i != 2;
+ assertEquals(
+ expect_liftoff, %IsLiftoffFunction(instance.exports['f' + i]),
+ 'function ' + i);
+ }
+}
+
+const wire_bytes = create_builder().toBuffer();
+
+function testTierTestingFlag() {
+ print(arguments.callee.name);
+ const module = new WebAssembly.Module(wire_bytes);
+ const buff = %SerializeWasmModule(module);
+ const instance = new WebAssembly.Instance(module);
+ check(instance);
+ return buff;
+};
+
+const serialized_module = testTierTestingFlag();
+// Do some GCs to make sure the first module got collected and removed from the
+// module cache.
+gc();
+gc();
+gc();
+
+(function testSerializedModule() {
+ print(arguments.callee.name);
+ const module = %DeserializeWasmModule(serialized_module, wire_bytes);
+
+ const instance = new WebAssembly.Instance(module);
+ check(instance);
+})();
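The tier mask in the new test reads as a bit mask over function indices; a quick sketch of the expected split, assuming bit i set means function i is TurboFan-compiled, which matches the comment and the check() expectations above:
// Sketch: decode --wasm-tier-mask-for-testing=5 the way check() expects.
const mask = 5;  // 0b101: bits 0 and 2 set
for (let i = 0; i < 5; ++i) {
  const turbofan = ((mask >> i) & 1) === 1;
  print('f' + i + ': ' + (turbofan ? 'TurboFan' : 'Liftoff'));  // f0, f2 -> TurboFan, rest Liftoff
}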
diff --git a/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js b/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js
new file mode 100644
index 0000000000..ad1d54a594
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --wasm-lazy-compilation --expose-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const num_functions = 2;
+
+function create_builder() {
+ const builder = new WasmModuleBuilder();
+ for (let i = 0; i < num_functions; ++i) {
+ builder.addFunction('f' + i, kSig_i_v)
+ .addBody(wasmI32Const(i))
+ .exportFunc();
+ }
+ return builder;
+}
+
+const wire_bytes = create_builder().toBuffer();
+
+function serializeModule() {
+ const module = new WebAssembly.Module(wire_bytes);
+ const buff = %SerializeWasmModule(module);
+ return buff;
+};
+
+const serialized_module = serializeModule();
+// Do some GCs to make sure the first module got collected and removed from the
+// module cache.
+gc();
+gc();
+gc();
+
+(function testSerializedModule() {
+ print(arguments.callee.name);
+ const module = %DeserializeWasmModule(serialized_module, wire_bytes);
+
+ const instance = new WebAssembly.Instance(module);
+ assertEquals(0, instance.exports.f0());
+ assertEquals(1, instance.exports.f1());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index 0b01df4427..3915109bbe 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --expose-wasm --experimental-wasm-typed-funcref
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
@@ -183,3 +183,24 @@ function instantiate(buffer, ffi) {
assertEquals(i, instance.exports.main());
}
})();
+
+(function TestBigTypeIndices() {
+ print(arguments.callee.name);
+ // These are all positive type indices (e.g. kI31RefCode and not kWasmI31Ref)
+ // and should be treated as such.
+ let indices = [kI31RefCode, kDataRefCode, 200, 400];
+ let kMaxIndex = 400;
+ let builder = new WasmModuleBuilder();
+ for (let i = 0; i <= kMaxIndex; i++) {
+ builder.addType(kSig_i_i);
+ builder.addFunction(undefined, i)
+ .addBody([kExprLocalGet, 0]);
+ builder.addGlobal(wasmRefType(i), false, WasmInitExpr.RefFunc(i));
+ }
+ for (let i of indices) {
+ builder.addFunction('f_' + i, makeSig([], [wasmRefType(i)]))
+ .addBody([kExprRefFunc, ...wasmSignedLeb(i, 5)])
+ .exportFunc();
+ }
+ builder.instantiate();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-array-js-interop.js b/deps/v8/test/mjsunit/wasm/wasm-array-js-interop.js
index 7f7c411c34..e3f0891256 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-array-js-interop.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-array-js-interop.js
@@ -9,9 +9,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
const kIterationsCountForICProgression = 20;
-// TODO(ishell): remove once leaked maps could keep NativeModule alive.
-let instances = [];
-
function createArray_i() {
let builder = new WasmModuleBuilder();
@@ -49,7 +46,6 @@ function createArray_i() {
.exportAs("array_set");
let instance = builder.instantiate();
- instances.push(instance);
let new_array = instance.exports.new_array;
let array_get = instance.exports.array_get;
let array_set = instance.exports.array_set;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
index 70156d262a..7ed8769d50 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
@@ -57,7 +57,7 @@ let instance = (() => {
.addBody([kExprLocalGet, 0])
.exportFunc();
builder.addFunction(key + '_null', makeSig([], [type]))
- .addBody([kExprRefNull, test_types[key]])
+ .addBody([kExprRefNull, ...wasmSignedLeb(test_types[key])])
.exportFunc();
}
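This one-line change follows from the heap-type rework in wasm-module-builder.js below: heap types such as funcref are now stored as negative numbers, so the ref.null immediate has to be emitted as a signed LEB128 value instead of being passed through as a single unsigned byte. A minimal signed-LEB128 sketch (illustrative only, not the builder's actual wasmSignedLeb helper) showing that the encoding stays byte-compatible for the built-in heap types:

// Encode a small integer as signed LEB128.
function signedLeb(value) {
  const bytes = [];
  while (true) {
    const byte = value & 0x7f;
    value >>= 7;
    const done = (value === 0 && (byte & 0x40) === 0) ||
                 (value === -1 && (byte & 0x40) !== 0);
    bytes.push(done ? byte : byte | 0x80);
    if (done) return bytes;
  }
}
// signedLeb(-0x10) -> [0x70]: the funcref heap type still encodes to the same
// single byte that the old positive constant (0x70) produced.
// signedLeb(200)   -> [0xc8, 0x01]: large positive type indices need two bytes.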
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 563af6f8b7..4f0c32fbab 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -67,7 +67,7 @@ let kElementSectionCode = 9; // Elements section
let kCodeSectionCode = 10; // Function code
let kDataSectionCode = 11; // Data segments
let kDataCountSectionCode = 12; // Data segment count (between Element & Code)
-let kExceptionSectionCode = 13; // Exception section (between Memory & Global)
+let kTagSectionCode = 13; // Tag section (between Memory & Global)
// Name section types
let kModuleNameCode = 0;
@@ -77,6 +77,9 @@ let kLocalNamesCode = 2;
let kWasmFunctionTypeForm = 0x60;
let kWasmStructTypeForm = 0x5f;
let kWasmArrayTypeForm = 0x5e;
+let kWasmFunctionExtendingTypeForm = 0x5d;
+let kWasmStructExtendingTypeForm = 0x5c;
+let kWasmArrayExtendingTypeForm = 0x5b;
let kLimitsNoMaximum = 0x00;
let kLimitsWithMaximum = 0x01;
@@ -108,35 +111,52 @@ let kWasmF64 = 0x7c;
let kWasmS128 = 0x7b;
let kWasmI8 = 0x7a;
let kWasmI16 = 0x79;
-let kWasmFuncRef = 0x70;
+
+// These are defined as negative integers to distinguish them from positive type
+// indices.
+let kWasmFuncRef = -0x10;
let kWasmAnyFunc = kWasmFuncRef; // Alias named as in the JS API spec
-let kWasmExternRef = 0x6f;
-let kWasmAnyRef = 0x6e;
-let kWasmEqRef = 0x6d;
+let kWasmExternRef = -0x11;
+let kWasmAnyRef = -0x12;
+let kWasmEqRef = -0x13;
+let kWasmI31Ref = -0x16;
+let kWasmDataRef = -0x19;
+
+// Use the positive-byte versions inside function bodies.
+let kLeb128Mask = 0x7f;
+let kFuncRefCode = kWasmFuncRef & kLeb128Mask;
+let kAnyFuncCode = kFuncRefCode; // Alias named as in the JS API spec
+let kExternRefCode = kWasmExternRef & kLeb128Mask;
+let kAnyRefCode = kWasmAnyRef & kLeb128Mask;
+let kEqRefCode = kWasmEqRef & kLeb128Mask;
+let kI31RefCode = kWasmI31Ref & kLeb128Mask;
+let kDataRefCode = kWasmDataRef & kLeb128Mask;
+
let kWasmOptRef = 0x6c;
let kWasmRef = 0x6b;
-function wasmOptRefType(index) {
- return {opcode: kWasmOptRef, index: index};
+function wasmOptRefType(heap_type) {
+ return {opcode: kWasmOptRef, heap_type: heap_type};
}
-function wasmRefType(index) {
- return {opcode: kWasmRef, index: index};
+function wasmRefType(heap_type) {
+ return {opcode: kWasmRef, heap_type: heap_type};
}
-let kWasmI31Ref = 0x6a;
+
let kWasmRttWithDepth = 0x69;
function wasmRtt(index, depth) {
+ if (index < 0) throw new Error("Expecting non-negative type index");
return {opcode: kWasmRttWithDepth, index: index, depth: depth};
}
let kWasmRtt = 0x68;
function wasmRttNoDepth(index) {
+ if (index < 0) throw new Error("Expecting non-negative type index");
return {opcode: kWasmRtt, index: index};
}
-let kWasmDataRef = 0x67;
let kExternalFunction = 0;
let kExternalTable = 1;
let kExternalMemory = 2;
let kExternalGlobal = 3;
-let kExternalException = 4;
+let kExternalTag = 4;
let kTableZero = 0;
let kMemoryZero = 0;
@@ -199,6 +219,10 @@ function makeSig_v_x(x) {
return makeSig([x], []);
}
+function makeSig_x_v(x) {
+ return makeSig([], [x]);
+}
+
function makeSig_v_xx(x) {
return makeSig([x, x], []);
}
@@ -936,16 +960,21 @@ class Binary {
}
}
+ emit_heap_type(heap_type) {
+ this.emit_bytes(wasmSignedLeb(heap_type, kMaxVarInt32Size));
+ }
+
emit_type(type) {
if ((typeof type) == 'number') {
- this.emit_u8(type);
+ this.emit_u8(type >= 0 ? type : type & kLeb128Mask);
} else {
this.emit_u8(type.opcode);
if ('depth' in type) this.emit_u8(type.depth);
- this.emit_u32v(type.index);
+ this.emit_heap_type(type.heap_type);
}
}
+
emit_init_expr_recursive(expr) {
switch (expr.kind) {
case kExprGlobalGet:
@@ -973,7 +1002,7 @@ class Binary {
break;
case kExprRefNull:
this.emit_u8(kExprRefNull);
- this.emit_u32v(expr.value);
+ this.emit_heap_type(expr.value);
break;
case kExprStructNewWithRtt:
for (let operand of expr.operands) {
@@ -1170,14 +1199,14 @@ class WasmInitExpr {
if ((typeof type) != 'number' && type.opcode != kWasmOptRef) {
throw new Error("Non-defaultable type");
}
- let heap_type = (typeof type) == 'number' ? type : type.index;
+ let heap_type = (typeof type) == 'number' ? type : type.heap_type;
return this.RefNull(heap_type);
}
}
}
class WasmGlobalBuilder {
- // {init} a pair {type, immediate}. Construct it with WasmInitExpr.
+ // {init} should be constructed with WasmInitExpr.
constructor(module, type, mutable, init) {
this.module = module;
this.type = type;
@@ -1223,6 +1252,15 @@ class WasmStruct {
throw new Error('struct fields must be an array');
}
this.fields = fields;
+ this.type_form = kWasmStructTypeForm;
+ }
+}
+
+class WasmStructExtending extends WasmStruct {
+ constructor(fields, supertype_idx) {
+ super(fields);
+ this.supertype = supertype_idx;
+ this.type_form = kWasmStructExtendingTypeForm;
}
}
@@ -1231,9 +1269,17 @@ class WasmArray {
this.type = type;
if (!mutability) throw new Error("Immutable arrays are not supported yet");
this.mutability = mutability;
+ this.type_form = kWasmArrayTypeForm;
}
}
+class WasmArrayExtending extends WasmArray {
+ constructor(type, mutability, supertype_idx) {
+ super(type, mutability);
+ this.supertype = supertype_idx;
+ this.type_form = kWasmArrayExtendingTypeForm;
+ }
+}
class WasmElemSegment {
constructor(table, offset, type, elements, is_decl) {
this.table = table;
@@ -1276,7 +1322,7 @@ class WasmModuleBuilder {
this.exports = [];
this.globals = [];
this.tables = [];
- this.exceptions = [];
+ this.tags = [];
this.functions = [];
this.compilation_hints = [];
this.element_segments = [];
@@ -1285,7 +1331,7 @@ class WasmModuleBuilder {
this.num_imported_funcs = 0;
this.num_imported_globals = 0;
this.num_imported_tables = 0;
- this.num_imported_exceptions = 0;
+ this.num_imported_tags = 0;
return this;
}
@@ -1356,11 +1402,21 @@ class WasmModuleBuilder {
return this.types.length - 1;
}
+ addStructExtending(fields, supertype_idx) {
+ this.types.push(new WasmStructExtending(fields, supertype_idx));
+ return this.types.length - 1;
+ }
+
addArray(type, mutability) {
this.types.push(new WasmArray(type, mutability));
return this.types.length - 1;
}
+ addArrayExtending(type, mutability, supertype_idx) {
+ this.types.push(new WasmArrayExtending(type, mutability, supertype_idx));
+ return this.types.length - 1;
+ }
+
addGlobal(type, mutable, init) {
if (init === undefined) init = WasmInitExpr.defaultFor(type);
let glob = new WasmGlobalBuilder(this, type, mutable, init);
@@ -1382,11 +1438,11 @@ class WasmModuleBuilder {
return table;
}
- addException(type) {
+ addTag(type) {
let type_index = (typeof type) == 'number' ? type : this.addType(type);
- let except_index = this.exceptions.length + this.num_imported_exceptions;
- this.exceptions.push(type_index);
- return except_index;
+ let tag_index = this.tags.length + this.num_imported_tags;
+ this.tags.push(type_index);
+ return tag_index;
}
addFunction(name, type, arg_names) {
@@ -1461,19 +1517,19 @@ class WasmModuleBuilder {
return this.num_imported_tables++;
}
- addImportedException(module, name, type) {
- if (this.exceptions.length != 0) {
- throw new Error('Imported exceptions must be declared before local ones');
+ addImportedTag(module, name, type) {
+ if (this.tags.length != 0) {
+ throw new Error('Imported tags must be declared before local ones');
}
let type_index = (typeof type) == 'number' ? type : this.addType(type);
let o = {
module: module,
name: name,
- kind: kExternalException,
+ kind: kExternalTag,
type_index: type_index
};
this.imports.push(o);
- return this.num_imported_exceptions++;
+ return this.num_imported_tags++;
}
addExport(name, index) {
@@ -1589,16 +1645,22 @@ class WasmModuleBuilder {
section.emit_u32v(wasm.types.length);
for (let type of wasm.types) {
if (type instanceof WasmStruct) {
- section.emit_u8(kWasmStructTypeForm);
+ section.emit_u8(type.type_form);
section.emit_u32v(type.fields.length);
for (let field of type.fields) {
section.emit_type(field.type);
section.emit_u8(field.mutability ? 1 : 0);
}
+ if (type instanceof WasmStructExtending) {
+ section.emit_u32v(type.supertype);
+ }
} else if (type instanceof WasmArray) {
- section.emit_u8(kWasmArrayTypeForm);
+ section.emit_u8(type.type_form);
section.emit_type(type.type);
section.emit_u8(type.mutability ? 1 : 0);
+ if (type instanceof WasmArrayExtending) {
+ section.emit_u32v(type.supertype);
+ }
} else {
section.emit_u8(kWasmFunctionTypeForm);
section.emit_u32v(type.params.length);
@@ -1644,7 +1706,7 @@ class WasmModuleBuilder {
section.emit_u8(has_max ? 1 : 0); // flags
section.emit_u32v(imp.initial); // initial
if (has_max) section.emit_u32v(imp.maximum); // maximum
- } else if (imp.kind == kExternalException) {
+ } else if (imp.kind == kExternalTag) {
section.emit_u32v(kExceptionAttribute);
section.emit_u32v(imp.type_index);
} else {
@@ -1708,12 +1770,12 @@ class WasmModuleBuilder {
});
}
- // Add event section.
- if (wasm.exceptions.length > 0) {
- if (debug) print('emitting events @ ' + binary.length);
- binary.emit_section(kExceptionSectionCode, section => {
- section.emit_u32v(wasm.exceptions.length);
- for (let type_index of wasm.exceptions) {
+ // Add tag section.
+ if (wasm.tags.length > 0) {
+ if (debug) print('emitting tags @ ' + binary.length);
+ binary.emit_section(kTagSectionCode, section => {
+ section.emit_u32v(wasm.tags.length);
+ for (let type_index of wasm.tags) {
section.emit_u32v(kExceptionAttribute);
section.emit_u32v(type_index);
}
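The new kWasmStructExtendingTypeForm / kWasmArrayExtendingTypeForm entries are emitted like the plain struct and array forms, followed by one extra u32 for the supertype index (see the type-section loop above). A hypothetical usage sketch of the new builder methods, assuming the builder's existing makeField(type, mutability) helper that returns {type, mutability}:

// Declare a struct type, a subtype of it, and an array type with a subtype.
let builder = new WasmModuleBuilder();
let struct_super = builder.addStruct([makeField(kWasmI32, true)]);
let struct_sub = builder.addStructExtending(
    [makeField(kWasmI32, true), makeField(kWasmI64, true)], struct_super);
let array_super = builder.addArray(kWasmI32, true);
let array_sub = builder.addArrayExtending(kWasmI32, true, array_super);
// Each extending type is written with its normal payload plus the supertype
// index appended, per the emission code above.
let bytes = builder.toBuffer();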
diff --git a/deps/v8/test/mjsunit/wasm/wasm-struct-js-interop.js b/deps/v8/test/mjsunit/wasm/wasm-struct-js-interop.js
index 2545fa68d8..ec53dbe370 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-struct-js-interop.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-struct-js-interop.js
@@ -9,9 +9,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
const kIterationsCountForICProgression = 20;
-// TODO(ishell): remove once leaked maps could keep NativeModule alive.
-let instances = [];
-
function createSimpleStruct(field_type, value1, value2) {
const builder = new WasmModuleBuilder();
@@ -52,7 +49,6 @@ function createSimpleStruct(field_type, value1, value2) {
.exportAs("set_field");
let instance = builder.instantiate();
- instances.push(instance);
let new_struct = instance.exports.new_struct;
let get_field = instance.exports.get_field;
let set_field = instance.exports.set_field;
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot.js
index d1ddcf1dec..6c607af00c 100644
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot.js
+++ b/deps/v8/test/mjsunit/web-snapshot/web-snapshot.js
@@ -13,13 +13,13 @@ function use(exports) {
function takeAndUseWebSnapshot(createObjects, exports) {
// Take a snapshot in Realm r1.
const r1 = Realm.create();
- Realm.eval(r1, createObjects, {type: 'function'});
+ Realm.eval(r1, createObjects, { type: 'function' });
const snapshot = Realm.takeWebSnapshot(r1, exports);
// Use the snapshot in Realm r2.
const r2 = Realm.create();
const success = Realm.useWebSnapshot(r2, snapshot);
assertTrue(success);
- return Realm.eval(r2, use, {type: 'function', arguments: [exports]});
+ return Realm.eval(r2, use, { type: 'function', arguments: [exports] });
}
(function TestMinimal() {
@@ -191,7 +191,7 @@ function takeAndUseWebSnapshot(createObjects, exports) {
(function TestObjectReferencingObject() {
function createObjects() {
globalThis.foo = {
- bar: {baz: 11525}
+ bar: { baz: 11525 }
};
}
const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
@@ -201,7 +201,7 @@ function takeAndUseWebSnapshot(createObjects, exports) {
(function TestContextReferencingObject() {
function createObjects() {
function outer() {
- let o = {value: 11525};
+ let o = { value: 11525 };
function inner() { return o; }
return inner;
}
@@ -259,7 +259,7 @@ function takeAndUseWebSnapshot(createObjects, exports) {
(function TestArrayContainingObject() {
function createObjects() {
globalThis.foo = {
- array: [{a: 1}, {b: 2}]
+ array: [{ a: 1 }, { b: 2 }]
};
}
const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
@@ -270,14 +270,13 @@ function takeAndUseWebSnapshot(createObjects, exports) {
(function TestArrayContainingFunction() {
function createObjects() {
globalThis.foo = {
- array: [function() { return 5; }]
+ array: [function () { return 5; }]
};
}
const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
assertEquals(5, foo.array[0]());
})();
-
(function TestContextReferencingArray() {
function createObjects() {
function outer() {
@@ -292,3 +291,46 @@ function takeAndUseWebSnapshot(createObjects, exports) {
const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
assertEquals(11525, foo.func()[0]);
})();
+
+(function TestEmptyClass() {
+ function createObjects() {
+ globalThis.Foo = class Foo { };
+ }
+ const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
+ const x = new Foo();
+})();
+
+(function TestClassWithConstructor() {
+ function createObjects() {
+ globalThis.Foo = class {
+ constructor() {
+ this.n = 42;
+ }
+ };
+ }
+ const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
+ const x = new Foo(2);
+ assertEquals(42, x.n);
+})();
+
+(function TestClassWithMethods() {
+ function createObjects() {
+ globalThis.Foo = class {
+ f() { return 7; };
+ };
+ }
+ const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
+ const x = new Foo();
+ assertEquals(7, x.f());
+})();
+
+(async function TestClassWithAsyncMethods() {
+ function createObjects() {
+ globalThis.Foo = class {
+ async g() { return 6; };
+ };
+ }
+ const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
+ const x = new Foo();
+ assertEquals(6, await x.g());
+})();
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 24d9cc7e43..61c6339330 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -498,33 +498,12 @@
'language/expressions/coalesce/tco-pos-undefined': [FAIL],
'language/expressions/coalesce/tco-pos-null': [FAIL],
- # https://github.com/tc39/test262/issues/2034
- 'language/expressions/postfix-decrement/arguments': [SKIP],
- 'language/expressions/postfix-decrement/arguments-nostrict': [SKIP],
- 'language/expressions/postfix-decrement/eval': [SKIP],
- 'language/expressions/postfix-decrement/eval-nostrict': [SKIP],
- 'language/expressions/postfix-increment/arguments': [SKIP],
- 'language/expressions/postfix-increment/arguments-nostrict': [SKIP],
- 'language/expressions/postfix-increment/eval': [SKIP],
- 'language/expressions/postfix-increment/eval-nostrict': [SKIP],
- 'language/expressions/prefix-decrement/arguments': [SKIP],
- 'language/expressions/prefix-decrement/arguments-nostrict': [SKIP],
- 'language/expressions/prefix-decrement/eval': [SKIP],
- 'language/expressions/prefix-decrement/eval-nostrict': [SKIP],
- 'language/expressions/prefix-increment/arguments': [SKIP],
- 'language/expressions/prefix-increment/arguments-nostrict': [SKIP],
- 'language/expressions/prefix-increment/eval': [SKIP],
- 'language/expressions/prefix-increment/eval-nostrict': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=9049
'language/comments/hashbang/use-strict': [SKIP],
# http://crbug/v8/10447
'intl402/Intl/getCanonicalLocales/unicode-ext-canonicalize-yes-to-true': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9818
- 'built-ins/AsyncFunction/proto-from-ctor-realm': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=10381
'built-ins/Array/prototype/concat/arg-length-near-integer-limit': [FAIL],
@@ -564,15 +543,9 @@
# http://crbug/v8/10905
'language/identifier-resolution/assign-to-global-undefined': [FAIL],
- # http://crbug/v8/11530
- 'built-ins/Function/internals/Call/class-ctor-realm': [FAIL],
-
# http://crbug/v8/11531
'built-ins/RegExp/prototype/flags/get-order': [FAIL],
- # http://crbug/v8/11532
- 'language/expressions/object/dstr/object-rest-proxy-gopd-not-called-on-excluded-keys': [FAIL],
-
# http://crbug/v8/11533
'language/statements/class/subclass/default-constructor-spread-override': [FAIL],
@@ -583,17 +556,13 @@
'language/module-code/export-expname-binding-index': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11111
- 'built-ins/ArrayBuffer/options-maxbytelength-diminuitive': [FAIL],
- 'built-ins/ArrayBuffer/options-maxbytelength-excessive': [FAIL],
- 'built-ins/ArrayBuffer/options-maxbytelength-negative': [FAIL],
- 'built-ins/ArrayBuffer/options-maxbytelength-object': [FAIL],
- 'built-ins/ArrayBuffer/options-maxbytelength-poisoned': [FAIL],
- 'built-ins/ArrayBuffer/options-maxbytelength-undefined': [FAIL],
- 'built-ins/ArrayBuffer/options-non-object': [FAIL],
- 'built-ins/ArrayBuffer/prototype/maxByteLength/*': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resizable/*': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/*': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/this-is-sharedarraybuffer': [PASS],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-grow': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-same-size': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-same-size-zero-explicit': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-same-size-zero-implicit': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-shrink': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-shrink-zero-explicit': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/resize/resize-shrink-zero-implicit': [FAIL],
'built-ins/ArrayBuffer/prototype/transfer/*': [FAIL],
'built-ins/ArrayBuffer/prototype/transfer/this-is-sharedarraybuffer': [PASS],
'built-ins/DataView/prototype/byteLength/resizable-array-buffer-auto': [FAIL],
@@ -620,69 +589,55 @@
'built-ins/DataView/prototype/setUint16/resizable-buffer': [FAIL],
'built-ins/DataView/prototype/setUint32/resizable-buffer': [FAIL],
'built-ins/DataView/prototype/setUint8/resizable-buffer': [FAIL],
- 'built-ins/SharedArrayBuffer/options-maxbytelength-diminuitive': [FAIL],
- 'built-ins/SharedArrayBuffer/options-maxbytelength-excessive': [FAIL],
- 'built-ins/SharedArrayBuffer/options-maxbytelength-negative': [FAIL],
- 'built-ins/SharedArrayBuffer/options-maxbytelength-object': [FAIL],
- 'built-ins/SharedArrayBuffer/options-maxbytelength-poisoned': [FAIL],
- 'built-ins/SharedArrayBuffer/options-maxbytelength-undefined': [FAIL],
- 'built-ins/SharedArrayBuffer/options-non-object': [FAIL],
- 'built-ins/SharedArrayBuffer/prototype/growable/*': [FAIL],
- 'built-ins/SharedArrayBuffer/prototype/grow/*': [FAIL],
- 'built-ins/SharedArrayBuffer/prototype/grow/this-is-sharedarraybuffer': [PASS],
- 'built-ins/SharedArrayBuffer/prototype/maxByteLength/*': [FAIL],
'built-ins/TypedArrayConstructors/ctors/typedarray-arg/out-of-bounds-when-species-retrieved-different-type': [FAIL],
'built-ins/TypedArrayConstructors/ctors/typedarray-arg/out-of-bounds-when-species-retrieved-same-type': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/resizable-array-buffer-auto': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/resizable-array-buffer-fixed': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/OwnPropertyKeys/integer-indexes-resizable-array-buffer-auto': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/OwnPropertyKeys/integer-indexes-resizable-array-buffer-fixed': [FAIL],
- 'built-ins/TypedArray/prototype/at/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/byteLength/resizable-array-buffer-auto': [FAIL],
- 'built-ins/TypedArray/prototype/byteLength/resizable-array-buffer-fixed': [FAIL],
- 'built-ins/TypedArray/prototype/byteOffset/resizable-array-buffer-auto': [FAIL],
- 'built-ins/TypedArray/prototype/byteOffset/resizable-array-buffer-fixed': [FAIL],
- 'built-ins/TypedArray/prototype/copyWithin/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/entries/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/every/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/fill/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/filter/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/findIndex/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/find/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/forEach/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/includes/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/indexOf/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/join/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/keys/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/lastIndexOf/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/length/resizable-array-buffer-auto': [FAIL],
- 'built-ins/TypedArray/prototype/length/resizable-array-buffer-fixed': [FAIL],
- 'built-ins/TypedArray/prototype/map/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/reduce/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/reduceRight/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/reverse/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/set/typedarray-arg-set-values-same-buffer-same-type-resized': [FAIL],
- 'built-ins/TypedArray/prototype/set/typedarray-arg-target-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/slice/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/some/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/sort/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/toLocaleString/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/values/return-abrupt-from-this-out-of-bounds': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11544
- 'built-ins/Temporal/now/timeZone/extensible': [FAIL],
- 'built-ins/Temporal/now/timeZone/length': [FAIL],
- 'built-ins/Temporal/now/timeZone/name': [FAIL],
- 'built-ins/Temporal/now/timeZone/new-object': [FAIL],
- 'built-ins/Temporal/now/timeZone/not-a-constructor': [FAIL],
- 'built-ins/Temporal/now/timeZone/prop-desc': [FAIL],
- 'built-ins/Temporal/now/timeZone/return-value': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=11934
- 'language/expressions/delete/super-property-null-base': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=7051
- 'intl402/supportedLocalesOf-unicode-extensions-ignored': [FAIL],
+ 'built-ins/Temporal/*': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12044
+ 'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=11989
+ 'built-ins/Realm/constructor': [FAIL],
+ 'built-ins/Realm/descriptor': [FAIL],
+ 'built-ins/Realm/instance': [FAIL],
+ 'built-ins/Realm/instance-extensibility': [FAIL],
+ 'built-ins/Realm/length': [FAIL],
+ 'built-ins/Realm/name': [FAIL],
+ 'built-ins/Realm/proto': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/descriptor': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/errors-from-the-other-realm-is-wrapped-into-a-typeerror': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/length': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/name': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/not-constructor': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/proto': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/returns-primitive-values': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/returns-symbol-values': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/throws-typeerror-if-evaluation-resolves-to-non-primitive': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/throws-when-argument-is-not-a-string': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/validates-realm-object': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm-extended': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-function-from-return-values-share-no-identity': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-function-observing-their-scopes': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-functions-accepts-callable-objects': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-functions-can-resolve-callable-returns': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-functions-new-wrapping-on-each-evaluation': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-functions-share-no-properties': [FAIL],
+ 'built-ins/Realm/prototype/evaluate/wrapped-functions-share-no-properties-extended': [FAIL],
+ 'built-ins/Realm/prototype/importValue/descriptor': [FAIL],
+ 'built-ins/Realm/prototype/importValue/exportName-tostring': [FAIL],
+ 'built-ins/Realm/prototype/importValue/import-value': [FAIL],
+ 'built-ins/Realm/prototype/importValue/length': [FAIL],
+ 'built-ins/Realm/prototype/importValue/name': [FAIL],
+ 'built-ins/Realm/prototype/importValue/not-constructor': [FAIL],
+ 'built-ins/Realm/prototype/importValue/proto': [FAIL],
+ 'built-ins/Realm/prototype/importValue/specifier-tostring': [FAIL],
+ 'built-ins/Realm/prototype/importValue/throws-if-import-value-does-not-exist': [FAIL],
+ 'built-ins/Realm/prototype/importValue/validates-realm-object': [FAIL],
+ 'built-ins/Realm/prototype/proto': [FAIL],
+ 'built-ins/Realm/prototype/Symbol.toStringTag': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -722,6 +677,38 @@
'harness/detachArrayBuffer': [SKIP],
'harness/detachArrayBuffer-host-detachArrayBuffer': [SKIP],
+
+ # https://github.com/tc39/test262/issues/3111
+ 'built-ins/TypedArray/prototype/at/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/copyWithin/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/entries/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/every/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/fill/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/filter/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLast/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLastIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/find/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/forEach/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/includes/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/indexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/join/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/keys/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/lastIndexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/map/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/reduce/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reduceRight/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reverse/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-target-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/some/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/sort/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/toLocaleString/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/values/return-abrupt-from-this-out-of-bounds': [FAIL],
+
+ # Pending update after https://github.com/tc39/proposal-resizablearraybuffer/issues/68
+ 'built-ins/TypedArray/prototype/byteOffset/resizable-array-buffer-auto': [FAIL],
+
############################ SKIPPED TESTS #############################
# These tests take a looong time to run.
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index e69c9e291b..139af67196 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -46,6 +46,8 @@ from testrunner.outproc import test262
FEATURE_FLAGS = {
'Intl.DateTimeFormat-dayPeriod': '--harmony-intl-dateformat-day-period',
'Intl.Locale-info': '--harmony_intl_locale_info',
+ 'Intl.DateTimeFormat-extend-timezonename': '--harmony_intl_more_timezone',
+ 'Intl.DisplayNames-v2': '--harmony_intl_displaynames_v2',
'Symbol.prototype.description': '--harmony-symbol-description',
'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
'WeakRef': '--harmony-weak-refs-with-cleanup-some',
@@ -53,12 +55,13 @@ FEATURE_FLAGS = {
'IsHTMLDDA': '--allow-natives-syntax',
'top-level-await': '--harmony-top-level-await',
'regexp-match-indices': '--harmony-regexp-match-indices',
- # https://github.com/tc39/test262/pull/2395
'regexp-named-groups': '--harmony-regexp-match-indices',
'error-cause': '--harmony-error-cause',
'import-assertions': '--harmony-import-assertions',
- # https://github.com/tc39/test262/pull/2995
'Object.hasOwn': '--harmony-object-has-own',
+ 'class-static-block': '--harmony-class-static-blocks',
+ 'resizable-arraybuffer': '--harmony-rab-gsab',
+ 'array-find-from-last': '--harmony_array_find_last',
}
SKIPPED_FEATURES = set([])
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index c115daa047..5331944241 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -106,6 +106,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/heap-statistics-collector-unittest.cc",
"heap/cppgc/heap-unittest.cc",
"heap/cppgc/incremental-marking-schedule-unittest.cc",
+ "heap/cppgc/liveness-broker-unittest.cc",
"heap/cppgc/logging-unittest.cc",
"heap/cppgc/marker-unittest.cc",
"heap/cppgc/marking-verifier-unittest.cc",
@@ -201,6 +202,7 @@ v8_source_set("unittests_sources") {
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
"api/access-check-unittest.cc",
+ "api/deserialize-unittest.cc",
"api/exception-unittest.cc",
"api/interceptor-unittest.cc",
"api/isolate-unittest.cc",
@@ -300,6 +302,7 @@ v8_source_set("unittests_sources") {
"compiler/value-numbering-reducer-unittest.cc",
"compiler/zone-stats-unittest.cc",
"date/date-cache-unittest.cc",
+ "debug/debug-property-iterator-unittest.cc",
"diagnostics/eh-frame-iterator-unittest.cc",
"diagnostics/eh-frame-writer-unittest.cc",
"execution/microtask-queue-unittest.cc",
@@ -369,6 +372,7 @@ v8_source_set("unittests_sources") {
"regress/regress-crbug-1056054-unittest.cc",
"regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
+ "runtime/runtime-debug-unittest.cc",
"strings/char-predicates-unittest.cc",
"strings/unicode-unittest.cc",
"tasks/background-compile-task-unittest.cc",
@@ -488,6 +492,12 @@ v8_source_set("unittests_sources") {
sources += [ "wasm/trap-handler-win-unittest.cc" ]
}
+ # Include this test only on arm64 simulator builds on x64 on Linux.
+ if (current_cpu == "x64" && v8_current_cpu == "arm64" && is_linux &&
+ v8_enable_webassembly) {
+ sources += [ "wasm/trap-handler-simulator-unittest.cc" ]
+ }
+
configs = [
"../..:cppgc_base_config",
"../..:external_config",
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index 1216100b23..cfd258aec0 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -179,46 +179,4 @@ TEST_F(AccessRegressionTest,
ASSERT_EQ(getter_c2->native_context(), *Utils::OpenHandle(*context2));
}
-namespace {
-bool failed_access_check_callback_called;
-
-class AccessCheckTestConsoleDelegate : public debug::ConsoleDelegate {
- public:
- void Log(const debug::ConsoleCallArguments& args,
- const debug::ConsoleContext& context) {
- FAIL();
- }
-};
-
-} // namespace
-
-// Ensure that {console.log} does an access check for its arguments.
-TEST_F(AccessCheckTest, ConsoleLog) {
- isolate()->SetFailedAccessCheckCallbackFunction(
- [](v8::Local<v8::Object> host, v8::AccessType type,
- v8::Local<v8::Value> data) {
- failed_access_check_callback_called = true;
- });
- AccessCheckTestConsoleDelegate console{};
- debug::SetConsoleDelegate(isolate(), &console);
-
- Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
- object_template->SetAccessCheckCallback(AccessCheck);
-
- Local<Context> context1 = Context::New(isolate(), nullptr);
- Local<Context> context2 = Context::New(isolate(), nullptr);
-
- Local<Object> object1 =
- object_template->NewInstance(context1).ToLocalChecked();
- EXPECT_TRUE(context2->Global()
- ->Set(context2, v8_str("object_from_context1"), object1)
- .IsJust());
-
- Context::Scope context_scope(context2);
- failed_access_check_callback_called = false;
- CompileRun(isolate(), "console.log(object_from_context1);").ToLocalChecked();
-
- ASSERT_TRUE(failed_access_check_callback_called);
-}
-
} // namespace v8
diff --git a/deps/v8/test/unittests/api/deserialize-unittest.cc b/deps/v8/test/unittests/api/deserialize-unittest.cc
new file mode 100644
index 0000000000..53146ea549
--- /dev/null
+++ b/deps/v8/test/unittests/api/deserialize-unittest.cc
@@ -0,0 +1,236 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+
+class DeserializeTest : public testing::Test {
+ public:
+ class IsolateAndContextScope {
+ public:
+ explicit IsolateAndContextScope(DeserializeTest* test)
+ : test_(test),
+ isolate_wrapper_(kNoCounters),
+ isolate_scope_(isolate_wrapper_.isolate()),
+ handle_scope_(isolate_wrapper_.isolate()),
+ context_(Context::New(isolate_wrapper_.isolate())),
+ context_scope_(context_) {
+ CHECK_NULL(test->isolate_);
+ CHECK(test->context_.IsEmpty());
+ test->isolate_ = isolate_wrapper_.isolate();
+ test->context_ = context_;
+ }
+ ~IsolateAndContextScope() {
+ test_->isolate_ = nullptr;
+ test_->context_ = {};
+ }
+
+ private:
+ DeserializeTest* test_;
+ v8::IsolateWrapper isolate_wrapper_;
+ v8::Isolate::Scope isolate_scope_;
+ v8::HandleScope handle_scope_;
+ v8::Local<v8::Context> context_;
+ v8::Context::Scope context_scope_;
+ };
+
+ Local<String> NewString(const char* val) {
+ return String::NewFromUtf8(isolate(), val).ToLocalChecked();
+ }
+
+ Local<Value> RunGlobalFunc(const char* name) {
+ Local<Value> func_val =
+ context()->Global()->Get(context(), NewString(name)).ToLocalChecked();
+ CHECK(func_val->IsFunction());
+ Local<Function> func = Local<Function>::Cast(func_val);
+ return func->Call(context(), Undefined(isolate()), 0, nullptr)
+ .ToLocalChecked();
+ }
+
+ Isolate* isolate() { return isolate_; }
+ v8::Local<v8::Context> context() { return context_.ToLocalChecked(); }
+
+ private:
+ Isolate* isolate_ = nullptr;
+ v8::MaybeLocal<v8::Context> context_;
+};
+
+// Check that deserialization works.
+TEST_F(DeserializeTest, Deserialize) {
+ std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
+
+ {
+ IsolateAndContextScope scope(this);
+
+ Local<String> source_code = NewString("function foo() { return 42; }");
+ Local<Script> script =
+ Script::Compile(context(), source_code).ToLocalChecked();
+
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("foo"), Integer::New(isolate(), 42));
+
+ cached_data.reset(
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript()));
+ }
+
+ {
+ IsolateAndContextScope scope(this);
+
+ Local<String> source_code = NewString("function foo() { return 42; }");
+ ScriptCompiler::Source source(source_code, cached_data.release());
+ Local<Script> script =
+ ScriptCompiler::Compile(context(), &source,
+ ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+
+ CHECK(!source.GetCachedData()->rejected);
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("foo"), v8::Integer::New(isolate(), 42));
+ }
+}
+
+// Check that deserialization with a different script rejects the cache but
+// still works via standard compilation.
+TEST_F(DeserializeTest, DeserializeRejectsDifferentSource) {
+ std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
+
+ {
+ IsolateAndContextScope scope(this);
+
+ Local<String> source_code = NewString("function foo() { return 42; }");
+ Local<Script> script =
+ Script::Compile(context(), source_code).ToLocalChecked();
+
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("foo"), Integer::New(isolate(), 42));
+
+ cached_data.reset(
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript()));
+ }
+
+ {
+ IsolateAndContextScope scope(this);
+
+ // The source hash is based on the source length, so we have to make sure
+ // that the length is different here.
+ Local<String> source_code = NewString("function bar() { return 142; }");
+ ScriptCompiler::Source source(source_code, cached_data.release());
+ Local<Script> script =
+ ScriptCompiler::Compile(context(), &source,
+ ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+
+ CHECK(source.GetCachedData()->rejected);
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("bar"), v8::Integer::New(isolate(), 142));
+ }
+}
+
+class DeserializeThread : public base::Thread {
+ public:
+ explicit DeserializeThread(ScriptCompiler::ConsumeCodeCacheTask* task)
+ : Thread(base::Thread::Options("DeserializeThread")), task_(task) {}
+
+ void Run() override { task_->Run(); }
+
+ std::unique_ptr<ScriptCompiler::ConsumeCodeCacheTask> TakeTask() {
+ return std::move(task_);
+ }
+
+ private:
+ std::unique_ptr<ScriptCompiler::ConsumeCodeCacheTask> task_;
+};
+
+// Check that off-thread deserialization works.
+TEST_F(DeserializeTest, OffThreadDeserialize) {
+ std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
+
+ {
+ IsolateAndContextScope scope(this);
+
+ Local<String> source_code = NewString("function foo() { return 42; }");
+ Local<Script> script =
+ Script::Compile(context(), source_code).ToLocalChecked();
+
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("foo"), Integer::New(isolate(), 42));
+
+ cached_data.reset(
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript()));
+ }
+
+ {
+ IsolateAndContextScope scope(this);
+
+ DeserializeThread deserialize_thread(
+ ScriptCompiler::StartConsumingCodeCache(
+ isolate(), std::make_unique<ScriptCompiler::CachedData>(
+ cached_data->data, cached_data->length,
+ ScriptCompiler::CachedData::BufferNotOwned)));
+ CHECK(deserialize_thread.Start());
+ deserialize_thread.Join();
+
+ Local<String> source_code = NewString("function foo() { return 42; }");
+ ScriptCompiler::Source source(source_code, cached_data.release(),
+ deserialize_thread.TakeTask().release());
+ Local<Script> script =
+ ScriptCompiler::Compile(context(), &source,
+ ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+
+ CHECK(!source.GetCachedData()->rejected);
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("foo"), v8::Integer::New(isolate(), 42));
+ }
+}
+
+// Check that off-thread deserialization with a different script rejects the
+// cache but still works via standard compilation.
+TEST_F(DeserializeTest, OffThreadDeserializeRejectsDifferentSource) {
+ std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
+
+ {
+ IsolateAndContextScope scope(this);
+
+ Local<String> source_code = NewString("function foo() { return 42; }");
+ Local<Script> script =
+ Script::Compile(context(), source_code).ToLocalChecked();
+
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("foo"), Integer::New(isolate(), 42));
+
+ cached_data.reset(
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript()));
+ }
+
+ {
+ IsolateAndContextScope scope(this);
+
+ DeserializeThread deserialize_thread(
+ ScriptCompiler::StartConsumingCodeCache(
+ isolate(), std::make_unique<ScriptCompiler::CachedData>(
+ cached_data->data, cached_data->length,
+ ScriptCompiler::CachedData::BufferNotOwned)));
+ CHECK(deserialize_thread.Start());
+ deserialize_thread.Join();
+
+ Local<String> source_code = NewString("function bar() { return 142; }");
+ ScriptCompiler::Source source(source_code, cached_data.release(),
+ deserialize_thread.TakeTask().release());
+ Local<Script> script =
+ ScriptCompiler::Compile(context(), &source,
+ ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+
+ CHECK(source.GetCachedData()->rejected);
+ CHECK(!script->Run(context()).IsEmpty());
+ CHECK_EQ(RunGlobalFunc("bar"), v8::Integer::New(isolate(), 142));
+ }
+}
+
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index c3a2874864..cc4ee6188f 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -6,6 +6,10 @@
#include "testing/gtest/include/gtest/gtest.h"
+#if V8_OS_WIN
+#include <windows.h>
+#endif
+
namespace v8 {
namespace base {
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index eedabed934..882afea314 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -12,6 +12,8 @@
#endif
#if V8_OS_WIN
+#include <windows.h>
+
#include "src/base/win32-headers.h"
#endif
@@ -377,6 +379,115 @@ TEST(TimeTicks, IsMonotonic) {
}
}
+namespace {
+void Sleep(TimeDelta wait_time) {
+ ElapsedTimer waiter;
+ waiter.Start();
+ while (!waiter.HasExpired(wait_time)) {
+ OS::Sleep(TimeDelta::FromMilliseconds(1));
+ }
+}
+} // namespace
+
+TEST(ElapsedTimer, StartStop) {
+ TimeDelta wait_time = TimeDelta::FromMilliseconds(100);
+ TimeDelta noise = TimeDelta::FromMilliseconds(100);
+ ElapsedTimer timer;
+ DCHECK(!timer.IsStarted());
+
+ timer.Start();
+ DCHECK(timer.IsStarted());
+
+ Sleep(wait_time);
+ TimeDelta delta = timer.Elapsed();
+ DCHECK(timer.IsStarted());
+ EXPECT_GE(delta, wait_time);
+ EXPECT_LT(delta, wait_time + noise);
+
+ DCHECK(!timer.IsPaused());
+ timer.Pause();
+ DCHECK(timer.IsPaused());
+ Sleep(wait_time);
+
+ timer.Resume();
+ DCHECK(timer.IsStarted());
+ delta = timer.Elapsed();
+ DCHECK(!timer.IsPaused());
+ timer.Pause();
+ DCHECK(timer.IsPaused());
+ EXPECT_GE(delta, wait_time);
+ EXPECT_LT(delta, wait_time + noise);
+
+ Sleep(wait_time);
+ timer.Resume();
+ DCHECK(!timer.IsPaused());
+ DCHECK(timer.IsStarted());
+ delta = timer.Elapsed();
+ EXPECT_GE(delta, wait_time);
+ EXPECT_LT(delta, wait_time + noise);
+
+ timer.Stop();
+ DCHECK(!timer.IsStarted());
+}
+
+TEST(ElapsedTimer, StartStopArgs) {
+ TimeDelta wait_time = TimeDelta::FromMilliseconds(100);
+ ElapsedTimer timer1;
+ ElapsedTimer timer2;
+ DCHECK(!timer1.IsStarted());
+ DCHECK(!timer2.IsStarted());
+
+ TimeTicks now = TimeTicks::HighResolutionNow();
+ timer1.Start(now);
+ timer2.Start(now);
+ DCHECK(timer1.IsStarted());
+ DCHECK(timer2.IsStarted());
+
+ Sleep(wait_time);
+ now = TimeTicks::HighResolutionNow();
+ TimeDelta delta1 = timer1.Elapsed(now);
+ Sleep(wait_time);
+ TimeDelta delta2 = timer2.Elapsed(now);
+ DCHECK(timer1.IsStarted());
+ DCHECK(timer2.IsStarted());
+ EXPECT_GE(delta1, delta2);
+ Sleep(wait_time);
+ EXPECT_NE(delta1, timer2.Elapsed());
+
+ TimeTicks now2 = TimeTicks::HighResolutionNow();
+ EXPECT_NE(timer1.Elapsed(now), timer1.Elapsed(now2));
+ EXPECT_NE(delta1, timer1.Elapsed(now2));
+ EXPECT_NE(delta2, timer2.Elapsed(now2));
+ EXPECT_GE(timer1.Elapsed(now2), timer2.Elapsed(now2));
+
+ now = TimeTicks::HighResolutionNow();
+ timer1.Pause(now);
+ timer2.Pause(now);
+ DCHECK(timer1.IsPaused());
+ DCHECK(timer2.IsPaused());
+ Sleep(wait_time);
+
+ now = TimeTicks::HighResolutionNow();
+ timer1.Resume(now);
+ DCHECK(!timer1.IsPaused());
+ DCHECK(timer2.IsPaused());
+ Sleep(wait_time);
+ timer2.Resume(now);
+ DCHECK(!timer1.IsPaused());
+ DCHECK(!timer2.IsPaused());
+ DCHECK(timer1.IsStarted());
+ DCHECK(timer2.IsStarted());
+
+ delta1 = timer1.Elapsed(now);
+ Sleep(wait_time);
+ delta2 = timer2.Elapsed(now);
+ EXPECT_GE(delta1, delta2);
+
+ timer1.Stop();
+ timer2.Stop();
+ DCHECK(!timer1.IsStarted());
+ DCHECK(!timer2.IsStarted());
+}
#if V8_OS_ANDROID
#define MAYBE_ThreadNow DISABLED_ThreadNow
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index f8704d36a0..b7560e5d41 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler-dispatcher/compiler-dispatcher.h"
-
#include <sstream>
#include "include/v8-platform.h"
@@ -13,6 +11,7 @@
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/codegen/compiler.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
#include "src/init/v8.h"
@@ -27,17 +26,18 @@
namespace v8 {
namespace internal {
-class CompilerDispatcherTestFlags {
+class LazyCompilerDispatcherTestFlags {
public:
- CompilerDispatcherTestFlags(const CompilerDispatcherTestFlags&) = delete;
- CompilerDispatcherTestFlags& operator=(const CompilerDispatcherTestFlags&) =
+ LazyCompilerDispatcherTestFlags(const LazyCompilerDispatcherTestFlags&) =
delete;
+ LazyCompilerDispatcherTestFlags& operator=(
+ const LazyCompilerDispatcherTestFlags&) = delete;
static void SetFlagsForTest() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
FLAG_single_threaded = true;
FlagList::EnforceFlagImplications();
- FLAG_compiler_dispatcher = true;
+ FLAG_lazy_compile_dispatcher = true;
FLAG_finalize_streaming_on_background = false;
}
@@ -51,28 +51,30 @@ class CompilerDispatcherTestFlags {
static SaveFlags* save_flags_;
};
-SaveFlags* CompilerDispatcherTestFlags::save_flags_ = nullptr;
+SaveFlags* LazyCompilerDispatcherTestFlags::save_flags_ = nullptr;
-class CompilerDispatcherTest : public TestWithNativeContext {
+class LazyCompilerDispatcherTest : public TestWithNativeContext {
public:
- CompilerDispatcherTest() = default;
- ~CompilerDispatcherTest() override = default;
- CompilerDispatcherTest(const CompilerDispatcherTest&) = delete;
- CompilerDispatcherTest& operator=(const CompilerDispatcherTest&) = delete;
+ LazyCompilerDispatcherTest() = default;
+ ~LazyCompilerDispatcherTest() override = default;
+ LazyCompilerDispatcherTest(const LazyCompilerDispatcherTest&) = delete;
+ LazyCompilerDispatcherTest& operator=(const LazyCompilerDispatcherTest&) =
+ delete;
static void SetUpTestCase() {
- CompilerDispatcherTestFlags::SetFlagsForTest();
+ LazyCompilerDispatcherTestFlags::SetFlagsForTest();
TestWithNativeContext::SetUpTestCase();
}
static void TearDownTestCase() {
TestWithNativeContext::TearDownTestCase();
- CompilerDispatcherTestFlags::RestoreFlags();
+ LazyCompilerDispatcherTestFlags::RestoreFlags();
}
- static base::Optional<CompilerDispatcher::JobId> EnqueueUnoptimizedCompileJob(
- CompilerDispatcher* dispatcher, Isolate* isolate,
- Handle<SharedFunctionInfo> shared) {
+ static base::Optional<LazyCompileDispatcher::JobId>
+ EnqueueUnoptimizedCompileJob(LazyCompileDispatcher* dispatcher,
+ Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
UnoptimizedCompileState state(isolate);
std::unique_ptr<ParseInfo> outer_parse_info =
test::OuterParseInfoForShared(isolate, shared, &state);
@@ -333,22 +335,22 @@ class MockPlatform : public v8::Platform {
} // namespace
-TEST_F(CompilerDispatcherTest, Construct) {
+TEST_F(LazyCompilerDispatcherTest, Construct) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IsEnqueued) {
+TEST_F(LazyCompilerDispatcherTest, IsEnqueued) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_TRUE(job_id);
@@ -368,15 +370,15 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
platform.ClearWorkerTasks();
}
-TEST_F(CompilerDispatcherTest, FinishNow) {
+TEST_F(LazyCompilerDispatcherTest, FinishNow) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
@@ -391,16 +393,16 @@ TEST_F(CompilerDispatcherTest, FinishNow) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, CompileAndFinalize) {
+TEST_F(LazyCompilerDispatcherTest, CompileAndFinalize) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_TRUE(platform.WorkerTasksPending());
@@ -426,16 +428,16 @@ TEST_F(CompilerDispatcherTest, CompileAndFinalize) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IdleTaskNoIdleTime) {
+TEST_F(LazyCompilerDispatcherTest, IdleTaskNoIdleTime) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
@@ -468,9 +470,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskNoIdleTime) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
+TEST_F(LazyCompilerDispatcherTest, IdleTaskSmallIdleTime) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared_1 =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
@@ -479,9 +481,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared_2->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_1 =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
- base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_2 =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
@@ -518,9 +520,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IdleTaskException) {
+TEST_F(LazyCompilerDispatcherTest, IdleTaskException) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, 50);
std::string raw_script("(x) { var a = ");
for (int i = 0; i < 1000; i++) {
@@ -534,7 +536,7 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
test::CreateSharedFunctionInfo(i_isolate(), script);
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
@@ -549,15 +551,15 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
+TEST_F(LazyCompilerDispatcherTest, FinishNowWithWorkerTask) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
@@ -582,9 +584,9 @@ TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
+TEST_F(LazyCompilerDispatcherTest, IdleTaskMultipleJobs) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared_1 =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
@@ -593,9 +595,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared_2->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_1 =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
- base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_2 =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
@@ -617,9 +619,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, FinishNowException) {
+TEST_F(LazyCompilerDispatcherTest, FinishNowException) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, 50);
std::string raw_script("(x) { var a = ");
for (int i = 0; i < 1000; i++) {
@@ -633,7 +635,7 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
test::CreateSharedFunctionInfo(i_isolate(), script);
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
@@ -649,15 +651,15 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, AbortJobNotStarted) {
+TEST_F(LazyCompilerDispatcherTest, AbortJobNotStarted) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
@@ -679,15 +681,15 @@ TEST_F(CompilerDispatcherTest, AbortJobNotStarted) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, AbortJobAlreadyStarted) {
+TEST_F(LazyCompilerDispatcherTest, AbortJobAlreadyStarted) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
@@ -744,10 +746,10 @@ TEST_F(CompilerDispatcherTest, AbortJobAlreadyStarted) {
dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
+TEST_F(LazyCompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
// Use the real dispatcher so that CompileLazy checks the same one for
// enqueued functions.
- CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
+ LazyCompileDispatcher* dispatcher = i_isolate()->lazy_compile_dispatcher();
const char raw_script[] = "function lazy() { return 42; }; lazy;";
test::ScriptResource* script =
@@ -756,7 +758,7 @@ TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
ASSERT_FALSE(shared->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id =
+ base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared);
dispatcher->RegisterSharedFunctionInfo(*job_id, *shared);
@@ -767,10 +769,10 @@ TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
ASSERT_FALSE(dispatcher->IsEnqueued(shared));
}
-TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
+TEST_F(LazyCompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
// Use the real dispatcher so that CompileLazy checks the same one for
// enqueued functions.
- CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
+ LazyCompileDispatcher* dispatcher = i_isolate()->lazy_compile_dispatcher();
const char raw_source_2[] = "function lazy2() { return 42; }; lazy2;";
test::ScriptResource* source_2 =
@@ -786,11 +788,11 @@ TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
Handle<SharedFunctionInfo> shared_1(lazy1->shared(), i_isolate());
ASSERT_FALSE(shared_1->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_1 =
EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared_1);
dispatcher->RegisterSharedFunctionInfo(*job_id_1, *shared_1);
- base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_2 =
EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared_2);
dispatcher->RegisterSharedFunctionInfo(*job_id_2, *shared_2);
@@ -804,9 +806,9 @@ TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
ASSERT_FALSE(dispatcher->IsEnqueued(shared_2));
}
-TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
+TEST_F(LazyCompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ LazyCompileDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Handle<SharedFunctionInfo> shared_1 =
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
@@ -816,11 +818,11 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared_2->is_compiled());
- base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_1 =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
- base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ base::Optional<LazyCompileDispatcher::JobId> job_id_2 =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
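These hunks are a mechanical rename of CompilerDispatcher to LazyCompileDispatcher (and of the isolate accessor to lazy_compile_dispatcher()); the job lifecycle itself is unchanged. As a hedged sketch of the pattern the renamed tests keep exercising, assuming the same test helpers shown above (MockPlatform, EnqueueUnoptimizedCompileJob, test::CreateSharedFunctionInfo) and a header path that is an assumption:

    // Sketch only, not a drop-in test; header and namespace spellings are assumptions.
    #include "src/compiler-dispatcher/lazy-compile-dispatcher.h"

    void EnqueueAndAbort(v8::internal::Isolate* isolate, MockPlatform* platform) {
      using v8::internal::LazyCompileDispatcher;
      LazyCompileDispatcher dispatcher(isolate, platform,
                                       v8::internal::FLAG_stack_size);
      v8::internal::Handle<v8::internal::SharedFunctionInfo> shared =
          test::CreateSharedFunctionInfo(isolate, nullptr);
      // JobId is now scoped to LazyCompileDispatcher instead of CompilerDispatcher.
      v8::base::Optional<LazyCompileDispatcher::JobId> job_id =
          EnqueueUnoptimizedCompileJob(&dispatcher, isolate, shared);
      dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
      dispatcher.AbortAll();  // tests either drive idle/worker tasks or abort
    }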
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 9a69dce916..1ecf511149 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -36,7 +36,7 @@ class JSCallReducerTest : public TypedGraphTest {
&machine);
GraphReducer graph_reducer(zone(), graph(), tick_counter(), broker());
JSCallReducer reducer(&graph_reducer, &jsgraph, broker(), zone(),
- JSCallReducer::kNoFlags, &deps_);
+ JSCallReducer::kNoFlags);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index de1271bc4c..c397be78d8 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -157,7 +157,7 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
Node* const control = graph()->start();
Reduction const r = Reduce(graph()->NewNode(
javascript()->CreateFunctionContext(
- handle(ScopeInfo::Empty(isolate()), isolate()), 8, FUNCTION_SCOPE),
+ MakeRef(broker(), ScopeInfo::Empty(isolate())), 8, FUNCTION_SCOPE),
context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
@@ -171,8 +171,8 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
- Handle<ScopeInfo> scope_info =
- ReadOnlyRoots(isolate()).empty_function_scope_info_handle();
+ ScopeInfoRef scope_info =
+ MakeRef(broker(), ReadOnlyRoots(isolate()).empty_function_scope_info());
Node* const object = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
@@ -193,8 +193,8 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
// JSCreateCatchContext
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
- Handle<ScopeInfo> scope_info =
- ReadOnlyRoots(isolate()).empty_function_scope_info_handle();
+ ScopeInfoRef scope_info =
+ MakeRef(broker(), ReadOnlyRoots(isolate()).empty_function_scope_info());
Node* const exception = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index c20149d21e..9f5f4d5aa0 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -6,6 +6,7 @@
#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-operator.h"
@@ -39,7 +40,8 @@ Type const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
class JSTypedLoweringTest : public TypedGraphTest {
public:
- JSTypedLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
+ JSTypedLoweringTest()
+ : TypedGraphTest(3), javascript_(zone()), deps_(broker(), zone()) {}
~JSTypedLoweringTest() override = default;
protected:
@@ -59,6 +61,7 @@ class JSTypedLoweringTest : public TypedGraphTest {
private:
JSOperatorBuilder javascript_;
+ CompilationDependencies deps_;
};
@@ -397,7 +400,7 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
- Handle<Name> name = factory()->length_string();
+ NameRef name = MakeRef(broker(), factory()->length_string());
Node* const receiver = Parameter(Type::String(), 0);
Node* const feedback = UndefinedConstant();
Node* const context = UndefinedConstant();
diff --git a/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc b/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
new file mode 100644
index 0000000000..1b1fcf21c3
--- /dev/null
+++ b/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
@@ -0,0 +1,99 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "src/api/api.h"
+#include "src/objects/objects-inl.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace debug {
+namespace {
+
+using DebugPropertyIteratorTest = TestWithContext;
+
+TEST_F(DebugPropertyIteratorTest, WalksPrototypeChain) {
+ TryCatch try_catch(isolate());
+
+ Local<Object> object = Object::New(isolate());
+
+ ASSERT_TRUE(object
+ ->CreateDataProperty(
+ context(),
+ String::NewFromUtf8Literal(isolate(), "own_property"),
+ Number::New(isolate(), 42))
+ .FromMaybe(false));
+
+ Local<Object> prototype = Object::New(isolate());
+ ASSERT_TRUE(object->SetPrototype(context(), prototype).FromMaybe(false));
+ ASSERT_TRUE(prototype
+ ->CreateDataProperty(context(),
+ String::NewFromUtf8Literal(
+ isolate(), "prototype_property"),
+ Number::New(isolate(), 21))
+ .FromMaybe(false));
+
+ auto iterator = PropertyIterator::Create(context(), object);
+ ASSERT_NE(iterator, nullptr);
+ ASSERT_FALSE(iterator->Done());
+ EXPECT_TRUE(iterator->is_own());
+ char name_buffer[100];
+ iterator->name().As<v8::String>()->WriteUtf8(isolate(), name_buffer);
+ EXPECT_EQ("own_property", std::string(name_buffer));
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+
+ ASSERT_FALSE(iterator->Done());
+ EXPECT_TRUE(iterator->is_own());
+ iterator->name().As<v8::String>()->WriteUtf8(isolate(), name_buffer);
+ EXPECT_EQ("own_property", std::string(name_buffer));
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+
+ ASSERT_FALSE(iterator->Done());
+ EXPECT_FALSE(iterator->is_own());
+ iterator->name().As<v8::String>()->WriteUtf8(isolate(), name_buffer);
+ EXPECT_EQ("prototype_property", std::string(name_buffer));
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+
+ ASSERT_FALSE(iterator->Done());
+}
+
+bool may_access = true;
+
+bool AccessCheck(Local<Context> accessing_context,
+ Local<Object> accessed_object, Local<Value> data) {
+ return may_access;
+}
+
+TEST_F(DebugPropertyIteratorTest, DoestWalksPrototypeChainIfInaccesible) {
+ TryCatch try_catch(isolate());
+
+ Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
+ object_template->SetAccessCheckCallback(AccessCheck);
+
+ Local<Object> object =
+ object_template->NewInstance(context()).ToLocalChecked();
+ ASSERT_TRUE(object
+ ->CreateDataProperty(
+ context(),
+ String::NewFromUtf8Literal(isolate(), "own_property"),
+ Number::New(isolate(), 42))
+ .FromMaybe(false));
+
+ auto iterator = PropertyIterator::Create(context(), object);
+ may_access = false;
+ ASSERT_NE(iterator, nullptr);
+ ASSERT_FALSE(iterator->Done());
+ EXPECT_TRUE(iterator->is_own());
+ char name_buffer[100];
+ iterator->name().As<v8::String>()->WriteUtf8(isolate(), name_buffer);
+ EXPECT_EQ("own_property", std::string(name_buffer));
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+
+ ASSERT_TRUE(iterator->Done());
+}
+
+} // namespace
+} // namespace debug
+} // namespace v8
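For orientation, the API under test in this new file is v8::debug::PropertyIterator. A hedged sketch of the loop both tests drive (the header below is an assumption; the tests themselves only include include/v8.h and src/api/api.h):

    #include <memory>
    #include "include/v8.h"
    #include "src/debug/debug-interface.h"  // assumed home of v8::debug::PropertyIterator

    // Own properties are visited first, then the prototype chain, unless an
    // access check refuses the prototype (second test above).
    void VisitProperties(v8::Local<v8::Context> context,
                         v8::Local<v8::Object> object) {
      auto it = v8::debug::PropertyIterator::Create(context, object);
      if (!it) return;
      while (!it->Done()) {
        v8::Local<v8::Name> name = it->name();
        bool own = it->is_own();  // false once the iterator reached a prototype
        (void)name; (void)own;
        if (!it->Advance().FromMaybe(false)) break;  // Maybe<bool>; may throw
      }
    }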
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
index 32fdd6311a..a94e5357dd 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
@@ -116,10 +116,10 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
}
}
- void CheckPageRemoved(const BasePage* page) {
+ bool PageInBackend(const BasePage* page) {
const Heap* heap = Heap::From(GetHeap());
const PageBackend* backend = heap->page_backend();
- EXPECT_EQ(nullptr, backend->Lookup(reinterpret_cast<ConstAddress>(page)));
+ return backend->Lookup(reinterpret_cast<ConstAddress>(page));
}
bool FreeListContains(const BaseSpace& space,
@@ -179,7 +179,8 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfNormalPage) {
}
TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) {
- // Non finalizable objects are swept right away.
+ // Non finalizable objects are swept right away but the page is only returned
+ // from the main thread.
using GCedType = LargeNonFinalizable;
auto* unmarked_object = MakeGarbageCollected<GCedType>(GetAllocationHandle());
@@ -205,14 +206,17 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) {
EXPECT_TRUE(HeapObjectHeader::FromObject(marked_object).IsMarked());
#endif
+ // The page should not have been removed on the background threads.
+ EXPECT_TRUE(PageInBackend(unmarked_page));
+
+ FinishSweeping();
+
// Check that free list entries are created right away for non-finalizable
// objects, but not immediately returned to the space's freelist.
- CheckPageRemoved(unmarked_page);
+ EXPECT_FALSE(PageInBackend(unmarked_page));
// Check that marked pages are returned to space right away.
EXPECT_NE(space.end(), std::find(space.begin(), space.end(), marked_page));
-
- FinishSweeping();
}
TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfNormalPage) {
@@ -279,7 +283,29 @@ TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfLargePage) {
// Check that the destructor was executed.
EXPECT_EQ(1u, g_destructor_callcount);
// Check that page was unmapped.
- CheckPageRemoved(page);
+ EXPECT_FALSE(PageInBackend(page));
+}
+
+TEST_F(ConcurrentSweeperTest, DestroyLargePageOnMainThread) {
+ // This test fails with TSAN when large pages are destroyed concurrently
+ // without proper support by the backend.
+ using GCedType = LargeNonFinalizable;
+
+ auto* object = MakeGarbageCollected<GCedType>(GetAllocationHandle());
+ auto* page = BasePage::FromPayload(object);
+
+ StartSweeping();
+
+ // Allocating another large object should not race here.
+ MakeGarbageCollected<GCedType>(GetAllocationHandle());
+
+ // Wait for concurrent sweeping to finish.
+ WaitForConcurrentSweeping();
+
+ FinishSweeping();
+
+ // Check that page was unmapped.
+ EXPECT_FALSE(PageInBackend(page));
}
TEST_F(ConcurrentSweeperTest, IncrementalSweeping) {
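The CheckPageRemoved → PageInBackend change turns an asserting helper into a boolean predicate, so the large-page test can assert both states around FinishSweeping(). Collected into one sketch (same calls as in the diff):

    bool PageInBackend(const BasePage* page) {
      // Lookup() returns nullptr once the page has been given back to the backend.
      return Heap::From(GetHeap())->page_backend()->Lookup(
          reinterpret_cast<ConstAddress>(page));
    }

    // BackgroundSweepOfLargePage now checks:
    EXPECT_TRUE(PageInBackend(unmarked_page));   // still mapped after concurrent sweep
    FinishSweeping();
    EXPECT_FALSE(PageInBackend(unmarked_page));  // only released on the main thread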
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index c759308723..534f744e7e 100644
--- a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -162,9 +162,12 @@ TEST_F(EphemeronPairTest, EmptyKey) {
using EphemeronPairGCTest = testing::TestWithHeap;
TEST_F(EphemeronPairGCTest, EphemeronPairValueIsCleared) {
+ GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
- Persistent<EphemeronHolder> holder = MakeGarbageCollected<EphemeronHolder>(
- GetAllocationHandle(), nullptr, value);
+ Persistent<EphemeronHolder> holder =
+ MakeGarbageCollected<EphemeronHolder>(GetAllocationHandle(), key, value);
+ // The precise GC will not find the `key` anywhere and thus clear the
+ // ephemeron.
PreciseGC();
EXPECT_EQ(nullptr, holder->ephemeron_pair().value.Get());
}
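The fix above gives the holder a real key, so the test now covers a pair whose key becomes unreachable rather than one constructed with a null key. A hedged sketch of the holder shape the test assumes, built on cppgc's public EphemeronPair (include/cppgc/ephemeron-pair.h; type names are illustrative):

    #include "include/cppgc/ephemeron-pair.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/visitor.h"

    class GCed : public cppgc::GarbageCollected<GCed> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    class EphemeronHolder : public cppgc::GarbageCollected<EphemeronHolder> {
     public:
      EphemeronHolder(GCed* key, GCed* value) : pair_(key, value) {}
      // Tracing the pair registers it as an ephemeron: `value` stays alive only
      // while `key` is otherwise reachable, which is what the GC above relies on.
      void Trace(cppgc::Visitor* visitor) const { visitor->Trace(pair_); }
      const cppgc::EphemeronPair<GCed, GCed>& ephemeron_pair() const { return pair_; }

     private:
      cppgc::EphemeronPair<GCed, GCed> pair_;
    };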
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
index e3a1d671df..bc9149e9b2 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
@@ -367,16 +367,5 @@ TEST_F(GCHeapDeathTest, LargeChainOfNewStates) {
EXPECT_DEATH_IF_SUPPORTED(Heap::From(GetHeap())->Terminate(), "");
}
-TEST_F(GCHeapTest, IsHeapObjectAliveForConstPointer) {
- // Regression test: http://crbug.com/661363.
- GCed<64>* object = MakeGarbageCollected<GCed<64>>(GetAllocationHandle());
- HeapObjectHeader& header = HeapObjectHeader::FromObject(object);
- LivenessBroker broker = internal::LivenessBrokerFactory::Create();
- EXPECT_TRUE(header.TryMarkAtomic());
- EXPECT_TRUE(broker.IsHeapObjectAlive(object));
- const GCed<64>* const_object = const_cast<const GCed<64>*>(object);
- EXPECT_TRUE(broker.IsHeapObjectAlive(const_object));
-}
-
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/liveness-broker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/liveness-broker-unittest.cc
new file mode 100644
index 0000000000..f337134e69
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/liveness-broker-unittest.cc
@@ -0,0 +1,45 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/liveness-broker.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/liveness-broker.h"
+#include "test/unittests/heap/cppgc/tests.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+using LivenessBrokerTest = testing::TestSupportingAllocationOnly;
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void Trace(cppgc::Visitor*) const {}
+};
+
+} // namespace
+
+TEST_F(LivenessBrokerTest, IsHeapObjectAliveForConstPointer) {
+ // Regression test: http://crbug.com/661363.
+ GCed* object = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ HeapObjectHeader& header = HeapObjectHeader::FromObject(object);
+ LivenessBroker broker = internal::LivenessBrokerFactory::Create();
+ EXPECT_TRUE(header.TryMarkAtomic());
+ EXPECT_TRUE(broker.IsHeapObjectAlive(object));
+ const GCed* const_object = const_cast<const GCed*>(object);
+ EXPECT_TRUE(broker.IsHeapObjectAlive(const_object));
+}
+
+TEST_F(LivenessBrokerTest, IsHeapObjectAliveNullptr) {
+ GCed* object = nullptr;
+ LivenessBroker broker = internal::LivenessBrokerFactory::Create();
+ EXPECT_TRUE(broker.IsHeapObjectAlive(object));
+}
+
+} // namespace internal
+} // namespace cppgc
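The second new test pins down a small but useful property: IsHeapObjectAlive() treats nullptr as alive. A hedged sketch of why that is convenient in weak callbacks (the Node type is illustrative):

    #include "include/cppgc/liveness-broker.h"
    #include "include/cppgc/member.h"

    class Node;  // some GarbageCollected type (illustrative)

    // Because nullptr counts as alive, an already-cleared field can be tested
    // unconditionally; only genuinely dead objects are reset.
    void ClearIfDead(const cppgc::LivenessBroker& broker,
                     cppgc::WeakMember<Node>& field) {
      if (!broker.IsHeapObjectAlive(field)) field = nullptr;
    }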
diff --git a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
index 8f74cd0769..5d071ad130 100644
--- a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "include/cppgc/allocation.h"
+#include "include/cppgc/cross-thread-persistent.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -303,7 +304,7 @@ TEST_F(SweeperTest, LazySweepingDuringAllocation) {
Heap::Config::MarkingType::kAtomic,
Heap::Config::SweepingType::kIncrementalAndConcurrent};
Heap::From(GetHeap())->CollectGarbage(config);
- // Incremetal sweeping is active and the space should have two pages with
+ // Incremental sweeping is active and the space should have two pages with
// no room for an additional GCedObject. Allocating a new GCedObject should
// trigger sweeping. All objects other than the 2nd object on each page are
// marked. Lazy sweeping on allocation should reclaim the object on one of
@@ -400,5 +401,94 @@ TEST_F(SweeperTest, DiscardingNormalPageMemory) {
USE(holder);
}
+namespace {
+
+class Holder final : public GarbageCollected<Holder> {
+ public:
+ static size_t destructor_callcount;
+
+ void Trace(Visitor*) const {}
+
+ ~Holder() {
+ EXPECT_FALSE(ref);
+ EXPECT_FALSE(weak_ref);
+ destructor_callcount++;
+ }
+
+ cppgc::subtle::CrossThreadPersistent<GCed<1>> ref;
+ cppgc::subtle::WeakCrossThreadPersistent<GCed<1>> weak_ref;
+};
+
+// static
+size_t Holder::destructor_callcount;
+
+} // namespace
+
+TEST_F(SweeperTest, CrossThreadPersistentCanBeClearedFromOtherThread) {
+ Holder::destructor_callcount = 0;
+ auto* holder = MakeGarbageCollected<Holder>(GetAllocationHandle());
+
+ auto remote_heap = cppgc::Heap::Create(GetPlatformHandle());
+ // The case below must be able to clear both the CTP and the WCTP.
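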
+ holder->ref =
+ MakeGarbageCollected<GCed<1>>(remote_heap->GetAllocationHandle());
+ holder->weak_ref =
+ MakeGarbageCollected<GCed<1>>(remote_heap->GetAllocationHandle());
+
+ testing::TestPlatform::DisableBackgroundTasksScope no_concurrent_sweep_scope(
+ GetPlatformHandle().get());
+ Heap::From(GetHeap())->CollectGarbage(
+ {Heap::Config::CollectionType::kMajor,
+ Heap::Config::StackState::kNoHeapPointers,
+ Heap::Config::MarkingType::kAtomic,
+ Heap::Config::SweepingType::kIncrementalAndConcurrent});
+ // `holder` is unreachable (as the stack is not scanned) and will be
+ // reclaimed. Its payload memory is generally poisoned at this point. The
+ // CrossThreadPersistent slot should be unpoisoned.
+
+ // Terminate the remote heap which should also clear `holder->ref`. The slot
+ // for `ref` should have been unpoisoned by the GC.
+ Heap::From(remote_heap.get())->Terminate();
+
+ // Finish the sweeper which will find the CrossThreadPersistent in cleared
+ // state.
+ Heap::From(GetHeap())->sweeper().FinishIfRunning();
+ EXPECT_EQ(1u, Holder::destructor_callcount);
+}
+
+TEST_F(SweeperTest, WeakCrossThreadPersistentCanBeClearedFromOtherThread) {
+ Holder::destructor_callcount = 0;
+ auto* holder = MakeGarbageCollected<Holder>(GetAllocationHandle());
+
+ auto remote_heap = cppgc::Heap::Create(GetPlatformHandle());
+ holder->weak_ref =
+ MakeGarbageCollected<GCed<1>>(remote_heap->GetAllocationHandle());
+
+ testing::TestPlatform::DisableBackgroundTasksScope no_concurrent_sweep_scope(
+ GetPlatformHandle().get());
+ static constexpr Heap::Config config = {
+ Heap::Config::CollectionType::kMajor,
+ Heap::Config::StackState::kNoHeapPointers,
+ Heap::Config::MarkingType::kAtomic,
+ Heap::Config::SweepingType::kIncrementalAndConcurrent};
+ Heap::From(GetHeap())->CollectGarbage(config);
+ // `holder` is unreachable (as the stack is not scanned) and will be
+ // reclaimed. Its payload memory is generally poisoned at this point. The
+ // WeakCrossThreadPersistent slot should be unpoisoned during clearing.
+
+ // GC in the remote heap should also clear `holder->weak_ref`. The slot for
+ // `weak_ref` should be unpoisoned by the GC.
+ Heap::From(remote_heap.get())
+ ->CollectGarbage({Heap::Config::CollectionType::kMajor,
+ Heap::Config::StackState::kNoHeapPointers,
+ Heap::Config::MarkingType::kAtomic,
+ Heap::Config::SweepingType::kAtomic});
+
+ // Finish the sweeper which will find the CrossThreadPersistent in cleared
+ // state.
+ Heap::From(GetHeap())->sweeper().FinishIfRunning();
+ EXPECT_EQ(1u, Holder::destructor_callcount);
+}
+
} // namespace internal
} // namespace cppgc
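Both new tests cover the same cross-heap shape: an object on the local heap holds strong and weak cross-thread handles into a remote heap, and the remote heap must be able to clear them (on Terminate() or its own GC) while the local holder is already swept but not yet finalized. Hedged sketch of just the data shape (mirrors the fixture above):

    #include "include/cppgc/cross-thread-persistent.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/visitor.h"

    class Remote : public cppgc::GarbageCollected<Remote> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    class Holder : public cppgc::GarbageCollected<Holder> {
     public:
      void Trace(cppgc::Visitor*) const {}
      // Cleared from the remote heap's side; the local sweeper therefore has to
      // keep these slots unpoisoned until the destructor runs.
      cppgc::subtle::CrossThreadPersistent<Remote> ref;
      cppgc::subtle::WeakCrossThreadPersistent<Remote> weak_ref;
    };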
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index eb4e219d27..577e9ff050 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -5,7 +5,10 @@
#include <memory>
#include "include/cppgc/allocation.h"
+#include "include/cppgc/explicit-management.h"
#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/heap-consistency.h"
+#include "include/cppgc/internal/api-constants.h"
#include "include/cppgc/persistent.h"
#include "include/cppgc/platform.h"
#include "include/cppgc/testing.h"
@@ -132,6 +135,43 @@ TEST_F(UnifiedHeapTest, WriteBarrierCppToV8Reference) {
wrappable->wrapper()->GetAlignedPointerFromInternalField(1));
}
+#if DEBUG
+namespace {
+class Unreferenced : public cppgc::GarbageCollected<Unreferenced> {
+ public:
+ void Trace(cppgc::Visitor*) const {}
+};
+} // namespace
+
+TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ auto* unreferenced = cppgc::MakeGarbageCollected<Unreferenced>(
+ allocation_handle(),
+ cppgc::AdditionalBytes(cppgc::internal::api_constants::kMB));
+ // Force a safepoint to flush the cached allocated/freed sizes in cppgc.
+ cpp_heap().stats_collector()->NotifySafePointForTesting();
+ {
+ cppgc::subtle::NoGarbageCollectionScope scope(cpp_heap());
+ cppgc::internal::FreeUnreferencedObject(cpp_heap(), unreferenced);
+ // Force a safepoint to make sure the decrease in allocated size due to
+ // freeing the unreferenced object is reported to CppHeap. Due to
+ // NoGarbageCollectionScope, CppHeap will cache the reported decrease and
+ // won't report it further.
+ cpp_heap().stats_collector()->NotifySafePointForTesting();
+ }
+ // Running a GC resets the allocated size counters to the current marked bytes
+ // counter.
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ // If CppHeap didn't clear its cached values when the counters were reset,
+ // the next safepoint would try to subtract the cached value from the last
+ // marked bytes (which is smaller than the cached value) and crash.
+ cppgc::MakeGarbageCollected<Unreferenced>(allocation_handle());
+ cpp_heap().stats_collector()->NotifySafePointForTesting();
+}
+#endif // DEBUG
+
#if !V8_OS_FUCHSIA
TEST_F(UnifiedHeapTest, TracedReferenceRetainsFromStack) {
v8::HandleScope scope(v8_isolate());
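The DEBUG-only test above hinges on cppgc::subtle::NoGarbageCollectionScope from include/cppgc/heap-consistency.h: no GC may start while such a scope is active, so size updates reported inside it must be cached by CppHeap and reconciled later. Minimal sketch of the scope itself:

    #include "include/cppgc/heap-consistency.h"

    void MutateWithoutGC(cppgc::HeapHandle& heap_handle) {
      // No garbage collection can be triggered while this scope is alive.
      cppgc::subtle::NoGarbageCollectionScope no_gc(heap_handle);
      // ... explicit frees / mutations whose accounting must be deferred ...
    }  // GCs may run again once the scope is gone.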
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 7122462537..16c6f5ebcd 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -1516,22 +1516,14 @@ TEST_F(ValueSerializerTest, DecodeLinearRegExp) {
}
TEST_F(ValueSerializerTest, DecodeHasIndicesRegExp) {
- bool flag_was_enabled = i::FLAG_harmony_regexp_match_indices;
-
// The last byte encodes the regexp flags.
std::vector<uint8_t> regexp_encoding = {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03,
0x66, 0x6F, 0x6F, 0xAD, 0x01};
- i::FLAG_harmony_regexp_match_indices = true;
Local<Value> value = DecodeTest(regexp_encoding);
ASSERT_TRUE(value->IsRegExp());
ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
ExpectScriptTrue("result.toString() === '/foo/dgmsy'");
-
- i::FLAG_harmony_regexp_match_indices = false;
- InvalidDecodeTest(regexp_encoding);
-
- i::FLAG_harmony_regexp_match_indices = flag_was_enabled;
}
TEST_F(ValueSerializerTest, RoundTripMap) {
diff --git a/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc b/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc
new file mode 100644
index 0000000000..dbc4a76f88
--- /dev/null
+++ b/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "src/api/api.h"
+#include "src/objects/objects-inl.h"
+#include "src/runtime/runtime.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace {
+
+using RuntimeTest = TestWithContext;
+
+TEST_F(RuntimeTest, ReturnsPrototype) {
+ TryCatch try_catch(isolate());
+
+ Local<v8::Object> object = v8::Object::New(isolate());
+ Handle<JSArray> i_result =
+ Runtime::GetInternalProperties(i_isolate(), Utils::OpenHandle(*object))
+ .ToHandleChecked();
+ Local<Array> result = Utils::ToLocal(i_result);
+ EXPECT_GE(result->Length(), 1u);
+
+ char name_buffer[100];
+ result->Get(context(), 0)
+ .ToLocalChecked()
+ .As<v8::String>()
+ ->WriteUtf8(isolate(), name_buffer);
+ EXPECT_EQ("[[Prototype]]", std::string(name_buffer));
+}
+
+bool AccessCheck(Local<v8::Context> accessing_context,
+ Local<v8::Object> accessed_object, Local<Value> data) {
+ return false;
+}
+
+TEST_F(RuntimeTest, DoesNotReturnPrototypeWhenInacessible) {
+ TryCatch try_catch(isolate());
+
+ Local<ObjectTemplate> object_template = ObjectTemplate::New(isolate());
+ object_template->SetAccessCheckCallback(AccessCheck);
+
+ Local<v8::Object> object =
+ object_template->NewInstance(context()).ToLocalChecked();
+ Handle<JSArray> i_result =
+ Runtime::GetInternalProperties(i_isolate(), Utils::OpenHandle(*object))
+ .ToHandleChecked();
+ Local<Array> result = Utils::ToLocal(i_result);
+ EXPECT_EQ(0u, result->Length());
+}
+
+} // namespace
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index d2362e6e03..cd62f37083 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -49,6 +49,7 @@ type TaggedIndex extends StrongTagged;
type TaggedZeroPattern extends TaggedIndex;
@abstract
+@doNotGenerateCppClass
extern class HeapObject extends StrongTagged {
map: Map;
}
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 2c4cec927b..89dd4f29e8 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -109,10 +109,10 @@ class TestModuleBuilder {
mod.functions[result].imported = true;
return result;
}
- byte AddException(WasmExceptionSig* sig) {
- mod.exceptions.emplace_back(sig);
+ byte AddException(WasmTagSig* sig) {
+ mod.tags.emplace_back(sig);
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
- return static_cast<byte>(mod.exceptions.size() - 1);
+ return static_cast<byte>(mod.tags.size() - 1);
}
byte AddTable(ValueType type, uint32_t initial_size, bool has_maximum_size,
@@ -2673,12 +2673,10 @@ TEST_F(FunctionBodyDecoderTest, BrTableSubtyping) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
byte supertype1 = builder.AddStruct({F(kWasmI8, true), F(kWasmI16, false)});
byte supertype2 = builder.AddStruct({F(kWasmI8, true)});
byte subtype = builder.AddStruct(
{F(kWasmI8, true), F(kWasmI16, false), F(kWasmI32, true)});
- module = builder.module();
ExpectValidates(
sigs.v_v(),
{WASM_BLOCK_R(
@@ -3762,8 +3760,6 @@ TEST_F(FunctionBodyDecoderTest, RefAsNonNull) {
WASM_FEATURE_SCOPE(simd);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
uint32_t heap_types[] = {
@@ -3804,8 +3800,6 @@ TEST_F(FunctionBodyDecoderTest, RefNull) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
uint32_t type_reprs[] = {
@@ -3834,8 +3828,6 @@ TEST_F(FunctionBodyDecoderTest, RefIsNull) {
sigs.i_i(), {WASM_REF_IS_NULL(WASM_LOCAL_GET(0))}, kAppendEnd,
"ref.is_null[0] expected reference type, found local.get of type i32");
- TestModuleBuilder builder;
- module = builder.module();
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
uint32_t heap_types[] = {
@@ -3910,8 +3902,6 @@ TEST_F(FunctionBodyDecoderTest, GCStruct) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
byte immutable_struct_type_index = builder.AddStruct({F(kWasmI32, false)});
@@ -4045,8 +4035,6 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
byte array_type_index = builder.AddArray(kWasmFuncRef, true);
byte struct_type_index = builder.AddStruct({F(kWasmI32, false)});
@@ -4185,8 +4173,6 @@ TEST_F(FunctionBodyDecoderTest, PackedFields) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
byte array_type_index = builder.AddArray(kWasmI8, true);
byte struct_type_index = builder.AddStruct({F(kWasmI16, true)});
byte field_index = 0;
@@ -4281,8 +4267,6 @@ TEST_F(FunctionBodyDecoderTest, RttCanon) {
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(eh);
- TestModuleBuilder builder;
- module = builder.module();
uint8_t array_type_index = builder.AddArray(kWasmI32, true);
uint8_t struct_type_index = builder.AddStruct({F(kWasmI64, true)});
@@ -4305,8 +4289,6 @@ TEST_F(FunctionBodyDecoderTest, RttSub) {
WASM_FEATURE_SCOPE(gc);
WASM_FEATURE_SCOPE(gc_experiments);
- TestModuleBuilder builder;
- module = builder.module();
uint8_t array_type_index = builder.AddArray(kWasmI8, true);
uint8_t super_struct_type_index = builder.AddStruct({F(kWasmI16, true)});
uint8_t sub_struct_type_index =
@@ -4396,8 +4378,6 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
HeapType::Representation array_heap =
static_cast<HeapType::Representation>(builder.AddArray(kWasmI8, true));
HeapType::Representation super_struct_heap =
@@ -4492,9 +4472,6 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
-
byte super_struct = builder.AddStruct({F(kWasmI16, true)});
byte sub_struct = builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)});
@@ -4563,9 +4540,6 @@ TEST_F(FunctionBodyDecoderTest, BrOnAbstractType) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
-
ValueType kNonNullableFunc = ValueType::Ref(HeapType::kFunc, kNonNullable);
ExpectValidates(
@@ -4623,8 +4597,6 @@ TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- TestModuleBuilder builder;
- module = builder.module();
byte array_type = builder.AddArray(kWasmI8, true);
ValueType types[] = {ValueType::Ref(array_type, kNonNullable)};
@@ -4639,6 +4611,26 @@ TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
kAppendEnd, "expected (ref 0), got (ref null 0)");
}
+TEST_F(FunctionBodyDecoderTest, MergeNullableTypes) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+
+ byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
+ ValueType struct_type = optref(struct_type_index);
+ FunctionSig loop_sig(0, 1, &struct_type);
+ byte loop_sig_index = builder.AddSignature(&loop_sig);
+ // Verifies that when a loop consuming a nullable type is entered with a
+ // statically known non-null value on the stack, its {start_merge_} can
+ // consume null values later.
+ // Regression test for crbug.com/1234453.
+ ExpectValidates(sigs.v_v(),
+ {WASM_GC_OP(kExprRttCanon), struct_type_index,
+ WASM_GC_OP(kExprStructNewDefault), struct_type_index,
+ WASM_LOOP_X(loop_sig_index, kExprDrop, kExprRefNull,
+ struct_type_index, kExprBr, 0)});
+}
+
// This tests that num_locals_ in decoder remains consistent, even if we fail
// mid-DecodeLocals().
TEST_F(FunctionBodyDecoderTest, Regress_1154439) {
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 762725cfea..271d6fe1b9 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -1303,28 +1303,28 @@ TEST_F(WasmModuleVerifyTest, InvalidArrayTypeDef) {
}
TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
- static const byte data[] = {SECTION(Exception, ENTRY_COUNT(0))};
+ static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(0u, result.value()->exceptions.size());
+ EXPECT_EQ(0u, result.value()->tags.size());
}
TEST_F(WasmModuleVerifyTest, OneI32Exception) {
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_x(kI32Code)), // sig#0 (i32)
- SECTION(Exception, ENTRY_COUNT(1),
+ SECTION(Tag, ENTRY_COUNT(1),
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.value()->exceptions.size());
+ EXPECT_EQ(1u, result.value()->tags.size());
- const WasmException& e0 = result.value()->exceptions.front();
+ const WasmTag& e0 = result.value()->tags.front();
EXPECT_EQ(1u, e0.sig->parameter_count());
EXPECT_EQ(kWasmI32, e0.sig->GetParam(0));
}
@@ -1334,7 +1334,7 @@ TEST_F(WasmModuleVerifyTest, TwoExceptions) {
SECTION(Type, ENTRY_COUNT(2),
SIG_ENTRY_v_x(kI32Code), // sig#0 (i32)
SIG_ENTRY_v_xx(kF32Code, kI64Code)), // sig#1 (f32, i64)
- SECTION(Exception, ENTRY_COUNT(2),
+ SECTION(Tag, ENTRY_COUNT(2),
EXCEPTION_ENTRY(SIG_INDEX(1)), // except[0] (sig#1)
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[1] (sig#0)
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1342,19 +1342,19 @@ TEST_F(WasmModuleVerifyTest, TwoExceptions) {
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2u, result.value()->exceptions.size());
- const WasmException& e0 = result.value()->exceptions.front();
+ EXPECT_EQ(2u, result.value()->tags.size());
+ const WasmTag& e0 = result.value()->tags.front();
EXPECT_EQ(2u, e0.sig->parameter_count());
EXPECT_EQ(kWasmF32, e0.sig->GetParam(0));
EXPECT_EQ(kWasmI64, e0.sig->GetParam(1));
- const WasmException& e1 = result.value()->exceptions.back();
+ const WasmTag& e1 = result.value()->tags.back();
EXPECT_EQ(kWasmI32, e1.sig->GetParam(0));
}
TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_index) {
static const byte data[] = {
TYPE_SECTION_ONE_SIG_VOID_VOID,
- SECTION(Exception, ENTRY_COUNT(1),
+ SECTION(Tag, ENTRY_COUNT(1),
EXCEPTION_ENTRY(
SIG_INDEX(23)))}; // except[0] (sig#23 [out-of-bounds])
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1368,7 +1368,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_index) {
TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_i_i),
- SECTION(Exception, ENTRY_COUNT(1),
+ SECTION(Tag, ENTRY_COUNT(1),
EXCEPTION_ENTRY(
SIG_INDEX(0)))}; // except[0] (sig#0 [invalid-return-type])
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1376,13 +1376,13 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
// Should fail decoding exception section.
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "exception signature 0 has non-void return");
+ EXPECT_NOT_OK(result, "tag signature 0 has non-void return");
}
TEST_F(WasmModuleVerifyTest, Exception_invalid_attribute) {
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_i_i),
- SECTION(Exception, ENTRY_COUNT(1), 23,
+ SECTION(Tag, ENTRY_COUNT(1), 23,
SIG_INDEX(0))}; // except[0] (sig#0) [invalid-attribute]
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1392,9 +1392,9 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_attribute) {
EXPECT_NOT_OK(result, "exception attribute 23 not supported");
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
+TEST_F(WasmModuleVerifyTest, TagSectionCorrectPlacement) {
static const byte data[] = {SECTION(Memory, ENTRY_COUNT(0)),
- SECTION(Exception, ENTRY_COUNT(0)),
+ SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Global, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1403,19 +1403,19 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
EXPECT_OK(result);
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterGlobal) {
+TEST_F(WasmModuleVerifyTest, TagSectionAfterGlobal) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(0)),
- SECTION(Exception, ENTRY_COUNT(0))};
+ SECTION(Tag, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result,
- "The Exception section must appear before the Global section");
+ "The Tag section must appear before the Global section");
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeMemory) {
- static const byte data[] = {SECTION(Exception, ENTRY_COUNT(0)),
+TEST_F(WasmModuleVerifyTest, TagSectionBeforeMemory) {
+ static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Memory, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1424,10 +1424,10 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeMemory) {
EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterTableBeforeMemory) {
+TEST_F(WasmModuleVerifyTest, TagSectionAfterTableBeforeMemory) {
STATIC_ASSERT(kMemorySectionCode + 1 == kGlobalSectionCode);
static const byte data[] = {SECTION(Table, ENTRY_COUNT(0)),
- SECTION(Exception, ENTRY_COUNT(0)),
+ SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Memory, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
@@ -1436,39 +1436,39 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterTableBeforeMemory) {
EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
-TEST_F(WasmModuleVerifyTest, ExceptionImport) {
+TEST_F(WasmModuleVerifyTest, TagImport) {
static const byte data[] = {
TYPE_SECTION_ONE_SIG_VOID_VOID,
SECTION(Import, // section header
ENTRY_COUNT(1), // number of imports
ADD_COUNT('m'), // module name
- ADD_COUNT('e', 'x'), // exception name
- kExternalException, // import kind
+ ADD_COUNT('e', 'x'), // tag name
+ kExternalTag, // import kind
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.value()->exceptions.size());
+ EXPECT_EQ(1u, result.value()->tags.size());
EXPECT_EQ(1u, result.value()->import_table.size());
}
TEST_F(WasmModuleVerifyTest, ExceptionExport) {
static const byte data[] = {
TYPE_SECTION_ONE_SIG_VOID_VOID,
- SECTION(Exception, ENTRY_COUNT(1),
+ SECTION(Tag, ENTRY_COUNT(1),
EXCEPTION_ENTRY(SIG_INDEX(0))), // except[0] (sig#0)
SECTION(Export, ENTRY_COUNT(1), // --
NO_NAME, // --
- kExternalException, // --
+ kExternalTag, // --
EXCEPTION_INDEX(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.value()->exceptions.size());
+ EXPECT_EQ(1u, result.value()->tags.size());
EXPECT_EQ(1u, result.value()->export_table.size());
}
diff --git a/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
new file mode 100644
index 0000000000..e5b71d956f
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/trap-handler/trap-handler-simulator.h"
+
+#include "include/v8.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/execution/simulator.h"
+#include "src/trap-handler/trap-handler.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+
+#if !V8_HOST_ARCH_X64 || !V8_TARGET_ARCH_ARM64
+#error "Only include this file on arm64 simulator builds on x64."
+#endif
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+constexpr uintptr_t kFakePc = 11;
+
+class SimulatorTrapHandlerTest : public TestWithIsolate {
+ public:
+ void SetThreadInWasm() {
+ EXPECT_EQ(0, *thread_in_wasm);
+ *thread_in_wasm = 1;
+ }
+
+ void ResetThreadInWasm() {
+ EXPECT_EQ(1, *thread_in_wasm);
+ *thread_in_wasm = 0;
+ }
+
+ int* thread_in_wasm = trap_handler::GetThreadInWasmThreadLocalAddress();
+};
+
+TEST_F(SimulatorTrapHandlerTest, ProbeMemorySuccess) {
+ int x = 47;
+ EXPECT_EQ(0u, ProbeMemory(reinterpret_cast<uintptr_t>(&x), kFakePc));
+}
+
+TEST_F(SimulatorTrapHandlerTest, ProbeMemoryFail) {
+ constexpr uintptr_t kNullAddress = 0;
+ EXPECT_DEATH_IF_SUPPORTED(ProbeMemory(kNullAddress, kFakePc), "");
+}
+
+TEST_F(SimulatorTrapHandlerTest, ProbeMemoryFailWhileInWasm) {
+ // Test that we still crash if the trap handler is set up and the "thread in
+ // wasm" flag is set, but the PC is not registered as a protected instruction.
+ constexpr bool kUseDefaultHandler = true;
+ CHECK(v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultHandler));
+
+ constexpr uintptr_t kNullAddress = 0;
+ SetThreadInWasm();
+ EXPECT_DEATH_IF_SUPPORTED(ProbeMemory(kNullAddress, kFakePc), "");
+}
+
+TEST_F(SimulatorTrapHandlerTest, ProbeMemoryWithTrapHandled) {
+ constexpr uintptr_t kNullAddress = 0;
+ constexpr uintptr_t kFakeLandingPad = 19;
+
+ constexpr bool kUseDefaultHandler = true;
+ CHECK(v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultHandler));
+
+ ProtectedInstructionData fake_protected_instruction{kFakePc, kFakeLandingPad};
+ int handler_data_index =
+ RegisterHandlerData(0, 128, 1, &fake_protected_instruction);
+
+ SetThreadInWasm();
+ EXPECT_EQ(kFakeLandingPad, ProbeMemory(kNullAddress, kFakePc));
+
+ // Reset everything.
+ ResetThreadInWasm();
+ ReleaseHandlerData(handler_data_index);
+ RemoveTrapHandler();
+}
+
+TEST_F(SimulatorTrapHandlerTest, ProbeMemoryWithLandingPad) {
+ EXPECT_EQ(0u, GetRecoveredTrapCount());
+
+ // Test that the trap handler can recover from a memory access violation in
+ // wasm code (we fake the wasm code and the access violation).
+ std::unique_ptr<TestingAssemblerBuffer> buffer = AllocateAssemblerBuffer();
+ constexpr Register scratch = x0;
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ // Generate an illegal memory access.
+ masm.Mov(scratch, 0);
+ uint32_t crash_offset = masm.pc_offset();
+ masm.Str(scratch, MemOperand(scratch, 0)); // nullptr access
+ uint32_t recovery_offset = masm.pc_offset();
+ // Return.
+ masm.Ret();
+
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ constexpr bool kUseDefaultHandler = true;
+ CHECK(v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultHandler));
+
+ ProtectedInstructionData protected_instruction{crash_offset, recovery_offset};
+ int handler_data_index =
+ RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
+ desc.instr_size, 1, &protected_instruction);
+
+ // Now execute the code.
+ buffer->MakeExecutable();
+ GeneratedCode<void> code = GeneratedCode<void>::FromAddress(
+ i_isolate(), reinterpret_cast<Address>(desc.buffer));
+
+ SetThreadInWasm();
+ code.Call();
+ ResetThreadInWasm();
+
+ ReleaseHandlerData(handler_data_index);
+ RemoveTrapHandler();
+
+ EXPECT_EQ(1u, GetRecoveredTrapCount());
+}
+
+} // namespace trap_handler
+} // namespace internal
+} // namespace v8
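For readers new to the simulator trap handler, the contract these tests pin down is: ProbeMemory(address, pc) returns 0 for a readable address, terminates the process on a fault at an unregistered PC, and returns the registered landing pad when the faulting PC was registered via RegisterHandlerData and the thread-in-wasm flag is set. A hedged sketch of the registered case (constants mirror the test and carry no real meaning):

    #include "src/trap-handler/trap-handler-simulator.h"
    #include "src/trap-handler/trap-handler.h"

    using namespace v8::internal::trap_handler;

    uintptr_t ProbeRegisteredPc(uintptr_t bad_address, uint32_t pc,
                                uint32_t landing_pad) {
      ProtectedInstructionData instruction{pc, landing_pad};
      // Base address 0 / size 128 mirror the fake code region in the test.
      int handler_data = RegisterHandlerData(0, 128, 1, &instruction);
      // Assumes the trap handler is installed and the thread-in-wasm flag is
      // set, as in the tests above.
      uintptr_t result = ProbeMemory(bad_address, pc);
      ReleaseHandlerData(handler_data);
      return result;  // == landing_pad for a faulting access
    }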
diff --git a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
index 38b9d85a51..25766ee78d 100644
--- a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+++ b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -29,7 +29,6 @@
//
// Google C++ Testing and Mocking Framework definitions useful in production code.
-// GOOGLETEST_CM0003 DO NOT DELETE
#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
#define GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
diff --git a/deps/v8/third_party/zlib/google/zip_internal.cc b/deps/v8/third_party/zlib/google/zip_internal.cc
index 653a2ab388..cea1e88d03 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.cc
+++ b/deps/v8/third_party/zlib/google/zip_internal.cc
@@ -8,9 +8,13 @@
#include <string.h>
#include <algorithm>
+#include <unordered_set>
+#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/no_destructor.h"
#include "base/notreached.h"
+#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#if defined(USE_SYSTEM_MINIZIP)
@@ -36,9 +40,9 @@ typedef struct {
} WIN32FILE_IOWIN;
// This function is derived from third_party/minizip/iowin32.c.
-// Its only difference is that it treats the char* as UTF8 and
+// Its only difference is that it treats the filename as UTF-8 and
// uses the Unicode version of CreateFile.
-void* ZipOpenFunc(void *opaque, const char* filename, int mode) {
+void* ZipOpenFunc(void* opaque, const void* filename, int mode) {
DWORD desired_access = 0, creation_disposition = 0;
DWORD share_mode = 0, flags_and_attributes = 0;
HANDLE file = 0;
@@ -56,10 +60,11 @@ void* ZipOpenFunc(void *opaque, const char* filename, int mode) {
creation_disposition = CREATE_ALWAYS;
}
- std::wstring filenamew = base::UTF8ToWide(filename);
- if ((filename != NULL) && (desired_access != 0)) {
- file = CreateFile(filenamew.c_str(), desired_access, share_mode, NULL,
- creation_disposition, flags_and_attributes, NULL);
+ if (filename != nullptr && desired_access != 0) {
+ file = CreateFileW(
+ base::UTF8ToWide(static_cast<const char*>(filename)).c_str(),
+ desired_access, share_mode, nullptr, creation_disposition,
+ flags_and_attributes, nullptr);
}
if (file == INVALID_HANDLE_VALUE)
@@ -83,7 +88,7 @@ void* ZipOpenFunc(void *opaque, const char* filename, int mode) {
// Callback function for zlib that opens a file stream from a file descriptor.
// Since we do not own the file descriptor, dup it so that we can fdopen/fclose
// a file stream.
-void* FdOpenFileFunc(void* opaque, const char* filename, int mode) {
+void* FdOpenFileFunc(void* opaque, const void* filename, int mode) {
FILE* file = NULL;
const char* mode_fopen = NULL;
@@ -105,15 +110,15 @@ void* FdOpenFileFunc(void* opaque, const char* filename, int mode) {
int FdCloseFileFunc(void* opaque, void* stream) {
fclose(static_cast<FILE*>(stream));
- free(opaque); // malloc'ed in FillFdOpenFileFunc()
+ free(opaque); // malloc'ed in FillFdOpenFileFunc()
return 0;
}
// Fills |pzlib_filefunc_def| appropriately to handle the zip file
// referred to by |fd|.
-void FillFdOpenFileFunc(zlib_filefunc_def* pzlib_filefunc_def, int fd) {
- fill_fopen_filefunc(pzlib_filefunc_def);
- pzlib_filefunc_def->zopen_file = FdOpenFileFunc;
+void FillFdOpenFileFunc(zlib_filefunc64_def* pzlib_filefunc_def, int fd) {
+ fill_fopen64_filefunc(pzlib_filefunc_def);
+ pzlib_filefunc_def->zopen64_file = FdOpenFileFunc;
pzlib_filefunc_def->zclose_file = FdCloseFileFunc;
int* ptr_fd = static_cast<int*>(malloc(sizeof(fd)));
*ptr_fd = fd;
@@ -124,7 +129,7 @@ void FillFdOpenFileFunc(zlib_filefunc_def* pzlib_filefunc_def, int fd) {
#if defined(OS_WIN)
// Callback function for zlib that opens a file stream from a Windows handle.
// Does not take ownership of the handle.
-void* HandleOpenFileFunc(void* opaque, const char* filename, int mode) {
+void* HandleOpenFileFunc(void* opaque, const void* /*filename*/, int mode) {
WIN32FILE_IOWIN file_ret;
file_ret.hf = static_cast<HANDLE>(opaque);
file_ret.error = 0;
@@ -138,7 +143,7 @@ void* HandleOpenFileFunc(void* opaque, const char* filename, int mode) {
}
int HandleCloseFileFunc(void* opaque, void* stream) {
- free(stream); // malloc'ed in HandleOpenFileFunc()
+ free(stream); // malloc'ed in HandleOpenFileFunc()
return 0;
}
#endif
@@ -148,8 +153,8 @@ int HandleCloseFileFunc(void* opaque, void* stream) {
// expect their opaque parameters refer to this struct.
struct ZipBuffer {
const char* data; // weak
- size_t length;
- size_t offset;
+ ZPOS64_T length;
+ ZPOS64_T offset;
};
// Opens the specified file. When this function returns a non-NULL pointer, zlib
@@ -158,7 +163,7 @@ struct ZipBuffer {
// given opaque parameter and returns it because this parameter stores all
// information needed for uncompressing data. (This function does not support
// writing compressed data and it returns NULL for this case.)
-void* OpenZipBuffer(void* opaque, const char* /*filename*/, int mode) {
+void* OpenZipBuffer(void* opaque, const void* /*filename*/, int mode) {
if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER) != ZLIB_FILEFUNC_MODE_READ) {
NOTREACHED();
return NULL;
@@ -175,10 +180,11 @@ void* OpenZipBuffer(void* opaque, const char* /*filename*/, int mode) {
uLong ReadZipBuffer(void* opaque, void* /*stream*/, void* buf, uLong size) {
ZipBuffer* buffer = static_cast<ZipBuffer*>(opaque);
DCHECK_LE(buffer->offset, buffer->length);
- size_t remaining_bytes = buffer->length - buffer->offset;
+ ZPOS64_T remaining_bytes = buffer->length - buffer->offset;
if (!buffer || !buffer->data || !remaining_bytes)
return 0;
- size = std::min(size, static_cast<uLong>(remaining_bytes));
+ if (size > remaining_bytes)
+ size = remaining_bytes;
memcpy(buf, &buffer->data[buffer->offset], size);
buffer->offset += size;
return size;
@@ -195,21 +201,23 @@ uLong WriteZipBuffer(void* /*opaque*/,
}
// Returns the offset from the beginning of the data.
-long GetOffsetOfZipBuffer(void* opaque, void* /*stream*/) {
+ZPOS64_T GetOffsetOfZipBuffer(void* opaque, void* /*stream*/) {
ZipBuffer* buffer = static_cast<ZipBuffer*>(opaque);
if (!buffer)
return -1;
- return static_cast<long>(buffer->offset);
+ return buffer->offset;
}
// Moves the current offset to the specified position.
-long SeekZipBuffer(void* opaque, void* /*stream*/, uLong offset, int origin) {
+long SeekZipBuffer(void* opaque,
+ void* /*stream*/,
+ ZPOS64_T offset,
+ int origin) {
ZipBuffer* buffer = static_cast<ZipBuffer*>(opaque);
if (!buffer)
return -1;
if (origin == ZLIB_FILEFUNC_SEEK_CUR) {
- buffer->offset = std::min(buffer->offset + static_cast<size_t>(offset),
- buffer->length);
+ buffer->offset = std::min(buffer->offset + offset, buffer->length);
return 0;
}
if (origin == ZLIB_FILEFUNC_SEEK_END) {
@@ -217,7 +225,7 @@ long SeekZipBuffer(void* opaque, void* /*stream*/, uLong offset, int origin) {
return 0;
}
if (origin == ZLIB_FILEFUNC_SEEK_SET) {
- buffer->offset = std::min(buffer->length, static_cast<size_t>(offset));
+ buffer->offset = std::min(buffer->length, offset);
return 0;
}
NOTREACHED();
@@ -268,33 +276,33 @@ namespace zip {
namespace internal {
unzFile OpenForUnzipping(const std::string& file_name_utf8) {
- zlib_filefunc_def* zip_func_ptrs = NULL;
+ zlib_filefunc64_def* zip_func_ptrs = nullptr;
#if defined(OS_WIN)
- zlib_filefunc_def zip_funcs;
- fill_win32_filefunc(&zip_funcs);
- zip_funcs.zopen_file = ZipOpenFunc;
+ zlib_filefunc64_def zip_funcs;
+ fill_win32_filefunc64(&zip_funcs);
+ zip_funcs.zopen64_file = ZipOpenFunc;
zip_func_ptrs = &zip_funcs;
#endif
- return unzOpen2(file_name_utf8.c_str(), zip_func_ptrs);
+ return unzOpen2_64(file_name_utf8.c_str(), zip_func_ptrs);
}
#if defined(OS_POSIX)
unzFile OpenFdForUnzipping(int zip_fd) {
- zlib_filefunc_def zip_funcs;
+ zlib_filefunc64_def zip_funcs;
FillFdOpenFileFunc(&zip_funcs, zip_fd);
// Passing dummy "fd" filename to zlib.
- return unzOpen2("fd", &zip_funcs);
+ return unzOpen2_64("fd", &zip_funcs);
}
#endif
#if defined(OS_WIN)
unzFile OpenHandleForUnzipping(HANDLE zip_handle) {
- zlib_filefunc_def zip_funcs;
- fill_win32_filefunc(&zip_funcs);
- zip_funcs.zopen_file = HandleOpenFileFunc;
+ zlib_filefunc64_def zip_funcs;
+ fill_win32_filefunc64(&zip_funcs);
+ zip_funcs.zopen64_file = HandleOpenFileFunc;
zip_funcs.zclose_file = HandleCloseFileFunc;
zip_funcs.opaque = zip_handle;
- return unzOpen2("fd", &zip_funcs);
+ return unzOpen2_64("fd", &zip_funcs);
}
#endif
@@ -310,72 +318,152 @@ unzFile PrepareMemoryForUnzipping(const std::string& data) {
buffer->length = data.length();
buffer->offset = 0;
- zlib_filefunc_def zip_functions;
- zip_functions.zopen_file = OpenZipBuffer;
+ zlib_filefunc64_def zip_functions;
+ zip_functions.zopen64_file = OpenZipBuffer;
zip_functions.zread_file = ReadZipBuffer;
zip_functions.zwrite_file = WriteZipBuffer;
- zip_functions.ztell_file = GetOffsetOfZipBuffer;
- zip_functions.zseek_file = SeekZipBuffer;
+ zip_functions.ztell64_file = GetOffsetOfZipBuffer;
+ zip_functions.zseek64_file = SeekZipBuffer;
zip_functions.zclose_file = CloseZipBuffer;
zip_functions.zerror_file = GetErrorOfZipBuffer;
- zip_functions.opaque = static_cast<void*>(buffer);
- return unzOpen2(NULL, &zip_functions);
+ zip_functions.opaque = buffer;
+ return unzOpen2_64(nullptr, &zip_functions);
}
zipFile OpenForZipping(const std::string& file_name_utf8, int append_flag) {
- zlib_filefunc_def* zip_func_ptrs = NULL;
+ zlib_filefunc64_def* zip_func_ptrs = nullptr;
#if defined(OS_WIN)
- zlib_filefunc_def zip_funcs;
- fill_win32_filefunc(&zip_funcs);
- zip_funcs.zopen_file = ZipOpenFunc;
+ zlib_filefunc64_def zip_funcs;
+ fill_win32_filefunc64(&zip_funcs);
+ zip_funcs.zopen64_file = ZipOpenFunc;
zip_func_ptrs = &zip_funcs;
#endif
- return zipOpen2(file_name_utf8.c_str(),
- append_flag,
- NULL, // global comment
- zip_func_ptrs);
+ return zipOpen2_64(file_name_utf8.c_str(), append_flag, nullptr,
+ zip_func_ptrs);
}
#if defined(OS_POSIX)
zipFile OpenFdForZipping(int zip_fd, int append_flag) {
- zlib_filefunc_def zip_funcs;
+ zlib_filefunc64_def zip_funcs;
FillFdOpenFileFunc(&zip_funcs, zip_fd);
// Passing dummy "fd" filename to zlib.
- return zipOpen2("fd", append_flag, NULL, &zip_funcs);
+ return zipOpen2_64("fd", append_flag, nullptr, &zip_funcs);
}
#endif
bool ZipOpenNewFileInZip(zipFile zip_file,
const std::string& str_path,
- base::Time last_modified_time) {
+ base::Time last_modified_time,
+ Compression compression) {
// Section 4.4.4 http://www.pkware.com/documents/casestudies/APPNOTE.TXT
// Setting the Language encoding flag so the file is told to be in utf-8.
const uLong LANGUAGE_ENCODING_FLAG = 0x1 << 11;
- zip_fileinfo file_info = TimeToZipFileInfo(last_modified_time);
- if (ZIP_OK != zipOpenNewFileInZip4(zip_file, // file
- str_path.c_str(), // filename
- &file_info, // zip_fileinfo
- NULL, // extrafield_local,
- 0u, // size_extrafield_local
- NULL, // extrafield_global
- 0u, // size_extrafield_global
- NULL, // comment
- Z_DEFLATED, // method
- Z_DEFAULT_COMPRESSION, // level
- 0, // raw
- -MAX_WBITS, // windowBits
- DEF_MEM_LEVEL, // memLevel
- Z_DEFAULT_STRATEGY, // strategy
- NULL, // password
- 0, // crcForCrypting
- 0, // versionMadeBy
- LANGUAGE_ENCODING_FLAG)) { // flagBase
- DLOG(ERROR) << "Could not open zip file entry " << str_path;
+ const zip_fileinfo file_info = TimeToZipFileInfo(last_modified_time);
+ const int err = zipOpenNewFileInZip4_64(
+ /*file=*/zip_file,
+ /*filename=*/str_path.c_str(),
+ /*zip_fileinfo=*/&file_info,
+ /*extrafield_local=*/nullptr,
+ /*size_extrafield_local=*/0u,
+ /*extrafield_global=*/nullptr,
+ /*size_extrafield_global=*/0u,
+ /*comment=*/nullptr,
+ /*method=*/compression,
+ /*level=*/Z_DEFAULT_COMPRESSION,
+ /*raw=*/0,
+ /*windowBits=*/-MAX_WBITS,
+ /*memLevel=*/DEF_MEM_LEVEL,
+ /*strategy=*/Z_DEFAULT_STRATEGY,
+ /*password=*/nullptr,
+ /*crcForCrypting=*/0,
+ /*versionMadeBy=*/0,
+ /*flagBase=*/LANGUAGE_ENCODING_FLAG,
+ /*zip64=*/1);
+
+ if (err != ZIP_OK) {
+ DLOG(ERROR) << "Cannot open ZIP file entry '" << str_path
+ << "': zipOpenNewFileInZip4_64 returned " << err;
return false;
}
+
return true;
}
+Compression GetCompressionMethod(const base::FilePath& path) {
+ // Get the filename extension in lower case.
+ const base::FilePath::StringType ext =
+ base::ToLowerASCII(path.FinalExtension());
+
+ if (ext.empty())
+ return kDeflated;
+
+ using StringPiece = base::FilePath::StringPieceType;
+
+ // Skip the leading dot.
+ StringPiece ext_without_dot = ext;
+ DCHECK_EQ(ext_without_dot.front(), FILE_PATH_LITERAL('.'));
+ ext_without_dot.remove_prefix(1);
+
+  // Well-known filename extensions of files that are likely to be already
+ // compressed. The extensions are in lower case without the leading dot.
+ static const base::NoDestructor<
+ std::unordered_set<StringPiece, base::StringPieceHashImpl<StringPiece>>>
+ exts(std::initializer_list<StringPiece>{
+ FILE_PATH_LITERAL("3g2"), //
+ FILE_PATH_LITERAL("3gp"), //
+ FILE_PATH_LITERAL("7z"), //
+ FILE_PATH_LITERAL("7zip"), //
+ FILE_PATH_LITERAL("aac"), //
+ FILE_PATH_LITERAL("avi"), //
+ FILE_PATH_LITERAL("bz"), //
+ FILE_PATH_LITERAL("bz2"), //
+ FILE_PATH_LITERAL("crx"), //
+ FILE_PATH_LITERAL("gif"), //
+ FILE_PATH_LITERAL("gz"), //
+ FILE_PATH_LITERAL("jar"), //
+ FILE_PATH_LITERAL("jpeg"), //
+ FILE_PATH_LITERAL("jpg"), //
+ FILE_PATH_LITERAL("lz"), //
+ FILE_PATH_LITERAL("m2v"), //
+ FILE_PATH_LITERAL("m4p"), //
+ FILE_PATH_LITERAL("m4v"), //
+ FILE_PATH_LITERAL("mng"), //
+ FILE_PATH_LITERAL("mov"), //
+ FILE_PATH_LITERAL("mp2"), //
+ FILE_PATH_LITERAL("mp3"), //
+ FILE_PATH_LITERAL("mp4"), //
+ FILE_PATH_LITERAL("mpe"), //
+ FILE_PATH_LITERAL("mpeg"), //
+ FILE_PATH_LITERAL("mpg"), //
+ FILE_PATH_LITERAL("mpv"), //
+ FILE_PATH_LITERAL("ogg"), //
+ FILE_PATH_LITERAL("ogv"), //
+ FILE_PATH_LITERAL("png"), //
+ FILE_PATH_LITERAL("qt"), //
+ FILE_PATH_LITERAL("rar"), //
+ FILE_PATH_LITERAL("taz"), //
+ FILE_PATH_LITERAL("tb2"), //
+ FILE_PATH_LITERAL("tbz"), //
+ FILE_PATH_LITERAL("tbz2"), //
+ FILE_PATH_LITERAL("tgz"), //
+ FILE_PATH_LITERAL("tlz"), //
+ FILE_PATH_LITERAL("tz"), //
+ FILE_PATH_LITERAL("tz2"), //
+ FILE_PATH_LITERAL("vob"), //
+ FILE_PATH_LITERAL("webm"), //
+ FILE_PATH_LITERAL("wma"), //
+ FILE_PATH_LITERAL("wmv"), //
+ FILE_PATH_LITERAL("xz"), //
+ FILE_PATH_LITERAL("z"), //
+ FILE_PATH_LITERAL("zip"), //
+ });
+
+ if (exts->count(ext_without_dot))
+ return kStored;
+
+ return kDeflated;
+}
+
} // namespace internal
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip_internal.h b/deps/v8/third_party/zlib/google/zip_internal.h
index 49fb902a74..ef5b5d0906 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.h
+++ b/deps/v8/third_party/zlib/google/zip_internal.h
@@ -60,10 +60,24 @@ zipFile OpenForZipping(const std::string& file_name_utf8, int append_flag);
zipFile OpenFdForZipping(int zip_fd, int append_flag);
#endif
-// Wrapper around zipOpenNewFileInZip4 which passes most common options.
+// Compression methods.
+enum Compression {
+ kStored = 0, // Stored (no compression)
+ kDeflated = Z_DEFLATED, // Deflated
+};
+
+// Adds a file (or directory) entry to the ZIP archive.
bool ZipOpenNewFileInZip(zipFile zip_file,
const std::string& str_path,
- base::Time last_modified_time);
+ base::Time last_modified_time,
+ Compression compression);
+
+// Selects the best compression method for the given file. The heuristic is
+// based on the filename extension. By default, the compression method is
+// kDeflated. But if the given path has an extension indicating a well-known
+// file format that is likely to be already compressed (e.g. ZIP, RAR, JPG,
+// PNG...), then the compression method is simply kStored.
+Compression GetCompressionMethod(const base::FilePath& path);
const int kZipMaxPath = 256;
const int kZipBufSize = 8192;
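To show how the widened API above is meant to be used, here is an illustrative caller that mirrors what ZipWriter::OpenNewFileEntry does in the zip_writer.cc hunk further down; AddEntry is a hypothetical helper, and the Chromium //base headers are assumed to be available.

// Illustrative only: pick the compression method per entry using the
// declarations above (not the authoritative implementation).
#include <string>

#include "base/files/file_path.h"
#include "base/time/time.h"
#include "third_party/zlib/google/zip_internal.h"

bool AddEntry(zipFile zip_file, const base::FilePath& path, bool is_directory) {
  std::string entry = path.AsUTF8Unsafe();
  zip::internal::Compression method = zip::internal::kDeflated;
  if (is_directory) {
    entry += "/";  // directory entries carry no file data
  } else {
    // kStored for .zip, .jpg, ... which are already compressed.
    method = zip::internal::GetCompressionMethod(path);
  }
  return zip::internal::ZipOpenNewFileInZip(zip_file, entry, base::Time::Now(),
                                            method);
}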
diff --git a/deps/v8/third_party/zlib/google/zip_unittest.cc b/deps/v8/third_party/zlib/google/zip_unittest.cc
index 03d389ee2b..876f3eb181 100644
--- a/deps/v8/third_party/zlib/google/zip_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_unittest.cc
@@ -26,8 +26,12 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
#include "third_party/zlib/google/zip.h"
+#include "third_party/zlib/google/zip_internal.h"
#include "third_party/zlib/google/zip_reader.h"
+// Convenience macro to create a file path from a string literal.
+#define FP(path) base::FilePath(FILE_PATH_LITERAL(path))
+
namespace {
bool CreateFile(const std::string& content,
@@ -58,7 +62,7 @@ class VirtualFileSystem : public zip::FileAccessor {
VirtualFileSystem() {
base::FilePath test_dir;
- base::FilePath foo_txt_path = test_dir.Append(FILE_PATH_LITERAL("foo.txt"));
+ base::FilePath foo_txt_path = test_dir.AppendASCII("foo.txt");
base::FilePath file_path;
base::File file;
@@ -66,15 +70,13 @@ class VirtualFileSystem : public zip::FileAccessor {
DCHECK(success);
files_[foo_txt_path] = std::move(file);
- base::FilePath bar_dir = test_dir.Append(FILE_PATH_LITERAL("bar"));
- base::FilePath bar1_txt_path =
- bar_dir.Append(FILE_PATH_LITERAL("bar1.txt"));
+ base::FilePath bar_dir = test_dir.AppendASCII("bar");
+ base::FilePath bar1_txt_path = bar_dir.AppendASCII("bar1.txt");
success = CreateFile(kBar1Content, &file_path, &file);
DCHECK(success);
files_[bar1_txt_path] = std::move(file);
- base::FilePath bar2_txt_path =
- bar_dir.Append(FILE_PATH_LITERAL("bar2.txt"));
+ base::FilePath bar2_txt_path = bar_dir.AppendASCII("bar2.txt");
success = CreateFile(kBar2Content, &file_path, &file);
DCHECK(success);
files_[bar2_txt_path] = std::move(file);
@@ -172,22 +174,20 @@ class ZipTest : public PlatformTest {
test_dir_ = temp_dir_.GetPath();
base::FilePath zip_path(test_dir_);
- zip_contents_.insert(zip_path.Append(FILE_PATH_LITERAL("foo.txt")));
- zip_path = zip_path.Append(FILE_PATH_LITERAL("foo"));
+ zip_contents_.insert(zip_path.AppendASCII("foo.txt"));
+ zip_path = zip_path.AppendASCII("foo");
zip_contents_.insert(zip_path);
- zip_contents_.insert(zip_path.Append(FILE_PATH_LITERAL("bar.txt")));
- zip_path = zip_path.Append(FILE_PATH_LITERAL("bar"));
+ zip_contents_.insert(zip_path.AppendASCII("bar.txt"));
+ zip_path = zip_path.AppendASCII("bar");
zip_contents_.insert(zip_path);
- zip_contents_.insert(zip_path.Append(FILE_PATH_LITERAL("baz.txt")));
- zip_contents_.insert(zip_path.Append(FILE_PATH_LITERAL("quux.txt")));
- zip_contents_.insert(zip_path.Append(FILE_PATH_LITERAL(".hidden")));
+ zip_contents_.insert(zip_path.AppendASCII("baz.txt"));
+ zip_contents_.insert(zip_path.AppendASCII("quux.txt"));
+ zip_contents_.insert(zip_path.AppendASCII(".hidden"));
// Include a subset of files in |zip_file_list_| to test ZipFiles().
- zip_file_list_.push_back(base::FilePath(FILE_PATH_LITERAL("foo.txt")));
- zip_file_list_.push_back(
- base::FilePath(FILE_PATH_LITERAL("foo/bar/quux.txt")));
- zip_file_list_.push_back(
- base::FilePath(FILE_PATH_LITERAL("foo/bar/.hidden")));
+ zip_file_list_.push_back(FP("foo.txt"));
+ zip_file_list_.push_back(FP("foo/bar/quux.txt"));
+ zip_file_list_.push_back(FP("foo/bar/.hidden"));
}
virtual void TearDown() { PlatformTest::TearDown(); }
@@ -224,7 +224,7 @@ class ZipTest : public PlatformTest {
base::FileEnumerator::FILES | base::FileEnumerator::DIRECTORIES);
base::FilePath unzipped_entry_path = files.Next();
size_t count = 0;
- while (!unzipped_entry_path.value().empty()) {
+ while (!unzipped_entry_path.empty()) {
EXPECT_EQ(zip_contents_.count(unzipped_entry_path), 1U)
<< "Couldn't find " << unzipped_entry_path.value();
count++;
@@ -232,28 +232,15 @@ class ZipTest : public PlatformTest {
if (base::PathExists(unzipped_entry_path) &&
!base::DirectoryExists(unzipped_entry_path)) {
// It's a file, check its contents are what we zipped.
- // TODO(774156): figure out why the commented out EXPECT_TRUE below
- // fails on the build bots (but not on the try-bots).
base::FilePath relative_path;
- bool append_relative_path_success =
- test_dir_.AppendRelativePath(unzipped_entry_path, &relative_path);
- if (!append_relative_path_success) {
- LOG(ERROR) << "Append relative path failed, params: " << test_dir_
- << " and " << unzipped_entry_path;
- }
+ ASSERT_TRUE(
+ test_dir_.AppendRelativePath(unzipped_entry_path, &relative_path))
+          << "Cannot append relative path, params: '" << test_dir_
+ << "' and '" << unzipped_entry_path << "'";
base::FilePath original_path = original_dir.Append(relative_path);
- const bool equal =
- base::ContentsEqual(original_path, unzipped_entry_path);
- if (equal) {
- LOG(INFO) << "Original and unzipped file '" << relative_path
- << "' are equal";
- } else {
- LOG(ERROR) << "Original and unzipped file '" << relative_path
- << "' are different";
- }
- // EXPECT_TRUE(base::ContentsEqual(original_path, unzipped_entry_path))
- // << "Contents differ between original " << original_path.value()
- // << " and unzipped file " << unzipped_entry_path.value();
+ EXPECT_TRUE(base::ContentsEqual(original_path, unzipped_entry_path))
+ << "Original file '" << original_path << "' and unzipped file '"
+ << unzipped_entry_path << "' have different contents";
}
unzipped_entry_path = files.Next();
}
@@ -575,17 +562,17 @@ TEST_F(ZipTest, ZipWithFileAccessor) {
ASSERT_TRUE(scoped_temp_dir.CreateUniqueTempDir());
const base::FilePath& temp_dir = scoped_temp_dir.GetPath();
ASSERT_TRUE(zip::Unzip(zip_file, temp_dir));
- base::FilePath bar_dir = temp_dir.Append(FILE_PATH_LITERAL("bar"));
+ base::FilePath bar_dir = temp_dir.AppendASCII("bar");
EXPECT_TRUE(base::DirectoryExists(bar_dir));
std::string file_content;
- EXPECT_TRUE(base::ReadFileToString(
- temp_dir.Append(FILE_PATH_LITERAL("foo.txt")), &file_content));
+ EXPECT_TRUE(
+ base::ReadFileToString(temp_dir.AppendASCII("foo.txt"), &file_content));
EXPECT_EQ(VirtualFileSystem::kFooContent, file_content);
- EXPECT_TRUE(base::ReadFileToString(
- bar_dir.Append(FILE_PATH_LITERAL("bar1.txt")), &file_content));
+ EXPECT_TRUE(
+ base::ReadFileToString(bar_dir.AppendASCII("bar1.txt"), &file_content));
EXPECT_EQ(VirtualFileSystem::kBar1Content, file_content);
- EXPECT_TRUE(base::ReadFileToString(
- bar_dir.Append(FILE_PATH_LITERAL("bar2.txt")), &file_content));
+ EXPECT_TRUE(
+ base::ReadFileToString(bar_dir.AppendASCII("bar2.txt"), &file_content));
EXPECT_EQ(VirtualFileSystem::kBar2Content, file_content);
}
@@ -710,4 +697,121 @@ TEST_F(ZipTest, ZipCancel) {
}
}
+// Tests zip::internal::GetCompressionMethod()
+TEST_F(ZipTest, GetCompressionMethod) {
+ using zip::internal::GetCompressionMethod;
+ using zip::internal::kDeflated;
+ using zip::internal::kStored;
+
+ EXPECT_EQ(GetCompressionMethod(FP("")), kDeflated);
+ EXPECT_EQ(GetCompressionMethod(FP("NoExtension")), kDeflated);
+ EXPECT_EQ(GetCompressionMethod(FP("Folder.zip").Append(FP("NoExtension"))),
+ kDeflated);
+ EXPECT_EQ(GetCompressionMethod(FP("Name.txt")), kDeflated);
+ EXPECT_EQ(GetCompressionMethod(FP("Name.zip")), kStored);
+ EXPECT_EQ(GetCompressionMethod(FP("Name....zip")), kStored);
+ EXPECT_EQ(GetCompressionMethod(FP("Name.zip")), kStored);
+ EXPECT_EQ(GetCompressionMethod(FP("NAME.ZIP")), kStored);
+ EXPECT_EQ(GetCompressionMethod(FP("Name.gz")), kStored);
+ EXPECT_EQ(GetCompressionMethod(FP("Name.tar.gz")), kStored);
+ EXPECT_EQ(GetCompressionMethod(FP("Name.tar")), kDeflated);
+
+ // This one is controversial.
+ EXPECT_EQ(GetCompressionMethod(FP(".zip")), kStored);
+}
+
+// Tests that files put inside a ZIP are effectively compressed.
+TEST_F(ZipTest, Compressed) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ const base::FilePath src_dir = temp_dir.GetPath().AppendASCII("input");
+ EXPECT_TRUE(base::CreateDirectory(src_dir));
+
+ // Create some dummy source files.
+ for (const base::StringPiece s : {"foo", "bar.txt", ".hidden"}) {
+ base::File f(src_dir.AppendASCII(s),
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(f.SetLength(5000));
+ }
+
+ // Zip the source files.
+ const base::FilePath dest_file = temp_dir.GetPath().AppendASCII("dest.zip");
+ EXPECT_TRUE(zip::Zip({.src_dir = src_dir,
+ .dest_file = dest_file,
+ .include_hidden_files = true}));
+
+ // Since the source files compress well, the destination ZIP file should be
+ // smaller than the source files.
+ int64_t dest_file_size;
+ ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size));
+ EXPECT_GT(dest_file_size, 300);
+ EXPECT_LT(dest_file_size, 1000);
+}
+
+// Tests that a ZIP put inside a ZIP is simply stored instead of being
+// compressed.
+TEST_F(ZipTest, NestedZip) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ const base::FilePath src_dir = temp_dir.GetPath().AppendASCII("input");
+ EXPECT_TRUE(base::CreateDirectory(src_dir));
+
+ // Create a dummy ZIP file. This is not a valid ZIP file, but for the purpose
+ // of this test, it doesn't really matter.
+ const int64_t src_size = 5000;
+
+ {
+ base::File f(src_dir.AppendASCII("src.zip"),
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(f.SetLength(src_size));
+ }
+
+ // Zip the dummy ZIP file.
+ const base::FilePath dest_file = temp_dir.GetPath().AppendASCII("dest.zip");
+ EXPECT_TRUE(zip::Zip({.src_dir = src_dir, .dest_file = dest_file}));
+
+ // Since the dummy source (inner) ZIP file should simply be stored in the
+ // destination (outer) ZIP file, the destination file should be bigger than
+ // the source file, but not much bigger.
+ int64_t dest_file_size;
+ ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size));
+ EXPECT_GT(dest_file_size, src_size + 100);
+ EXPECT_LT(dest_file_size, src_size + 300);
+}
+
+// Tests that there are no 2GB or 4GB limits. Tests that big files can be zipped
+// (crbug.com/1207737) and that big ZIP files can be created
+// (crbug.com/1221447).
+TEST_F(ZipTest, DISABLED_BigFile) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ const base::FilePath src_dir = temp_dir.GetPath().AppendASCII("input");
+ EXPECT_TRUE(base::CreateDirectory(src_dir));
+
+ // Create a big dummy ZIP file. This is not a valid ZIP file, but for the
+ // purpose of this test, it doesn't really matter.
+ const int64_t src_size = 5'000'000'000;
+
+ {
+ base::File f(src_dir.AppendASCII("src.zip"),
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ ASSERT_TRUE(f.SetLength(src_size));
+ }
+
+ // Zip the dummy ZIP file.
+ const base::FilePath dest_file = temp_dir.GetPath().AppendASCII("dest.zip");
+ EXPECT_TRUE(zip::Zip({.src_dir = src_dir, .dest_file = dest_file}));
+
+ // Since the dummy source (inner) ZIP file should simply be stored in the
+ // destination (outer) ZIP file, the destination file should be bigger than
+ // the source file, but not much bigger.
+ int64_t dest_file_size;
+ ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size));
+ EXPECT_GT(dest_file_size, src_size + 100);
+ EXPECT_LT(dest_file_size, src_size + 300);
+}
+
} // namespace
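Beyond the size-based expectations used in the tests above, a test could also inspect an entry's method directly through plain minizip calls. The sketch below is only illustrative and not part of this patch; the include path and the FirstEntryMethod helper are assumptions, and compression_method is 0 for stored entries and Z_DEFLATED (8) for deflated ones.

// Sketch: read back the compression method of the first entry in an archive.
#include "third_party/zlib/contrib/minizip/unzip.h"

// Returns the raw method (0 = stored, 8 = deflated), or -1 on error.
int FirstEntryMethod(const char* zip_path) {
  unzFile file = unzOpen(zip_path);
  if (!file) return -1;
  int method = -1;
  if (unzGoToFirstFile(file) == UNZ_OK) {
    unz_file_info info;
    if (unzGetCurrentFileInfo(file, &info, /*szFileName=*/nullptr, 0,
                              /*extraField=*/nullptr, 0,
                              /*szComment=*/nullptr, 0) == UNZ_OK) {
      method = static_cast<int>(info.compression_method);
    }
  }
  unzClose(file);
  return method;
}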
diff --git a/deps/v8/third_party/zlib/google/zip_writer.cc b/deps/v8/third_party/zlib/google/zip_writer.cc
index 90b56eda5b..3e2345a46f 100644
--- a/deps/v8/third_party/zlib/google/zip_writer.cc
+++ b/deps/v8/third_party/zlib/google/zip_writer.cc
@@ -74,13 +74,21 @@ bool ZipWriter::OpenNewFileEntry(const base::FilePath& path,
bool is_directory,
base::Time last_modified) {
std::string str_path = path.AsUTF8Unsafe();
+
#if defined(OS_WIN)
base::ReplaceSubstringsAfterOffset(&str_path, 0u, "\\", "/");
#endif
- if (is_directory)
+
+ Compression compression = kDeflated;
+
+ if (is_directory) {
str_path += "/";
+ } else {
+ compression = GetCompressionMethod(path);
+ }
- return zip::internal::ZipOpenNewFileInZip(zip_file_, str_path, last_modified);
+ return zip::internal::ZipOpenNewFileInZip(zip_file_, str_path, last_modified,
+ compression);
}
bool ZipWriter::CloseNewFileEntry() {
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index b85bbc0590..de6013d5f8 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -509,7 +509,7 @@ class AddInfoVisitor : public TqObjectVisitor {
// On JSObject instances, this value is the start of in-object properties.
// The constructor function index option is only for primitives.
auto start_offset =
- map.GetInObjectPropertiesStartOrConstructorFunctionIndexValue(
+ map.GetInobjectPropertiesStartOrConstructorFunctionIndexValue(
accessor_);
// The total size of the object in memory. This may include over-allocated
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index aa4afa3da9..3d52b70cdf 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -156,6 +156,7 @@ v8_enable_backtrace = true
v8_enable_disassembler = true
v8_enable_object_print = true
v8_enable_verify_heap = true
+dcheck_always_on = false
""".replace("{GOMA}", USE_GOMA)
DEBUG_ARGS_TEMPLATE = """\
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 468490d819..7b3dcedc92 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -259,7 +259,7 @@ extras_accessors = [
'JSTypedArray, external_pointer, uintptr_t, kExternalPointerOffset',
'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
- 'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
+ 'Map, inobject_properties_start_or_constructor_function_index, char, kInobjectPropertiesStartOrConstructorFunctionIndexOffset',
'Map, instance_type, uint16_t, kInstanceTypeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 2171ee8a0d..250b741068 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -33,6 +33,8 @@ AUTO_EXCLUDE = [
'src/flags/flag-definitions.h',
# recorder.h should only be included conditionally.
'src/libplatform/tracing/recorder.h',
+ # trap-handler-simulator.h can only be included in simulator builds.
+ 'src/trap-handler/trap-handler-simulator.h',
]
AUTO_EXCLUDE_PATTERNS = [
'src/base/atomicops_internals_.*',
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index fd69075872..5049cc4534 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -689,7 +689,7 @@ class UploadStep(Step):
self.GitUpload(reviewer, self._options.force_upload,
bypass_hooks=self._options.bypass_upload_hooks,
- cc=self._options.cc, tbr_reviewer=tbr_reviewer)
+ tbr_reviewer=tbr_reviewer)
def MakeStep(step_class=Step, number=0, state=None, config=None,
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index c323542525..20a666fb83 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -137,7 +137,7 @@ class CommitBranch(Step):
def RunStep(self):
self["commit_title"] = "Version %s" % self["version"]
- text = "%s\n\nTBR=%s" % (self["commit_title"], self._options.reviewer)
+ text = "%s" % (self["commit_title"])
TextToFile(text, self.Config("COMMITMSG_FILE"))
self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
@@ -153,7 +153,10 @@ class LandBranch(Step):
self.GitUpload(force=True,
bypass_hooks=True,
no_autocc=True,
+ set_bot_commit=True,
message_file=self.Config("COMMITMSG_FILE"))
+ # TODO(crbug.com/1176141): This might need to go through CQ.
+ # We'd need to wait for it to land and then tag it.
cmd = "cl land --bypass-hooks -f"
if self._options.dry_run:
print("Dry run. Command:\ngit %s" % cmd)
diff --git a/deps/v8/tools/deprecation_stats.py b/deps/v8/tools/release/list_deprecated.py
index 56b26c39ad..bc479e1653 100755
--- a/deps/v8/tools/deprecation_stats.py
+++ b/deps/v8/tools/release/list_deprecated.py
@@ -1,26 +1,41 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# for py2/py3 compatibility
-from __future__ import print_function
-
import argparse
from datetime import datetime
import re
import subprocess
import sys
-from os import path
+from pathlib import Path
RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
RE_FILENAME = re.compile(r"^filename (.+)$")
+VERSION_CACHE = dict()
+RE_VERSION_MAJOR = re.compile(r".*V8_MAJOR_VERSION ([0-9]+)")
+RE_VERSION_MINOR = re.compile(r".*V8_MINOR_VERSION ([0-9]+)")
+
+
+def extract_version(hash):
+ if hash in VERSION_CACHE:
+ return VERSION_CACHE[hash]
+ if hash == '0000000000000000000000000000000000000000':
+ return 'HEAD'
+ result = subprocess.check_output(
+ ['git', 'show', f"{hash}:include/v8-version.h"], encoding='UTF-8')
+ major = RE_VERSION_MAJOR.search(result).group(1)
+ minor = RE_VERSION_MINOR.search(result).group(1)
+ version = f"{major}.{minor}"
+ VERSION_CACHE[hash] = version
+ return version
+
-def GetBlame(file_path):
+def get_blame(file_path):
result = subprocess.check_output(
- ['git', 'blame', '-t', '--line-porcelain', file_path])
+ ['git', 'blame', '-t', '--line-porcelain', file_path], encoding='UTF-8')
line_iter = iter(result.splitlines())
blame_list = list()
current_blame = None
@@ -31,11 +46,18 @@ def GetBlame(file_path):
if RE_GITHASH.match(line):
if current_blame is not None:
blame_list.append(current_blame)
- current_blame = {'time': 0, 'filename': None, 'content': None}
+ hash = line.split(" ")[0]
+ current_blame = {
+ 'datetime': 0,
+ 'filename': None,
+ 'content': None,
+ 'hash': hash
+ }
continue
match = RE_AUTHOR_TIME.match(line)
if match:
- current_blame['time'] = datetime.fromtimestamp(int(match.groups()[0]))
+ current_blame['datetime'] = datetime.fromtimestamp(int(
+ match.groups()[0]))
continue
match = RE_FILENAME.match(line)
if match:
@@ -50,18 +72,19 @@ RE_MACRO_END = re.compile(r"\);")
RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
-def FilterAndPrint(blame_list, macro, options):
+def filter_and_print(blame_list, macro, options):
before = options.before
index = 0
re_macro = re.compile(macro)
deprecated = list()
while index < len(blame_list):
blame = blame_list[index]
- time = blame['time']
- if time >= before:
+ commit_datetime = blame['datetime']
+ if commit_datetime >= before:
index += 1
continue
line = blame['content']
+ commit_hash = blame['hash']
match = re_macro.search(line)
if match:
pos = match.end()
@@ -69,7 +92,7 @@ def FilterAndPrint(blame_list, macro, options):
parens = 0
while True:
if pos >= len(line):
- # extend to next line
+ # Extend to next line
index = index + 1
blame = blame_list[index]
line = line + blame['content']
@@ -78,7 +101,7 @@ def FilterAndPrint(blame_list, macro, options):
elif line[pos] == ')':
parens = parens - 1
if parens == 0:
- # Exclud closing ")
+ # Exclude closing ")
pos = pos - 2
break
elif line[pos] == '"' and start == -1:
@@ -86,17 +109,19 @@ def FilterAndPrint(blame_list, macro, options):
pos = pos + 1
# Extract content and replace double quotes from merged lines
content = line[start:pos].strip().replace('""', '')
- deprecated.append([index + 1, time, content])
+ deprecated.append((index + 1, commit_datetime, commit_hash, content))
index = index + 1
- print("Marked as " + macro + ": " + str(len(deprecated)))
- for linenumber, time, content in deprecated:
- print(" " + (options.v8_header + ":" +
- str(linenumber)).rjust(len(options.v8_header) + 5) + "\t" +
- str(time) + "\t" + content)
+ print(f"# Marked as {macro}: {len(deprecated)}")
+ for linenumber, commit_datetime, commit_hash, content in deprecated:
+ commit_date = commit_datetime.date()
+ file_position = (
+ f"{options.v8_header}:{linenumber}").rjust(len(options.v8_header) + 5)
+ print(f" {file_position}\t{commit_date}\t{commit_hash[:8]}"
+ f"\t{extract_version(commit_hash)}\t{content}")
return len(deprecated)
-def ParseOptions(args):
+def parse_options(args):
parser = argparse.ArgumentParser(
description="Collect deprecation statistics")
parser.add_argument("v8_header", nargs='?', help="Path to v8.h")
@@ -107,16 +132,19 @@ def ParseOptions(args):
else:
options.before = datetime.now()
if options.v8_header is None:
- options.v8_header = path.join(path.dirname(__file__), '..', 'include', 'v8.h')
+ base_path = Path(__file__).parent.parent
+ options.v8_header = str(
+ (base_path / 'include' / 'v8.h').relative_to(base_path))
return options
-def Main(args):
- options = ParseOptions(args)
- blame_list = GetBlame(options.v8_header)
- FilterAndPrint(blame_list, "V8_DEPRECATE_SOON", options)
- FilterAndPrint(blame_list, "V8_DEPRECATED", options)
+def main(args):
+ options = parse_options(args)
+ blame_list = get_blame(options.v8_header)
+ filter_and_print(blame_list, "V8_DEPRECATE_SOON", options)
+ print("\n")
+ filter_and_print(blame_list, "V8_DEPRECATED", options)
if __name__ == "__main__":
- Main(sys.argv[1:])
+ main(sys.argv[1:])
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index ab6967208f..44f933e541 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -224,8 +224,6 @@ class MergeToBranch(ScriptsBase):
print("You must specify a merge comment if no patches are specified")
return False
options.bypass_upload_hooks = True
- # CC ulan to make sure that fixes are merged to Google3.
- options.cc = "ulan@chromium.org"
if len(options.branch.split('.')) > 2:
print ("This script does not support merging to roll branches. "
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
index 064ba73df2..636c882980 100755
--- a/deps/v8/tools/release/roll_merge.py
+++ b/deps/v8/tools/release/roll_merge.py
@@ -250,8 +250,6 @@ class RollMerge(ScriptsBase):
print("You must specify a merge comment if no patches are specified")
return False
options.bypass_upload_hooks = True
- # CC ulan to make sure that fixes are merged to Google3.
- options.cc = "ulan@chromium.org"
# Make sure to use git hashes in the new workflows.
for revision in options.revisions:
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 7cf5d14130..e8664cb2f1 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -433,9 +433,7 @@ test_tag
# The version file on master has build level 5.
self.WriteFakeVersionFile(build=5)
- commit_msg = """Version 3.22.5
-
-TBR=reviewer@chromium.org"""
+ commit_msg = """Version 3.22.5"""
def CheckVersionCommit():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
@@ -469,7 +467,7 @@ TBR=reviewer@chromium.org"""
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git cl upload --send-mail "
- "-f --bypass-hooks --no-autocc --message-file "
+ "-f --set-bot-commit --bypass-hooks --no-autocc --message-file "
"\"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
Cmd("git cl land --bypass-hooks -f", ""),
Cmd("git fetch", ""),
@@ -776,7 +774,7 @@ BUG=123,234,345,456,567,v8:123
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
RL("reviewer@chromium.org"), # V8 reviewer.
Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
- "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+ "--bypass-hooks", ""),
Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
RL("LGTM"), # Enter LGTM for V8 CL.
Cmd("git cl presubmit", "Presubmit successfull\n"),
@@ -912,7 +910,7 @@ NOTREECHECKS=true
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
RL("reviewer@chromium.org"), # V8 reviewer.
Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
- "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+ "--bypass-hooks", ""),
Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
RL("LGTM"), # Enter LGTM for V8 CL.
Cmd("git cl presubmit", "Presubmit successfull\n"),
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index cf5854c32c..009893a23a 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -370,9 +370,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -812,9 +809,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 1d39f6951d..ba4eff451a 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -53,16 +53,18 @@ ALL_VARIANT_FLAGS = {
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_VARIANT = {
"jitless": ["--opt", "--always-opt", "--liftoff", "--track-field-types",
- "--validate-asm", "--sparkplug", "--always-sparkplug"],
+ "--validate-asm", "--sparkplug", "--always-sparkplug",
+ "--regexp-tier-up"],
"nooptimization": ["--always-opt"],
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
- "stress_concurrent_inlining": ["--single-threaded", "--predictable"],
+ "stress_concurrent_inlining": ["--single-threaded", "--predictable", "--turboprop"],
+ "turboprop": ["--stress_concurrent_inlining"],
# The fast API tests initialize an embedder object that never needs to be
# serialized to the snapshot, so we don't have a
# SerializeInternalFieldsCallback for it, so they are incompatible with
# stress_snapshot.
- "stress_snapshot": [["--turbo-fast-api-calls"]],
+ "stress_snapshot": ["--turbo-fast-api-calls"],
"stress": ["--always-opt", "--no-always-opt",
"--max-inlined-bytecode-size=*",
"--max-inlined-bytecode-size-cumulative=*", "--stress-inline",
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 5b901b8a53..41352b34e8 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -57,7 +57,7 @@ GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
'--concurrent-recompilation-queue-length=64',
'--concurrent-recompilation-delay=500',
'--concurrent-recompilation',
- '--stress-flush-bytecode',
+ '--stress-flush-code', '--flush-bytecode',
'--wasm-code-gc', '--stress-wasm-code-gc']
RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
diff --git a/deps/v8/tools/testrunner/testproc/filter.py b/deps/v8/tools/testrunner/testproc/filter.py
index e2a5e972a9..20af0f8407 100644
--- a/deps/v8/tools/testrunner/testproc/filter.py
+++ b/deps/v8/tools/testrunner/testproc/filter.py
@@ -4,6 +4,7 @@
from collections import defaultdict
import fnmatch
+import os
from . import base
@@ -80,4 +81,9 @@ class NameFilterProc(base.TestProcFilter):
if fnmatch.fnmatch(test.path, g):
return False
exact_matches = self._exact_matches.get(test.suite.name, {})
- return test.path not in exact_matches
+ if test.path in exact_matches: return False
+ if os.sep != '/':
+ unix_path = test.path.replace(os.sep, '/')
+ if unix_path in exact_matches: return False
+ # Filter out everything else.
+ return True
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index ec97ab226f..c102cddec1 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test, is_flaky=False):
@@ -362,45 +361,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 16fc798c43..638ca100fb 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -115,7 +115,7 @@ def process(filename, lint, should_format):
print(filename + ' requires formatting', file=sys.stderr)
if should_format:
- output_file = open(filename, 'w')
+ output_file = open(filename, 'wb')
output_file.write(output);
output_file.close()
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 373c2ab51a..097b6a7267 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -156,81 +156,82 @@ INSTANCE_TYPES = {
191: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
- 1042: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
- 1043: "JS_ITERATOR_PROTOTYPE_TYPE",
- 1044: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
- 1045: "JS_OBJECT_PROTOTYPE_TYPE",
- 1046: "JS_PROMISE_PROTOTYPE_TYPE",
- 1047: "JS_REG_EXP_PROTOTYPE_TYPE",
- 1048: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
- 1049: "JS_SET_PROTOTYPE_TYPE",
- 1050: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
- 1051: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
- 1052: "JS_GENERATOR_OBJECT_TYPE",
- 1053: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
- 1054: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 1055: "JS_ARGUMENTS_OBJECT_TYPE",
- 1056: "JS_API_OBJECT_TYPE",
- 1058: "JS_BOUND_FUNCTION_TYPE",
- 1059: "JS_FUNCTION_TYPE",
- 1060: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1061: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1062: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1063: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1064: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1065: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1066: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1067: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1068: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1069: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1070: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 1071: "JS_ARRAY_CONSTRUCTOR_TYPE",
- 1072: "JS_PROMISE_CONSTRUCTOR_TYPE",
- 1073: "JS_REG_EXP_CONSTRUCTOR_TYPE",
- 1074: "JS_MAP_KEY_ITERATOR_TYPE",
- 1075: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 1076: "JS_MAP_VALUE_ITERATOR_TYPE",
- 1077: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 1078: "JS_SET_VALUE_ITERATOR_TYPE",
- 1079: "JS_DATA_VIEW_TYPE",
- 1080: "JS_TYPED_ARRAY_TYPE",
- 1081: "JS_MAP_TYPE",
- 1082: "JS_SET_TYPE",
- 1083: "JS_WEAK_MAP_TYPE",
- 1084: "JS_WEAK_SET_TYPE",
- 1085: "JS_ARRAY_TYPE",
- 1086: "JS_ARRAY_BUFFER_TYPE",
- 1087: "JS_ARRAY_ITERATOR_TYPE",
- 1088: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 1089: "JS_COLLATOR_TYPE",
- 1090: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 1091: "JS_DATE_TYPE",
- 1092: "JS_DATE_TIME_FORMAT_TYPE",
- 1093: "JS_DISPLAY_NAMES_TYPE",
- 1094: "JS_ERROR_TYPE",
- 1095: "JS_FINALIZATION_REGISTRY_TYPE",
- 1096: "JS_LIST_FORMAT_TYPE",
- 1097: "JS_LOCALE_TYPE",
- 1098: "JS_MESSAGE_OBJECT_TYPE",
- 1099: "JS_NUMBER_FORMAT_TYPE",
- 1100: "JS_PLURAL_RULES_TYPE",
- 1101: "JS_PROMISE_TYPE",
- 1102: "JS_REG_EXP_TYPE",
- 1103: "JS_REG_EXP_STRING_ITERATOR_TYPE",
- 1104: "JS_RELATIVE_TIME_FORMAT_TYPE",
- 1105: "JS_SEGMENT_ITERATOR_TYPE",
- 1106: "JS_SEGMENTER_TYPE",
- 1107: "JS_SEGMENTS_TYPE",
- 1108: "JS_STRING_ITERATOR_TYPE",
- 1109: "JS_V8_BREAK_ITERATOR_TYPE",
- 1110: "JS_WEAK_REF_TYPE",
- 1111: "WASM_EXCEPTION_OBJECT_TYPE",
- 1112: "WASM_GLOBAL_OBJECT_TYPE",
- 1113: "WASM_INSTANCE_OBJECT_TYPE",
- 1114: "WASM_MEMORY_OBJECT_TYPE",
- 1115: "WASM_MODULE_OBJECT_TYPE",
- 1116: "WASM_TABLE_OBJECT_TYPE",
- 1117: "WASM_VALUE_OBJECT_TYPE",
+ 1058: "JS_API_OBJECT_TYPE",
+ 2058: "JS_LAST_DUMMY_API_OBJECT_TYPE",
+ 2059: "JS_BOUND_FUNCTION_TYPE",
+ 2060: "JS_FUNCTION_TYPE",
+ 2061: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2062: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2063: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2064: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2065: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2066: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2067: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2068: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2069: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2070: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2071: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2072: "JS_ARRAY_CONSTRUCTOR_TYPE",
+ 2073: "JS_PROMISE_CONSTRUCTOR_TYPE",
+ 2074: "JS_REG_EXP_CONSTRUCTOR_TYPE",
+ 2075: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
+ 2076: "JS_ITERATOR_PROTOTYPE_TYPE",
+ 2077: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
+ 2078: "JS_OBJECT_PROTOTYPE_TYPE",
+ 2079: "JS_PROMISE_PROTOTYPE_TYPE",
+ 2080: "JS_REG_EXP_PROTOTYPE_TYPE",
+ 2081: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
+ 2082: "JS_SET_PROTOTYPE_TYPE",
+ 2083: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
+ 2084: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
+ 2085: "JS_MAP_KEY_ITERATOR_TYPE",
+ 2086: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 2087: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 2088: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 2089: "JS_SET_VALUE_ITERATOR_TYPE",
+ 2090: "JS_GENERATOR_OBJECT_TYPE",
+ 2091: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 2092: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 2093: "JS_DATA_VIEW_TYPE",
+ 2094: "JS_TYPED_ARRAY_TYPE",
+ 2095: "JS_MAP_TYPE",
+ 2096: "JS_SET_TYPE",
+ 2097: "JS_WEAK_MAP_TYPE",
+ 2098: "JS_WEAK_SET_TYPE",
+ 2099: "JS_ARGUMENTS_OBJECT_TYPE",
+ 2100: "JS_ARRAY_TYPE",
+ 2101: "JS_ARRAY_BUFFER_TYPE",
+ 2102: "JS_ARRAY_ITERATOR_TYPE",
+ 2103: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 2104: "JS_COLLATOR_TYPE",
+ 2105: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 2106: "JS_DATE_TYPE",
+ 2107: "JS_DATE_TIME_FORMAT_TYPE",
+ 2108: "JS_DISPLAY_NAMES_TYPE",
+ 2109: "JS_ERROR_TYPE",
+ 2110: "JS_FINALIZATION_REGISTRY_TYPE",
+ 2111: "JS_LIST_FORMAT_TYPE",
+ 2112: "JS_LOCALE_TYPE",
+ 2113: "JS_MESSAGE_OBJECT_TYPE",
+ 2114: "JS_NUMBER_FORMAT_TYPE",
+ 2115: "JS_PLURAL_RULES_TYPE",
+ 2116: "JS_PROMISE_TYPE",
+ 2117: "JS_REG_EXP_TYPE",
+ 2118: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 2119: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 2120: "JS_SEGMENT_ITERATOR_TYPE",
+ 2121: "JS_SEGMENTER_TYPE",
+ 2122: "JS_SEGMENTS_TYPE",
+ 2123: "JS_STRING_ITERATOR_TYPE",
+ 2124: "JS_V8_BREAK_ITERATOR_TYPE",
+ 2125: "JS_WEAK_REF_TYPE",
+ 2126: "WASM_GLOBAL_OBJECT_TYPE",
+ 2127: "WASM_INSTANCE_OBJECT_TYPE",
+ 2128: "WASM_MEMORY_OBJECT_TYPE",
+ 2129: "WASM_MODULE_OBJECT_TYPE",
+ 2130: "WASM_TABLE_OBJECT_TYPE",
+ 2131: "WASM_TAG_OBJECT_TYPE",
+ 2132: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
@@ -329,68 +330,68 @@ KNOWN_MAPS = {
("read_only_space", 0x03215): (67, "BasicBlockCountersMarkerMap"),
("read_only_space", 0x03259): (91, "ArrayBoilerplateDescriptionMap"),
("read_only_space", 0x03359): (104, "InterceptorInfoMap"),
- ("read_only_space", 0x05691): (76, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x056b9): (77, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x056e1): (78, "CallableTaskMap"),
- ("read_only_space", 0x05709): (79, "CallbackTaskMap"),
- ("read_only_space", 0x05731): (80, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x05759): (83, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x05781): (84, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x057a9): (85, "AccessCheckInfoMap"),
- ("read_only_space", 0x057d1): (86, "AccessorInfoMap"),
- ("read_only_space", 0x057f9): (87, "AccessorPairMap"),
- ("read_only_space", 0x05821): (88, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x05849): (89, "AllocationMementoMap"),
- ("read_only_space", 0x05871): (92, "AsmWasmDataMap"),
- ("read_only_space", 0x05899): (93, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x058c1): (94, "BaselineDataMap"),
- ("read_only_space", 0x058e9): (95, "BreakPointMap"),
- ("read_only_space", 0x05911): (96, "BreakPointInfoMap"),
- ("read_only_space", 0x05939): (97, "CachedTemplateObjectMap"),
- ("read_only_space", 0x05961): (99, "ClassPositionsMap"),
- ("read_only_space", 0x05989): (100, "DebugInfoMap"),
- ("read_only_space", 0x059b1): (103, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x059d9): (105, "InterpreterDataMap"),
- ("read_only_space", 0x05a01): (106, "ModuleRequestMap"),
- ("read_only_space", 0x05a29): (107, "PromiseCapabilityMap"),
- ("read_only_space", 0x05a51): (108, "PromiseReactionMap"),
- ("read_only_space", 0x05a79): (109, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x05aa1): (110, "PrototypeInfoMap"),
- ("read_only_space", 0x05ac9): (111, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x05af1): (112, "ScriptMap"),
- ("read_only_space", 0x05b19): (113, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x05b41): (114, "StackFrameInfoMap"),
- ("read_only_space", 0x05b69): (115, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05b91): (116, "Tuple2Map"),
- ("read_only_space", 0x05bb9): (117, "WasmExceptionTagMap"),
- ("read_only_space", 0x05be1): (118, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x05c09): (136, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x05c31): (153, "DescriptorArrayMap"),
- ("read_only_space", 0x05c59): (158, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x05c81): (157, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x05ca9): (174, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x05cd1): (170, "InternalClassMap"),
- ("read_only_space", 0x05cf9): (181, "SmiPairMap"),
- ("read_only_space", 0x05d21): (180, "SmiBoxMap"),
- ("read_only_space", 0x05d49): (147, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05d71): (148, "ExportedSubClassMap"),
- ("read_only_space", 0x05d99): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05dc1): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05de9): (135, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05e11): (171, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05e39): (149, "ExportedSubClass2Map"),
- ("read_only_space", 0x05e61): (182, "SortStateMap"),
- ("read_only_space", 0x05e89): (90, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05eb1): (90, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05ed9): (81, "LoadHandler1Map"),
- ("read_only_space", 0x05f01): (81, "LoadHandler2Map"),
- ("read_only_space", 0x05f29): (81, "LoadHandler3Map"),
- ("read_only_space", 0x05f51): (82, "StoreHandler0Map"),
- ("read_only_space", 0x05f79): (82, "StoreHandler1Map"),
- ("read_only_space", 0x05fa1): (82, "StoreHandler2Map"),
- ("read_only_space", 0x05fc9): (82, "StoreHandler3Map"),
+ ("read_only_space", 0x05699): (76, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x056c1): (77, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x056e9): (78, "CallableTaskMap"),
+ ("read_only_space", 0x05711): (79, "CallbackTaskMap"),
+ ("read_only_space", 0x05739): (80, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05761): (83, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x05789): (84, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x057b1): (85, "AccessCheckInfoMap"),
+ ("read_only_space", 0x057d9): (86, "AccessorInfoMap"),
+ ("read_only_space", 0x05801): (87, "AccessorPairMap"),
+ ("read_only_space", 0x05829): (88, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x05851): (89, "AllocationMementoMap"),
+ ("read_only_space", 0x05879): (92, "AsmWasmDataMap"),
+ ("read_only_space", 0x058a1): (93, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x058c9): (94, "BaselineDataMap"),
+ ("read_only_space", 0x058f1): (95, "BreakPointMap"),
+ ("read_only_space", 0x05919): (96, "BreakPointInfoMap"),
+ ("read_only_space", 0x05941): (97, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x05969): (99, "ClassPositionsMap"),
+ ("read_only_space", 0x05991): (100, "DebugInfoMap"),
+ ("read_only_space", 0x059b9): (103, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x059e1): (105, "InterpreterDataMap"),
+ ("read_only_space", 0x05a09): (106, "ModuleRequestMap"),
+ ("read_only_space", 0x05a31): (107, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05a59): (108, "PromiseReactionMap"),
+ ("read_only_space", 0x05a81): (109, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x05aa9): (110, "PrototypeInfoMap"),
+ ("read_only_space", 0x05ad1): (111, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x05af9): (112, "ScriptMap"),
+ ("read_only_space", 0x05b21): (113, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05b49): (114, "StackFrameInfoMap"),
+ ("read_only_space", 0x05b71): (115, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05b99): (116, "Tuple2Map"),
+ ("read_only_space", 0x05bc1): (117, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05be9): (118, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05c11): (136, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x05c39): (153, "DescriptorArrayMap"),
+ ("read_only_space", 0x05c61): (158, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x05c89): (157, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x05cb1): (174, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05cd9): (170, "InternalClassMap"),
+ ("read_only_space", 0x05d01): (181, "SmiPairMap"),
+ ("read_only_space", 0x05d29): (180, "SmiBoxMap"),
+ ("read_only_space", 0x05d51): (147, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05d79): (148, "ExportedSubClassMap"),
+ ("read_only_space", 0x05da1): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05dc9): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05df1): (135, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05e19): (171, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05e41): (149, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05e69): (182, "SortStateMap"),
+ ("read_only_space", 0x05e91): (90, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05eb9): (90, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05ee1): (81, "LoadHandler1Map"),
+ ("read_only_space", 0x05f09): (81, "LoadHandler2Map"),
+ ("read_only_space", 0x05f31): (81, "LoadHandler3Map"),
+ ("read_only_space", 0x05f59): (82, "StoreHandler0Map"),
+ ("read_only_space", 0x05f81): (82, "StoreHandler1Map"),
+ ("read_only_space", 0x05fa9): (82, "StoreHandler2Map"),
+ ("read_only_space", 0x05fd1): (82, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
- ("map_space", 0x02141): (1098, "JSMessageObjectMap"),
+ ("map_space", 0x02141): (2113, "JSMessageObjectMap"),
}
# List of known V8 objects.
@@ -475,27 +476,27 @@ KNOWN_OBJECTS = {
("old_space", 0x029b5): "StringSplitCache",
("old_space", 0x02dbd): "RegExpMultipleCache",
("old_space", 0x031c5): "BuiltinsConstantsTable",
- ("old_space", 0x035d5): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x035f9): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0361d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x03641): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x03665): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x03689): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x036ad): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x036d1): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x036f5): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x03719): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x0373d): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x03761): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x03785): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x037a9): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x037cd): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x037f1): "PromiseCatchFinallySharedFun",
- ("old_space", 0x03815): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x03839): "PromiseThenFinallySharedFun",
- ("old_space", 0x0385d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x03881): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x038a5): "ProxyRevokeSharedFun",
+ ("old_space", 0x035e5): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x03609): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x0362d): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x03651): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x03675): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x03699): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x036bd): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x036e1): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x03705): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x03729): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x0374d): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x03771): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x03795): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x037b9): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x037dd): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x03801): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x03825): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x03849): "PromiseThenFinallySharedFun",
+ ("old_space", 0x0386d): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x03891): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x038b5): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index f890e67970..60b58be703 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,7 +7,7 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.......
-The bartender starts to shake the bottles...........................
+The bartender starts to shake the bottles............................
I can't add trailing whitespaces, so I'm adding this line............
I'm starting to think that just adding trailing whitespaces might not be bad.